From 4b18ba9a70e91cd8370f843a6e26f3bb11a86b77 Mon Sep 17 00:00:00 2001 From: dgmccart Date: Thu, 30 Nov 2023 19:54:19 +0000 Subject: [PATCH] deploy: 1d403e773f93f4e5b01d34f418daffc90124c390 --- api_reference/index.html | 156 ++++++++++++++++++--------------------- search/search_index.json | 2 +- sitemap.xml.gz | Bin 127 -> 127 bytes 3 files changed, 74 insertions(+), 84 deletions(-) diff --git a/api_reference/index.html b/api_reference/index.html index d825fbae..57748467 100644 --- a/api_reference/index.html +++ b/api_reference/index.html @@ -1529,7 +1529,7 @@

Class Camera

Class DeviceIdentifier

-

The DeviceIdentifier class represents an identifier for a device, such as a camera or video source.

+

The DeviceIdentifier class represents an identifier for a supported device, including its unique id and type, such as a camera or storage.

class DeviceIdentifier:
     id: Tuple[int, int]
     kind: DeviceKind
@@ -1624,29 +1624,30 @@ 

Class DeviceIdentifier

@staticmethod def none() -> DeviceIdentifier: ... - """Returns a "None" type DeviceIdentifier. Useful when a DeviceIdentifier is not needed.""" - - def __eq__(self, other: object) -> bool: - """Checks if two DeviceIdentifier objects are equal.""" - - def __ge__(self, other: object) -> bool: - """Checks if this DeviceIdentifier is greater than or equal to another.""" - - def __gt__(self, other: object) -> bool: - """Checks if this DeviceIdentifier is greater than another.""" - - def __le__(self, other: object) -> bool: - """Checks if this DeviceIdentifier is less than or equal to another.""" - - def __lt__(self, other: object) -> bool: - """Checks if this DeviceIdentifier is less than another.""" - - def __ne__(self, other: object) -> bool: - """Checks if two DeviceIdentifier objects are not equal.""" + """Returns a "None" type DeviceIdentifier. + Useful when a DeviceIdentifier is not needed.""" + + def __eq__(self, other: object) -> bool: + """Checks if two DeviceIdentifier objects are equal.""" + + def __ge__(self, other: object) -> bool: + """Checks if this DeviceIdentifier is greater than or equal to another.""" + + def __gt__(self, other: object) -> bool: + """Checks if this DeviceIdentifier is greater than another.""" + + def __le__(self, other: object) -> bool: + """Checks if this DeviceIdentifier is less than or equal to another.""" + + def __lt__(self, other: object) -> bool: + """Checks if this DeviceIdentifier is less than another.""" + + def __ne__(self, other: object) -> bool: + """Checks if two DeviceIdentifier objects are not equal."""

Class DeviceKind

-

The DeviceKind class represents properties for supported devices, such as a camera or video source, in a given system.

+

The DeviceKind class represents the types of devices in a given system.

class DeviceKind:
     Camera: ClassVar[DeviceKind] = DeviceKind.Camera
     NONE: ClassVar[DeviceKind] = DeviceKind.NONE
@@ -1693,70 +1694,59 @@ 

Class DeviceKind

Class DeviceManager

-

The DeviceManager class manages selection of available devices in the system.

+

The DeviceManager class manages selection of available devices in the system. Regular expressions are accepted for the name argument.

class DeviceManager:
     def devices(self) -> List[DeviceIdentifier]:
         """Returns a list of all available device identifiers."""
 
     @overload
-    def select(self, kind: DeviceKind) -> Optional[DeviceIdentifier]:
-        """Selects the first available device of `kind`.
+    def select(self, kind: DeviceKind, name: Optional[str]) -> Optional[DeviceIdentifier]:
+        """Selects a specified device.
 
         Args:
             kind (DeviceKind): The type of device to select.
-
-        Returns:
-            Optional[DeviceIdentifier]: The identifier of the first available device of `kind`, or `None` if no such device is available.
-        """
-
-    @overload
-    def select(self, kind: DeviceKind, name: Optional[str]) -> Optional[DeviceIdentifier]:
-        """Selects a specified device.
+            name (Optional[str]): The name of the device to select. Regular expressions supported.
+
+        Returns:
+            Optional[DeviceIdentifier]: The selected device identifier, or None if the specified device is not available.
+        """
+
+    def select_one_of(self, kind: DeviceKind, names: List[str]) -> Optional[DeviceIdentifier]:
+        """Selects the first device in the list of devices that is of one of the specified kinds.
 
         Args:
             kind (DeviceKind): The type of device to select.
-            name (Optional[str]): The name of the device to select.
+            names (List[str]): A list of device names to choose from. Regular expressions supported.
 
         Returns:
-            Optional[DeviceIdentifier]: The selected device identifier, or None if the specified device is not available.
+            Optional[DeviceIdentifier]: The selected device identifier, or None if none of the specified devices are available.
         """
-
-    def select_one_of(self, kind: DeviceKind, names: List[str]) -> Optional[DeviceIdentifier]:
-        """Selects the first device in the list of devices that is of one of the specified kinds.
-
-        Args:
-            kind (DeviceKind): The type of device to select.
-            names (List[str]): A list of device names to choose from.
-
-        Returns:
-            Optional[DeviceIdentifier]: The selected device identifier, or None if none of the specified devices are available.
-        """
 

Class DeviceState

@@ -1838,7 +1828,7 @@

Class Direction

Class InputTriggers

-

The InputTriggers class represents input triggers for a device.

+

The InputTriggers class represents input triggers for a camera device.

class InputTriggers:
     acquisition_start: Trigger
     exposure: Trigger
@@ -1862,7 +1852,7 @@ 

Class InputTriggers

Class OutputTriggers

-

The OutputTriggers class represents output triggers for a device.

+

The OutputTriggers class represents output triggers for a camera device.

class OutputTriggers:
     exposure: Trigger
     frame_start: Trigger
@@ -1925,7 +1915,7 @@ 

Class Properties

  • -

    video: A tuple containing two VideoStream instances which contain information on the camera was used for acquisition and how the data is stored.

    +

    video: A tuple containing two VideoStream instances, since Acquire supports simultaneous streaming from two video sources. VideoStream objects have two attributes, camera and storage, which set the source and sink for the stream.

  • The dict method creates a dictionary of a Properties object's attributes.

    @@ -1947,7 +1937,7 @@

    Class Runtime

    stream_id (int): The ID of the stream for which available data is requested. Returns: - AvailableData: The AvailableData instance for the given stream ID. + AvailableData: The AvailableData instance for the given VideoStream ID. """ def get_configuration(self) -> Properties: @@ -1980,7 +1970,7 @@

    Class Runtime

    Call device_manager() to return the DeviceManager object associated with this Runtime instance.

  • -

    Call get_available_data with a specific stream_id to return the AvailableData associated with the stream_id.

    +

    Call get_available_data with a specific stream_id, 0 or 1, to return the AvailableData associated with the 1st or 2nd video source, respectively.

  • Call get_configuration() to return the Properties object associated with this Runtime instance.

    @@ -1995,10 +1985,10 @@

    Class Runtime

    Call start() to begin data acquisition.

  • -

    Call stop() to end data acquisition once the max number of frames specified in config.video[0].max_frame_count is collected.

    +

    Call stop() to end data acquisition once the max number of frames specified in acquire.VideoStream.max_frame_count is collected. All objects are deleted to free up resources upon shutdown of Runtime.

  • -

    Call abort() to Runtime instance.

    +

    Call abort() to immediately end data acquisition. All objects are deleted to free up resources upon shutdown of Runtime.

Class SampleRateHz

@@ -2084,7 +2074,7 @@

Class SampleType

Class SignalIOKind

-

The SignalIOKind class defines the type of input and output signals.

+

The SignalIOKind class defines the signal type, input or output, for a trigger.

class SignalIOKind:
     Input: ClassVar[SignalIOKind] = SignalIOKind.Input
     Output: ClassVar[SignalIOKind] = SignalIOKind.Output
@@ -2154,7 +2144,7 @@ 

Class SignalType

Class Storage

-

The Storage class represents storage settings for the acquired data.

+

The Storage class represents storage devices and their settings.

class Storage:
     identifier: Optional[DeviceIdentifier]
     settings: StorageProperties
@@ -2188,29 +2178,29 @@ 

Class StorageProperties

  • -

    external_metadata_json: An optional attribute representing external metadata in JSON format.

    +

    external_metadata_json: An optional attribute of the metadata JSON filename as a string.

  • -

    filename: An optional attribute representing the filename.

    +

    filename: An optional attribute representing the filename for storing the image data.

  • -

    first_frame_id: An integer representing the ID of the first frame.

    +

    first_frame_id: An integer representing the ID of the first frame for a given acquisition.

  • -

    pixel_scale_um: A tuple of two floats representing pixel size in micrometers.

    +

    pixel_scale_um: A tuple of two floats representing the pixel size of the camera in micrometers.

  • -

    chunking: An instance of the ChunkingProperties class representing data chunking settings.

    +

    chunking: An instance of the ChunkingProperties class representing data chunking settings for Zarr storage.

  • -

    enable_multiscale: A boolean indicating whether multiscale storage is desired.

    +

    enable_multiscale: A boolean indicating whether multiscale storage is enabled.

  • The dict method creates a dictionary of a StorageProperties object's attributes.

Class TileShape

-

The TileShape class represents the tile shape, or voxel size, for tile scanning acquisition.

+

The TileShape class represents the shape of data chunks for storage in Zarr containers.

class TileShape:
     width: int
     height: int
@@ -2221,13 +2211,13 @@ 

Class TileShape

  • -

    width: The width of the tile.

    +

    width: The width of the chunk.

  • -

    height: The height of the tile.

    +

    height: The height of the chunk.

  • -

    planes: The number of planes in the tile.

    +

    planes: The number of planes in the chunk.

  • The dict method creates a dictionary of a TileShape object's attributes.

    @@ -2255,7 +2245,7 @@

    Class Trigger

    enable: A boolean indicating whether the trigger is enabled.

  • -

    line: An integer representing the line of the trigger signal.

    +

    line: An integer representing the max value of the trigger signal.

  • kind: An instance of the SignalIOKind class specifying if the signal is input or output.

    @@ -2317,7 +2307,7 @@

    Class VideoFrame

    Call data() to create an NDArray of the VideoFrame data.

  • -

    Call metadata() to query the metadata of VideoFrame.

    +

    Call metadata() to create a VideoFrameMetadata object containing the metadata of VideoFrame.

Class VideoFrameMetadata

@@ -2334,7 +2324,7 @@

Class VideoFrameMetadata

frame_id: An integer representing the ID of the video frame.

  • -

    timestamps: An instance of the VideoFrameTimestamps class specifying whether the video timestamps are based on the hardware clock or the acquisition clock.

    +

    timestamps: An instance of the VideoFrameTimestamps class specifying the video timestamps based on the hardware clock and the acquisition clock.

  • The dict method creates a dictionary of a VideoFrameTimestamps object's attributes.

    @@ -2373,16 +2363,16 @@

    Class VideoStream

    • -

      camera: An instance of the Camera class representing the camera used in the video stream.

      +

      camera: An instance of the Camera class representing the camera device for the video stream.

    • -

      storage: An instance of the Storage class representing the storage settings for the video stream.

      +

      storage: An instance of the Storage class representing the storage device for the video stream.

    • -

      max_frame_count: An integer representing the maximum number of frames in the video stream.

      +

      max_frame_count: An integer representing the maximum number of frames to acquire.

    • -

      frame_average_count: An integer representing the number of frames to average in the video stream.

      +

      frame_average_count: An integer representing the number of frames to average, if any, before streaming. The default value is 0, which disables this feature. Setting this to 1 will also prevent averaging.

    • The dict method creates a dictionary of a VideoStream object's attributes.

      diff --git a/search/search_index.json b/search/search_index.json index dae99363..dfd7af3d 100644 --- a/search/search_index.json +++ b/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Acquire Docs","text":""},{"location":"#guides","title":"Guides","text":"Get Started

      Install Acquire and use simulated cameras

      Get Started API Reference

      Information on classes and methods

      API Reference Tutorials

      Guides on using Acquire for specific tasks

      Tutorials For contributors

      Learn how to contribute code or documentation to Acquire

      For contributors"},{"location":"#about-acquire","title":"About Acquire","text":"

      Acquire (acquire-imaging on PyPI) provides high-speed, multi-camera, video streaming and image acquisition with a programming interface for streaming video data directly to napari, Python and cloud-friendly file formats.

      "},{"location":"#installation","title":"Installation","text":"

      To install Acquire on Windows, macOS, or Ubuntu, simply run the following command:

      python -m pip install acquire-imaging\n
      "},{"location":"#supported-cameras-and-file-formats","title":"Supported Cameras and File Formats","text":"

      Acquire supports the following cameras (currently only on Windows):

      • Hamamatsu Orca Fusion BT (C15440-20UP)
      • Vieworks VC-151MX-M6H00
      • FLIR Blackfly USB3 (BFLY-U3-23S6M-C)
      • FLIR Oryx 10GigE (ORX-10GS-51S5M-C)

      Acquire also supports the following output file formats:

      • Tiff
      • Zarr

      For testing and demonstration purposes, Acquire provides a few simulated cameras, as well as raw and trash output devices.

      "},{"location":"#citing-acquire","title":"Citing Acquire","text":"
      cff-version: 1.2.0\ntitle: Acquire: a multi-camera video streaming software focusing on microscopy\nmessage: >-\n  If you use this software, please cite it using the\n  metadata from this file.\ntype: software\nauthors:\n  - given-names: Nathan\n    family-names: Clack\n    email: nclack@chanzuckerberg.com\n    affiliation: Chan-Zuckerberg Initiative Foundation\n    orcid: 'https://orcid.org/0000-0001-6236-9282'\n  - given-names: Alan\n    family-names: Liddell\n    email: aliddell@chanzuckerberg.com\n    affiliation: Chan-Zuckerberg Initiative Foundation\n  - given-names: Andrew\n    family-names: Sweet\n    email: andrewdsweet@gmail.com\n    affiliation: Chan-Zuckerberg Initiative Foundation\nrepository-code: 'https://github.com/acquire-project/acquire-python'\nrepository-artifact: 'https://pypi.org/project/acquire-imaging/'\nabstract: >-\n  acquire-imaging is a library focusing on multi-camera video\n  streaming for microscopy.\nlicense: Apache-2.0\n
      "},{"location":"#acquire-license","title":"Acquire License","text":"

      Acquire is provided under an Apache 2.0 license. You can learn more about the Apache license in the documentation here.

      "},{"location":"api_reference/","title":"API Reference","text":"

      Information on the classes in acquire-imaging along with the attributes and methods associated with them.

      "},{"location":"api_reference/#class-availabledata","title":"Class AvailableData","text":"

      The AvailableData class represents the collection of frames that have been captured since the last call to runtime.get_available_data(). AvailableData objects should have a short lifetime, since they reserve space on the video queue and will eventually block camera acquisition to ensure no data is overwritten before it can be processed.

      class AvailableData:\n    def frames(self) -> Iterator[VideoFrame]:\n        \"\"\"Returns an iterator over the video frames in the available data.\"\"\"\n\n    def get_frame_count(self) -> int:\n        \"\"\"Returns the total number of video frames in the available data.\"\"\"\n\n    def __iter__(self) -> Iterator[VideoFrame]:\n        \"\"\"Returns an iterator over the video frames in the available data.\"\"\"\n
      • The frames method provides an iterator over these frames.

      • Call get_frame_count() to query the number of frames in an AvailableData object.

      • The __iter__ method enables AvailableData objects to be iterated.
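
      For example, frames can be drained from the queue as they arrive. The following is a minimal sketch; it assumes runtime is a Runtime instance that has already been configured and started, and that video stream 0 is in use:

      a = runtime.get_available_data(0)
      if a.get_frame_count() > 0:
          for frame in a.frames():
              print(frame.metadata().frame_id)
      del a  # release the frames' slots on the video queue promptly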

      "},{"location":"api_reference/#class-camera","title":"Class Camera","text":"

      The Camera class is used to describe cameras or other video sources.

      class Camera:\n    identifier: Optional[DeviceIdentifier]\n    settings: CameraProperties\n\n    def __init__(self, *args: None, **kwargs: Any) -> None: ...\n    \"\"\"Initializes a Camera object with optional arguments.\"\"\"\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the Camera attributes.\"\"\"\n
      • identifier: An optional attribute which contains an instance of the DeviceIdentifier class that describes the camera, or video source, if that device is natively supported. Otherwise, it is of type None.

      • settings: An instance of the CameraProperties class which contains the settings for the camera.

      • The dict method creates a dictionary of a Camera object's attributes.

      "},{"location":"api_reference/#class-cameraproperties","title":"Class CameraProperties","text":"

      The CameraProperties class is used to set the desired camera properties for acquisition.

      class CameraProperties:\n    exposure_time_us: float\n    line_interval_us: float\n    binning: float\n    pixel_type: SampleType\n    readout_direction: Direction\n    offset: Tuple[int, int]\n    shape: Tuple[int, int]\n    input_triggers: InputTriggers\n    output_triggers: OutputTriggers\n\n    def __init__(self, *args: None, **kwargs: Any) -> None: ...\n    \"\"\"Initializes a CameraProperties object with optional arguments.\"\"\"\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the CameraProperties attributes.\"\"\"\n
      • exposure_time_us: How long in microseconds your camera should collect light from the sample. However, for simulated cameras, this is just a waiting period before generating the next frame.

      • line_interval_us: The time to scan one line in microseconds in a rolling shutter camera.

      • binning: How many adjacent pixels in each direction to combine by averaging. For example, if binning is set to 2, a 2x2 square of pixels will be combined by averaging. If binning is set to 1, no pixels will be combined.

      • pixel_type: An instance of the SampleType class which specifies the numerical data type, for example u16, a 16-bit unsigned integer type.

      • readout_direction: An instance of the Direction class which specifies whether the data is read out forward or backward.

      • offset: A tuple of two integers representing the (x, y) offset in pixels of the image region of interest on the camera.

      • shape: A tuple of two integers representing the size in pixels of the image region of interest on the camera.

      • input_triggers: An instance of the InputTriggers class, which describes the trigger signals for starting acquisition, camera exposure, and acquiring a frame.

      • output_triggers: An instance of the OutputTriggers class, which describes the trigger signals for the camera exposure, acquiring a frame, as well as any wait times for sending the trigger signal.

      • The dict method creates a dictionary of a CameraProperties object's attributes.
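
      As an illustration, camera properties are set on a configuration object before acquisition begins. A minimal sketch, assuming runtime is a Runtime instance and stream 0 has a camera selected:

      config = runtime.get_configuration()
      config.video[0].camera.settings.exposure_time_us = 5e4  # 50 ms
      config.video[0].camera.settings.binning = 1  # do not combine pixels
      config.video[0].camera.settings.pixel_type = acquire.SampleType.U8
      config.video[0].camera.settings.shape = (1024, 768)  # width, then height
      config = runtime.set_configuration(config)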

      "},{"location":"api_reference/#class-chunkingproperties","title":"Class ChunkingProperties","text":"

      The ChunkingProperties class represents properties related to data chunking for storage in a Zarr container.

      class ChunkingProperties:\n    max_bytes_per_chunk: int\n    tile: TileShape\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the ChunkingProperties attributes.\"\"\"\n
      • max_bytes_per_chunk: The maximum number of bytes per data chunk.

      • tile: An instance of the TileShape class representing the shape of the data chunk tile.

      • The dict method creates a dictionary of a ChunkingProperties object's attributes.
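
      For example, to cap the chunk size for a Zarr sink (a sketch assuming runtime is a configured Runtime instance):

      config = runtime.get_configuration()
      # open a new chunk file once the current one reaches 32 MiB
      config.video[0].storage.settings.chunking.max_bytes_per_chunk = 32 * 2**20
      config = runtime.set_configuration(config)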

      "},{"location":"api_reference/#class-deviceidentifier","title":"Class DeviceIdentifier","text":"

      The DeviceIdentifier class represents an identifier for a device, such as a camera or video source.

      class DeviceIdentifier:\n    id: Tuple[int, int]\n    kind: DeviceKind\n    name: str\n\n    def __init__(self, *args: None, **kwargs: Any) -> None: ...\n    \"\"\"Initializes a DeviceIdentifier object with optional arguments.\"\"\"\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the DeviceIdentifier attributes.\"\"\"\n\n    @staticmethod\n    def none() -> DeviceIdentifier: ...\n    \"\"\"Returns a \"None\" type DeviceIdentifier. Useful when a DeviceIdentifier is not needed.\"\"\"\n\n    def __eq__(self, other: object) -> bool:\n        \"\"\"Checks if two DeviceIdentifier objects are equal.\"\"\"\n\n    def __ge__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceIdentifier is greater than or equal to another.\"\"\"\n\n    def __gt__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceIdentifier is greater than another.\"\"\"\n\n    def __le__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceIdentifier is less than or equal to another.\"\"\"\n\n    def __lt__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceIdentifier is less than another.\"\"\"\n\n    def __ne__(self, other: object) -> bool:\n        \"\"\"Checks if two DeviceIdentifier objects are not equal.\"\"\"\n
      • id: A tuple (driver_id, device_id) containing two U8 integers that serve to identify each driver and device uniquely for a given run.

      • kind: An instance of the DeviceKind class that represents the type or kind of the device.

      • name: A string representing the name or label of the device.

      • The dict method creates a dictionary of a DeviceIdentifier object's attributes.
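
      For example, identifiers can be listed and inspected through the device manager (a sketch assuming runtime is a Runtime instance):

      dm = runtime.device_manager()
      for ident in dm.devices():
          print(ident.kind, ident.name, ident.id)

      none_id = acquire.DeviceIdentifier.none()  # stands in when no device is needed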

      "},{"location":"api_reference/#class-devicekind","title":"Class DeviceKind","text":"

      The DeviceKind class represents properties for supported devices, such as a camera or video source, in a given system.

      class DeviceKind:\n    Camera: ClassVar[DeviceKind] = DeviceKind.Camera\n    NONE: ClassVar[DeviceKind] = DeviceKind.NONE\n    Signals: ClassVar[DeviceKind] = DeviceKind.Signals\n    StageAxis: ClassVar[DeviceKind] = DeviceKind.StageAxis\n    Storage: ClassVar[DeviceKind] = DeviceKind.Storage\n\n    def __init__(self, *args: None, **kwargs: Any) -> None:\n        \"\"\"Initializes the DeviceKind class.\"\"\"\n\n    def __eq__(self, other: object) -> bool:\n        \"\"\"Checks if two DeviceKind objects are equal.\"\"\"\n\n    def __ge__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceKind is greater than or equal to another.\"\"\"\n\n    def __gt__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceKind is greater than another.\"\"\"\n\n    def __int__(self) -> int:\n        \"\"\"Converts the DeviceKind to an integer.\"\"\"\n\n    def __le__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceKind is less than or equal to another.\"\"\"\n\n    def __lt__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceKind is less than another.\"\"\"\n\n    def __ne__(self, other: object) -> bool:\n        \"\"\"Checks if two DeviceKind objects are not equal.\"\"\"\n
      • Camera: Enum-type class variable of DeviceKind that defines the cameras supported by the system.

      • NONE: Enum-type class variable of DeviceKind that is set to None if no device of the specified kind is available.

      • Signals: Enum-type class variable of DeviceKind that defines the signals supported by the system.

      • StageAxis: Enum-type class variable of DeviceKind that defines the stage axes supported by the system.

      • Storage: Enum-type class variable of DeviceKind that defines the storage supported by the system.

      "},{"location":"api_reference/#class-devicemanager","title":"Class DeviceManager","text":"

      The DeviceManager class manages selection of available devices in the system.

      class DeviceManager:\n    def devices(self) -> List[DeviceIdentifier]:\n        \"\"\"Returns a list of all available device identifiers.\"\"\"\n\n    @overload\n    def select(self, kind: DeviceKind) -> Optional[DeviceIdentifier]:\n        \"\"\"Selects the first available device of `kind`.\n\n        Args:\n            kind (DeviceKind): The type of device to select.\n\n        Returns:\n            Optional[DeviceIdentifier]: The identifier of the first available device of `kind`, or `None` if no such device is available.\n        \"\"\"\n\n    @overload\n    def select(self, kind: DeviceKind, name: Optional[str]) -> Optional[DeviceIdentifier]:\n        \"\"\"Selects a specified device.\n\n        Args:\n            kind (DeviceKind): The type of device to select.\n            name (Optional[str]): The name of the device to select.\n\n        Returns:\n            Optional[DeviceIdentifier]: The selected device identifier, or None if the specified device is not available.\n        \"\"\"\n\n    def select_one_of(self, kind: DeviceKind, names: List[str]) -> Optional[DeviceIdentifier]:\n        \"\"\"Selects the first device in the list of devices that is of one of the specified kinds.\n\n        Args:\n            kind (DeviceKind): The type of device to select.\n            names (List[str]): A list of device names to choose from.\n\n        Returns:\n            Optional[DeviceIdentifier]: The selected device identifier, or None if none of the specified devices are available.\n        \"\"\"\n
      • Call devices to list all available devices.

      • Call select to choose a type of device for acquisition.

      • Call select_one_of to choose a particular instance in a category of devices for acquisition.
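
      A short sketch of both selection styles, assuming runtime is a Runtime instance (the device names are the simulated and built-in ones listed elsewhere in these docs):

      dm = runtime.device_manager()

      # select a camera by kind and name
      camera_id = dm.select(acquire.DeviceKind.Camera, "simulated: radial sin")

      # select the first available storage device among the listed names
      storage_id = dm.select_one_of(acquire.DeviceKind.Storage, ["Zarr", "Tiff"])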

      "},{"location":"api_reference/#class-devicestate","title":"Class DeviceState","text":"

      The DeviceState class represents the acquisition status of a device.

      class DeviceState:\n    Closed: ClassVar[DeviceState] = DeviceState.Closed\n    AwaitingConfiguration: ClassVar[DeviceState] = DeviceState.AwaitingConfiguration\n    Armed: ClassVar[DeviceState] = DeviceState.Armed\n    Running: ClassVar[DeviceState] = DeviceState.Running\n\n    def __eq__(self, other: object) -> bool:\n        \"\"\"Checks if two DeviceState objects are equal.\"\"\"\n\n    def __ge__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceState is greater than or equal to another.\"\"\"\n\n    def __gt__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceState is greater than another.\"\"\"\n\n    def __int__(self) -> int:\n        \"\"\"Converts the DeviceState to an integer.\"\"\"\n\n    def __le__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceState is less than or equal to another.\"\"\"\n\n    def __lt__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceState is less than another.\"\"\"\n\n    def __ne__(self, other: object) -> bool:\n        \"\"\"Checks if two DeviceState objects are not equal.\"\"\"\n
      • Closed: Enum-type class variable of DeviceState that specifies when a device is not ready for configuration.

      • AwaitingConfiguration: Enum-type class variable of DeviceState that specifies when a device is ready for configuration.

      • Armed: Enum-type class variable of DeviceState that specifies when a device is ready to stream data.

      • Running: Enum-type class variable of DeviceState that specifies when a device is streaming data.
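
      For example, the runtime's state can be polled while frames are being collected (a sketch assuming runtime is a started Runtime instance):

      import time

      while runtime.get_state() == acquire.DeviceState.Running:
          time.sleep(0.1)  # wait until the configured frame count is reached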

      "},{"location":"api_reference/#class-direction","title":"Class Direction","text":"

      The Direction class represents the direction that data is read for streaming.

      class Direction:\n    Backward: ClassVar[Direction] = Direction.Backward\n    Forward: ClassVar[Direction] = Direction.Forward\n\n    def __eq__(self, other: object) -> bool:\n        \"\"\"Checks if two Direction objects are equal.\"\"\"\n\n    def __ge__(self, other: object) -> bool:\n        \"\"\"Checks if this Direction is greater than or equal to another.\"\"\"\n\n    def __gt__(self, other: object) -> bool:\n        \"\"\"Checks if this Direction is greater than another.\"\"\"\n\n    def __int__(self) -> int:\n        \"\"\"Converts the Direction to an integer.\"\"\"\n\n    def __le__(self, other: object) -> bool:\n        \"\"\"Checks if this Direction is less than or equal to another.\"\"\"\n\n    def __lt__(self, other: object) -> bool:\n        \"\"\"Checks if this Direction is less than another.\"\"\"\n\n    def __ne__(self, other: object) -> bool:\n        \"\"\"Checks if two Direction objects are not equal.\"\"\"\n
      • Backward: Enum-type class variable of Direction that specifies when data is streamed backward.

      • Forward: Enum-type class variable of Direction that specifies when data is streamed forward.

      "},{"location":"api_reference/#class-inputtriggers","title":"Class InputTriggers","text":"

      The InputTriggers class represents input triggers for a device.

      class InputTriggers:\n    acquisition_start: Trigger\n    exposure: Trigger\n    frame_start: Trigger\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the InputTriggers attributes.\"\"\"\n
      • acquisition_start: An instance of the Trigger class representing the trigger for starting acquisition.

      • exposure: An instance of the Trigger class representing the trigger for exposure.

      • frame_start: An instance of the Trigger class representing the trigger for starting a frame.

      • The dict method creates a dictionary of an InputTriggers object's attributes.

      "},{"location":"api_reference/#class-outputtriggers","title":"Class OutputTriggers","text":"

      The OutputTriggers class represents output triggers for a device.

      class OutputTriggers:\n    exposure: Trigger\n    frame_start: Trigger\n    trigger_wait: Trigger\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the OutputTriggers attributes.\"\"\"\n
      • exposure: An instance of the Trigger class representing the trigger for exposure.

      • frame_start: An instance of the Trigger class representing the trigger for starting a frame.

      • trigger_wait: An instance of the Trigger class representing the trigger for waiting before continuing acquisition.

      • The dict method creates a dictionary of an OutputTriggers object's attributes.

      "},{"location":"api_reference/#class-pid","title":"Class PID","text":"

      The PID class represents proportional-integral-derivative (PID) values.

      class PID:\n    derivative: float\n    integral: float\n    proportional: float\n\n    def __init__(self, *args: None, **kwargs: Any) -> None: ...\n    \"\"\"Initializes a PID object with optional arguments.\"\"\"\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the PID attributes.\"\"\"\n
      • derivative: The derivative value for the PID.

      • integral: The integral value for the PID.

      • proportional: The proportional value for the PID.

      • The dict method creates a dictionary of a PID object's attributes.

      "},{"location":"api_reference/#class-properties","title":"Class Properties","text":"

      The Properties class represents properties related to video streams.

      class Properties:\n    video: Tuple[VideoStream, VideoStream]\n\n    def __init__(self, *args: None, **kwargs: Any) -> None: ...\n    \"\"\"Initializes a Properties object with optional arguments.\"\"\"\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the Properties attributes.\"\"\"\n
      • video: A tuple containing two VideoStream instances which contain information on the camera used for acquisition and how the data is stored.

      • The dict method creates a dictionary of a Properties object's attributes.
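
      For example (assuming runtime is a Runtime instance), the current properties can be fetched, inspected, and written back:

      config = runtime.get_configuration()  # a Properties object
      print(config.video[0].camera.identifier)
      print(config.dict())  # plain-dictionary view of every attribute
      config = runtime.set_configuration(config)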

      "},{"location":"api_reference/#class-runtime","title":"Class Runtime","text":"

      The Runtime class coordinates the devices with the storage device, including selecting the devices, setting their properties, and starting and stopping acquisition.

      class Runtime:\n    def __init__(self, *args: None, **kwargs: Any) -> None:\n        \"\"\"Initializes the Runtime object with optional arguments.\"\"\"\n\n    def device_manager(self) -> DeviceManager:\n        \"\"\"Returns the DeviceManager instance associated with this Runtime.\"\"\"\n\n    def get_available_data(self, stream_id: int) -> AvailableData:\n        \"\"\"Returns the AvailableData instance for the given stream ID.\n\n        Args:\n            stream_id (int): The ID of the stream for which available data is requested.\n\n        Returns:\n            AvailableData: The AvailableData instance for the given stream ID.\n        \"\"\"\n\n    def get_configuration(self) -> Properties:\n        \"\"\"Returns the current configuration properties of the runtime.\"\"\"\n\n    def get_state(self) -> DeviceState:\n        \"\"\"Returns the current state of the device.\"\"\"\n\n    def set_configuration(self, properties: Properties) -> Properties:\n        \"\"\"Applies the provided configuration properties to the runtime.\n\n        Args:\n            properties (Properties): The properties to be set.\n\n        Returns:\n            Properties: The updated configuration properties.\n        \"\"\"\n\n    def start(self) -> None:\n        \"\"\"Starts the runtime, allowing it to collect data.\"\"\"\n\n    def stop(self) -> None:\n        \"\"\"Stops the runtime, ending data collection after the max number of frames is collected.\"\"\"\n\n    def abort(self) -> None:\n        \"\"\"Aborts the runtime, terminating it immediately.\"\"\"\n
      • Call device_manager() to return the DeviceManager object associated with this Runtime instance.

      • Call get_available_data with a specific stream_id to return the AvailableData associated with the stream_id.

      • Call get_configuration() to return the Properties object associated with this Runtime instance.

      • Call get_state() to return the DeviceState object associated with this Runtime instance.

      • Call set_configuration with a Properties object to change the properties of this Runtime instance.

      • Call start() to begin data acquisition.

      • Call stop() to end data acquisition once the max number of frames specified in config.video[0].max_frame_count is collected.

      • Call abort() to immediately end data acquisition.
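
      Putting the lifecycle together, a minimal end-to-end sketch (device selection and property values elided):

      import acquire

      runtime = acquire.Runtime()
      dm = runtime.device_manager()
      config = runtime.get_configuration()
      # ... select devices and set properties on config.video[0] here ...
      config = runtime.set_configuration(config)

      runtime.start()  # begin acquisition
      # ... consume runtime.get_available_data(0) while frames stream in ...
      runtime.stop()   # block until max_frame_count frames are collected
      # or runtime.abort() to end acquisition immediately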

      "},{"location":"api_reference/#class-sampleratehz","title":"Class SampleRateHz","text":"

      The SampleRateHz class represents the sampling rate in hertz.

      class SampleRateHz:\n    numerator: int\n    denominator: int\n\n    def __init__(self, *args: None, **kwargs: Any) -> None: ...\n    \"\"\"Initializes a SampleRateHz object with optional arguments.\"\"\"\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the SampleRateHz attributes.\"\"\"\n
      • numerator: The numerator part of the sampling rate fraction.

      • denominator: The denominator part of the sampling rate fraction.

      • The dict method creates a dictionary of a SampleRateHz object's attributes.

      "},{"location":"api_reference/#class-sampletype","title":"Class SampleType","text":"

      The SampleType class defines the type of the values in the streamed data.

      class SampleType:\n    F32: ClassVar[SampleType] = SampleType.F32\n    I16: ClassVar[SampleType] = SampleType.I16\n    I8: ClassVar[SampleType] = SampleType.I8\n    U16: ClassVar[SampleType] = SampleType.U16\n    U8: ClassVar[SampleType] = SampleType.U8\n    U10: ClassVar[SampleType] = SampleType.U10\n    U12: ClassVar[SampleType] = SampleType.U12\n    U14: ClassVar[SampleType] = SampleType.U14\n\n    def __eq__(self, other: object) -> bool:\n        \"\"\"Checks if two SampleType objects are equal.\"\"\"\n\n    def __ge__(self, other: object) -> bool:\n        \"\"\"Checks if this SampleType is greater than or equal to another.\"\"\"\n\n    def __gt__(self, other: object) -> bool:\n        \"\"\"Checks if this SampleType is greater than another.\"\"\"\n\n    def __int__(self) -> int:\n        \"\"\"Converts the SampleType to an integer.\"\"\"\n\n    def __le__(self, other: object) -> bool:\n        \"\"\"Checks if this SampleType is less than or equal to another.\"\"\"\n\n    def __lt__(self, other: object) -> bool:\n        \"\"\"Checks if this SampleType is less than another.\"\"\"\n\n    def __ne__(self, other: object) -> bool:\n        \"\"\"Checks if two SampleType objects are not equal.\"\"\"\n
      • F32: Enum-type class variable of SampleType that specifies values of 32-bit floating point type.

      • I16: Enum-type class variable of SampleType that specifies values of 16-bit signed integer type.

      • I8: Enum-type class variable of SampleType that specifies values of 8-bit signed integer type.

      • U16: Enum-type class variable of SampleType that specifies values of 16-bit unsigned integer type.

      • U8: Enum-type class variable of SampleType that specifies values of 8-bit unsigned integer type.

      • U10: Enum-type class variable of SampleType that specifies values of 10-bit unsigned integer type.

      • U12: Enum-type class variable of SampleType that specifies values of 12-bit unsigned integer type.

      • U14: Enum-type class variable of SampleType that specifies values of 14-bit unsigned integer type.

      "},{"location":"api_reference/#class-signaliokind","title":"Class SignalIOKind","text":"

      The SignalIOKind class defines the type of input and output signals.

      class SignalIOKind:\n    Input: ClassVar[SignalIOKind] = SignalIOKind.Input\n    Output: ClassVar[SignalIOKind] = SignalIOKind.Output\n\n    def __eq__(self, other: object) -> bool:\n        \"\"\"Checks if two SignalIOKind objects are equal.\"\"\"\n\n    def __ge__(self, other: object) -> bool:\n        \"\"\"Checks if this SignalIOKind is greater than or equal to another.\"\"\"\n\n    def __gt__(self, other: object) -> bool:\n        \"\"\"Checks if this SignalIOKind is greater than another.\"\"\"\n\n    def __int__(self) -> int:\n        \"\"\"Converts the SignalIOKind to an integer.\"\"\"\n\n    def __le__(self, other: object) -> bool:\n        \"\"\"Checks if this SignalIOKind is less than or equal to another.\"\"\"\n\n    def __lt__(self, other: object) -> bool:\n        \"\"\"Checks if this SignalIOKind is less than another.\"\"\"\n\n    def __ne__(self, other: object) -> bool:\n        \"\"\"Checks if two SignalIOKind objects are not equal.\"\"\"\n
      • Input: Enum-type class variable of SignalIOKind that specifies signal coming in to the device.

      • Output: Enum-type class variable of SignalIOKind that specifies signal sent out of the device.

      "},{"location":"api_reference/#class-signaltype","title":"Class SignalType","text":"

      The SignalType class specifies whether a signal is analog or digital.

      class SignalType:\n    Analog: ClassVar[SignalType] = SignalType.Analog\n    Digital: ClassVar[SignalType] = SignalType.Digital\n\n    def __eq__(self, other: object) -> bool:\n        \"\"\"Checks if two SignalType objects are equal.\"\"\"\n\n    def __ge__(self, other: object) -> bool:\n        \"\"\"Checks if this SignalType is greater than or equal to another.\"\"\"\n\n    def __gt__(self, other: object) -> bool:\n        \"\"\"Checks if this SignalType is greater than another.\"\"\"\n\n    def __int__(self) -> int:\n        \"\"\"Converts the SignalType to an integer.\"\"\"\n\n    def __le__(self, other: object) -> bool:\n        \"\"\"Checks if this SignalType is less than or equal to another.\"\"\"\n\n    def __lt__(self, other: object) -> bool:\n        \"\"\"Checks if this SignalType is less than another.\"\"\"\n\n    def __ne__(self, other: object) -> bool:\n        \"\"\"Checks if two SignalType objects are not equal.\"\"\"\n
      • Analog: Enum-type class variable of SignalType that specifies a signal is analog.

      • Digital: Enum-type class variable of SignalType that specifies a signal is digital.

      "},{"location":"api_reference/#class-storage","title":"Class Storage","text":"

      The Storage class represents storage settings for the acquired data.

      class Storage:\n    identifier: Optional[DeviceIdentifier]\n    settings: StorageProperties\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the Storage attributes.\"\"\"\n
      • identifier: An optional attribute which contains an instance of the DeviceIdentifier class that describes the storage device if that device is natively supported. Otherwise, it is of type None.

      • settings: An instance of the StorageProperties class which contains the settings for the data storage.

      • The dict method creates a dictionary of a Storage object's attributes.

      "},{"location":"api_reference/#class-storageproperties","title":"Class StorageProperties","text":"

      The StorageProperties class represents properties for data storage.

      class StorageProperties:\n    external_metadata_json: Optional[str]\n    filename: Optional[str]\n    first_frame_id: int\n    pixel_scale_um: Tuple[float, float]\n    chunking: ChunkingProperties\n    enable_multiscale: bool\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the StorageProperties attributes.\"\"\"\n
      • external_metadata_json: An optional attribute representing external metadata in JSON format.

      • filename: An optional attribute representing the filename.

      • first_frame_id: An integer representing the ID of the first frame.

      • pixel_scale_um: A tuple of two floats representing pixel size in micrometers.

      • chunking: An instance of the ChunkingProperties class representing data chunking settings.

      • enable_multiscale: A boolean indicating whether multiscale storage is desired.

      • The dict method creates a dictionary of a StorageProperties object's attributes.
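
      For example (a sketch assuming runtime is a Runtime instance; the filename and pixel scale are illustrative):

      config = runtime.get_configuration()
      config.video[0].storage.settings.filename = "output1.zarr"
      config.video[0].storage.settings.pixel_scale_um = (0.65, 0.65)
      config.video[0].storage.settings.enable_multiscale = False
      config = runtime.set_configuration(config)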

      "},{"location":"api_reference/#class-tileshape","title":"Class TileShape","text":"

      The TileShape class represents the tile shape, or voxel size, for tile scanning acquisition.

      class TileShape:\n    width: int\n    height: int\n    planes: int\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the TileShape attributes.\"\"\"\n
      • width: The width of the tile.

      • height: The height of the tile.

      • planes: The number of planes in the tile.

      • The dict method creates a dictionary of a TileShape object's attributes.
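
      The tile shape is reached through the storage chunking settings; for example (the values here are illustrative):

      tile = config.video[0].storage.settings.chunking.tile
      tile.width = 512
      tile.height = 512
      tile.planes = 1
      print(tile.dict())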

      "},{"location":"api_reference/#class-trigger","title":"Class Trigger","text":"

      The Trigger class represents a trigger signal.

      class Trigger:\n    edge: TriggerEdge\n    enable: bool\n    line: int\n    kind: SignalIOKind\n\n    def __init__(self, *args: None, **kwargs: Any) -> None: ...\n    \"\"\"Initializes a Trigger object with optional arguments.\"\"\"\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the Trigger attributes.\"\"\"\n
      • edge: An instance of the TriggerEdge class specifying whether the trigger fires on the rising or falling edge of the trigger signal.

      • enable: A boolean indicating whether the trigger is enabled.

      • line: An integer representing the line of the trigger signal.

      • kind: An instance of the SignalIOKind class specifying if the signal is input or output.

      • The dict method creates a dictionary of a Trigger object's attributes.
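
      For example, a frame-start input trigger might be enabled like this (a sketch; the line number is illustrative and hardware-dependent):

      t = config.video[0].camera.settings.input_triggers.frame_start
      t.enable = True
      t.line = 0  # illustrative hardware line
      t.edge = acquire.TriggerEdge.Rising
      t.kind = acquire.SignalIOKind.Input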

      "},{"location":"api_reference/#class-triggeredge","title":"Class TriggerEdge","text":"

      The TriggerEdge class represents what edge of the trigger function initiates the trigger.

      class TriggerEdge:\n    Falling: ClassVar[TriggerEdge] = TriggerEdge.Falling\n    NotApplicable: ClassVar[TriggerEdge] = TriggerEdge.NotApplicable\n    Rising: ClassVar[TriggerEdge] = TriggerEdge.Rising\n\n    def __eq__(self, other: object) -> bool:\n        \"\"\"Checks if two TriggerEdge objects are equal.\"\"\"\n\n    def __ge__(self, other: object) -> bool:\n        \"\"\"Checks if this TriggerEdge is greater than or equal to another.\"\"\"\n\n    def __gt__(self, other: object) -> bool:\n        \"\"\"Checks if this TriggerEdge is greater than another.\"\"\"\n\n    def __int__(self) -> int:\n        \"\"\"Converts the TriggerEdge to an integer.\"\"\"\n\n    def __le__(self, other: object) -> bool:\n        \"\"\"Checks if this TriggerEdge is less than or equal to another.\"\"\"\n\n    def __lt__(self, other: object) -> bool:\n        \"\"\"Checks if this TriggerEdge is less than another.\"\"\"\n\n    def __ne__(self, other: object) -> bool:\n        \"\"\"Checks if two TriggerEdge objects are not equal.\"\"\"\n
      • Falling: Enum-type class variable of TriggerEdge that defines the falling edge of the trigger.

      • NotApplicable: Enum-type class variable of TriggerEdge that defines if a trigger does not have a rising or falling edge.

      • Rising: Enum-type class variable of TriggerEdge that defines the rising edge of the trigger.

      "},{"location":"api_reference/#class-videoframe","title":"Class VideoFrame","text":"

      The VideoFrame class represents data from acquisition of a frame.

      class VideoFrame:\n    def data(self) -> NDArray[Any]:\n        \"\"\"Returns the data of the video frame as an NDArray.\"\"\"\n\n    def metadata(self) -> VideoFrameMetadata:\n        \"\"\"Returns the metadata associated with the video frame.\"\"\"\n
      • Call data() to create an NDArray of the VideoFrame data.

      • Call metadata() to query the metadata of VideoFrame.
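
      For example (a sketch assuming a is an AvailableData object returned by runtime.get_available_data(0)):

      for frame in a.frames():
          image = frame.data()      # NDArray of pixel values
          meta = frame.metadata()   # VideoFrameMetadata
          print(meta.frame_id, image.shape)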

      "},{"location":"api_reference/#class-videoframemetadata","title":"Class VideoFrameMetadata","text":"

      The VideoFrameMetadata class represents metadata related to a video frame.

      class VideoFrameMetadata:\n    frame_id: int\n    timestamps: VideoFrameTimestamps\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the VideoFrameMetadata attributes.\"\"\"\n
      • frame_id: An integer representing the ID of the video frame.

      • timestamps: An instance of the VideoFrameTimestamps class specifying the video timestamps based on the hardware clock and the acquisition clock.

      • The dict method creates a dictionary of a VideoFrameMetadata object's attributes.

      "},{"location":"api_reference/#class-videoframetimestamps","title":"Class VideoFrameTimestamps","text":"

      The VideoFrameTimestamps class represents timestamps related to a video frame.

      class VideoFrameTimestamps:\n    hardware: int\n    acq_thread: int\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the VideoFrameTimestamps attributes.\"\"\"\n
      • hardware: An integer representing hardware timestamps.

      • acq_thread: An integer representing timestamps from the acquisition thread.

      • The dict method creates a dictionary of a VideoFrameTimestamps object's attributes.

      "},{"location":"api_reference/#class-videostream","title":"Class VideoStream","text":"

      The VideoStream class represents a video stream.

      class VideoStream:\n    camera: Camera\n    storage: Storage\n    max_frame_count: int\n    frame_average_count: int\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the VideoStream attributes.\"\"\"\n
      • camera: An instance of the Camera class representing the camera used in the video stream.

      • storage: An instance of the Storage class representing the storage settings for the video stream.

      • max_frame_count: An integer representing the maximum number of frames in the video stream.

      • frame_average_count: An integer representing the number of frames to average in the video stream.

      • The dict method creates a dictionary of a VideoStream object's attributes.
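
      For example (assuming runtime is a Runtime instance):

      config = runtime.get_configuration()
      config.video[0].max_frame_count = 100    # stop after 100 frames
      config.video[0].frame_average_count = 0  # 0 disables frame averaging
      config = runtime.set_configuration(config)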

      "},{"location":"api_reference/#class-voltagerange","title":"Class VoltageRange","text":"

      The VoltageRange class represents a range of voltage values.

      class VoltageRange:\n    mn: float\n    mx: float\n\n    @overload\n    def __init__(self) -> None: ...\n    \"\"\"Initializes a VoltageRange object.\"\"\"\n\n    @overload\n    def __init__(self, mn: float, mx: float) -> None: ...\n    \"\"\"Initializes a VoltageRange object with mn and mx provided.\"\"\"\n\n    def dict(self) -> Dict[str, float]: ...\n    \"\"\"Returns a dictionary of the VoltageRange attributes.\"\"\"\n
      • mn: A float representing the minimum voltage value.

      • mx: A float representing the maximum voltage value.

      • The dict method creates a dictionary of a VoltageRange object's attributes.

      "},{"location":"get_started/","title":"Getting Started with Acquire","text":"

      Acquire (acquire-imaging on PyPI) is a Python package providing a multi-camera video streaming library focused on performant microscopy, with support for up to two simultaneous, independent, video streams.

      This tutorial covers Acquire installation and shows an example of using Acquire with its provided simulated cameras to demonstrate the acquisition process.

      "},{"location":"get_started/#installation","title":"Installation","text":"

      To install Acquire on Windows, macOS, or Ubuntu, simply run the following command:

      python -m pip install acquire-imaging\n

      You will probably want to have a fresh conda environment or virtualenv. For example, with conda:

      conda create -n acquire python=3.10 # follow the prompts and proceed with the defaults\nconda activate acquire\npython -m pip install acquire-imaging\n

      or with virtualenv:

      $ python -m venv venv\n$ . ./venv/bin/activate # or on Windows: .\\venv\\Scripts\\Activate.bat or .\\venv\\Scripts\\Activate.ps1\n(venv) $ python -m pip install acquire-imaging\n

      Once you have Acquire installed, simply call import acquire in your script, notebook, or module to start utilizing the package.

      import acquire\n
      "},{"location":"get_started/#supported-cameras-and-file-formats","title":"Supported Cameras and File Formats","text":"

      Acquire supports the following cameras (currently only on Windows):

      • Hamamatsu Orca Fusion BT (C15440-20UP)
      • Vieworks VC-151MX-M6H00
      • FLIR Blackfly USB3 (BFLY-U3-23S6M-C)
      • FLIR Oryx 10GigE (ORX-10GS-51S5M-C)

      Acquire also supports the following output file formats:

      • Tiff
      • Zarr

      For testing and demonstration purposes, Acquire provides a few simulated cameras, as well as raw and trash output devices. To see all the devices that Acquire supports, you can run the following script:

      import acquire\n\nfor device in acquire.Runtime().device_manager().devices():\n    print(device)\n
      "},{"location":"get_started/#tutorial-prerequisites","title":"Tutorial Prerequisites","text":"

      We will be writing to and reading from the Zarr format, using the Dask library to load and inspect the data, and visualizing the data using napari.

      You can install these prerequisites with:

      python -m pip install dask \"napari[all]\" zarr\n
      "},{"location":"get_started/#setup-for-acquisition","title":"Setup for Acquisition","text":"

      We will use one of Acquire's simulated cameras to generate data for us and use Zarr for our output file format.

      Let's set up our runtime and device manager, then list the currently supported devices.

      import acquire\n\nruntime = acquire.Runtime()\ndm = runtime.device_manager()\n\nfor device in dm.devices():\n    print(device)\n
      The runtime is the main entry point in Acquire. Through the runtime, you configure your devices, start acquisition, check acquisition status, inspect data as it streams from your cameras, and terminate acquisition.

      Let's configure our devices now. To do this, we'll get a copy of the current runtime configuration. We can update the configuration with identifiers from the runtime's device manager, but we won't actually instantiate these devices until we start acquiring.

      Acquire supports up to two video streams. These streams consist of a source (i.e., a camera), optionally a filter, and a sink (an output, like a Zarr dataset or a Tiff file). Before configuring the streams, grab the current configuration of the Runtime object with:

      config = runtime.get_configuration()\n

      Video streams are configured independently. Configure the first video stream by setting properties on config.video[0] and the second video stream with config.video[1]. We'll be using simulated cameras, one generating a radial sine pattern and one generating a random pattern.

      config.video[0].camera.identifier = dm.select(acquire.DeviceKind.Camera, \"simulated: radial sin\")\n\n# how many adjacent pixels in each direction to combine by averaging; here, 1 means not to combine\nconfig.video[0].camera.settings.binning = 1\n\n# how long (in microseconds) your camera should collect light from the sample; for simulated cameras,\n# this is just a waiting period before generating the next frame\nconfig.video[0].camera.settings.exposure_time_us = 5e4  # 50 ms\n\n# the data type representing each pixel; here we choose unsigned 8-bit integer\nconfig.video[0].camera.settings.pixel_type = acquire.SampleType.U8\n\n# the shape, in pixels, of the image; width first, then height\nconfig.video[0].camera.settings.shape = (1024, 768)\n
      config.video[1].camera.identifier = dm.select(acquire.DeviceKind.Camera, \"simulated: uniform random\")\n\n# how many adjacent pixels in each direction to combine by averaging; here, 1 means not to combine\nconfig.video[1].camera.settings.binning = 1\n\n# how long (in microseconds) your camera should collect light from the sample; for simulated cameras,\n# this is just a waiting period before generating the next frame\nconfig.video[1].camera.settings.exposure_time_us = 1e4  # 10 ms\n\n# the data type representing each pixel; here we choose unsigned 8-bit integer\nconfig.video[1].camera.settings.pixel_type = acquire.SampleType.U8\n\n# the shape, in pixels, of the image; width first, then height\nconfig.video[1].camera.settings.shape = (1280, 720)\n

      Now we'll configure each output, or sink device. For both simulated cameras, we'll be writing to Zarr, a format which supports chunked arrays.

      config.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, \"Zarr\")\n\n# what file or directory to write the data to\nconfig.video[0].storage.settings.filename = \"output1.zarr\"\n\n# where applicable, how large should a chunk file get before opening the next chunk file\nconfig.video[0].storage.settings.chunking.max_bytes_per_chunk = 32 * 2**20  # 32 MiB chunk sizes\n
      config.video[1].storage.identifier = dm.select(acquire.DeviceKind.Storage, \"Zarr\")\n\n# what file or directory to write the data to\nconfig.video[1].storage.settings.filename = \"output2.zarr\"\n\n# where applicable, how large should a chunk file get before opening the next chunk file\nconfig.video[1].storage.settings.chunking.max_bytes_per_chunk = 64 * 2**20  # 64 MiB chunk sizes\n

      Finally, let's specify how many frames to generate for each camera before stopping our simulated acquisition. We also need to register our configuration with the runtime.

      If you want to let the runtime just keep acquiring effectively forever, you can set max_frame_count to 2**64 - 1.

      config.video[0].max_frame_count = 100 # collect 100 frames\nconfig.video[1].max_frame_count = 150 # collect 150 frames\n\nconfig = runtime.set_configuration(config)\n

      Note

      If you run this tutorial multiple times, you can clear output from previous runs with:

      import os\nimport shutil\n\nif config.video[0].storage.settings.filename in os.listdir(\".\"):\n    shutil.rmtree(config.video[0].storage.settings.filename)\n\nif config.video[1].storage.settings.filename in os.listdir(\".\"):\n    shutil.rmtree(config.video[1].storage.settings.filename)\n
      "},{"location":"get_started/#acquire-data","title":"Acquire Data","text":"

To start acquiring data:

      runtime.start()\n

      Acquisition happens in a separate thread, so at any point we can check on the status by calling runtime.get_state().

      runtime.get_state()\n

      Finally, once we're done acquiring, we call runtime.stop(). This method will wait until you've reached the number of frames specified in config.video[0].max_frame_count or config.video[1].max_frame_count, whichever is larger.

      runtime.stop()\n
      "},{"location":"get_started/#visualizing-the-data-with-napari","title":"Visualizing the Data with napari","text":"

      Let's take a look at what we've written. We'll load each Zarr dataset as a Dask array and inspect its dimensions, then we'll use napari to view it.

      import dask.array as da\nimport napari\n
      data1 = da.from_zarr(config.video[0].storage.settings.filename, component=\"0\")\ndata1\n
      data2 = da.from_zarr(config.video[1].storage.settings.filename, component=\"0\")\ndata2\n
      viewer1 = napari.view_image(data1)\n
      viewer2 = napari.view_image(data2)\n
      "},{"location":"get_started/#conclusion","title":"Conclusion","text":"

For more examples of using Acquire, check out our tutorials page.

      "},{"location":"for_contributors/","title":"For contributors","text":"

      Documentation for those looking to contribute to the Acquire project.

      GitHub repositories: https://github.com/acquire-project

      "},{"location":"for_contributors/docs_contribution_quickstart/","title":"Quick Start Acquire Docs Contribution Guide","text":"
      1. Make sure you have a fresh environment with the latest mkdocs and mkdocs-material installed. You can install them with pip install -r requirements.txt from the root of the repository.
      2. Your pages should be written as markdown files, using the basic markdown syntax or following the mkdocs or material for mkdocs syntax.
      3. Pages can be added to the top level menu or submenus by editing the mkdocs.yml file. The order of the pages in the menu is determined by the order of the pages in the mkdocs.yml file. Subpages can be added by creating subfolders in the docs/ folder (see, for example, the docs/tutorials/ folder).
      4. To add images, place them in the docs/images/ folder and reference them in your markdown files using the relative path ../images/your_image.png.
      5. Custom CSS configuration goes into the docs/stylesheets/custom.css file.
      6. To build the website locally, after activating your environment (either using conda activate <your-environment> or source activate <your-env>, for example), run mkdocs serve to start a local server. You can then view the website at the URL indicated on your console.
      "},{"location":"tutorials/","title":"Tutorials","text":"

These tutorials will help you explore the main use cases of Acquire and show examples of using the API. Please submit an issue on GitHub if you'd like to request a tutorial. If you're interested in contributing a tutorial to this documentation, please visit our contribution guide.

      "},{"location":"tutorials/chunked/","title":"Chunking Data for Zarr Storage","text":"

      This tutorial will provide an example of writing chunked data to a Zarr storage device.

      Zarr has additional capabilities relative to the basic storage devices, namely chunking, compression, and multiscale storage. To enable chunking, set the attributes in an instance of the ChunkingProperties class. You can learn more about the Zarr capabilities in Acquire here.

      "},{"location":"tutorials/chunked/#configure-runtime","title":"Configure Runtime","text":"

      To start, we'll create a Runtime object and configure the streaming process, selecting Zarr as the storage device to enable chunking the data.

import acquire\n\n# Initialize a Runtime object\nruntime = acquire.Runtime()\n\n# Initialize the device manager\ndm = runtime.device_manager()\n\n# Grab the current configuration\nconfig = runtime.get_configuration() \n\n# Select the radial sine simulated camera as the video source\nconfig.video[0].camera.identifier = dm.select(acquire.DeviceKind.Camera, \"simulated: radial sin\") \n\n# Set the storage to Zarr to take advantage of chunking\nconfig.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, \"Zarr\")\n\n# Set the time for collecting data for each frame\nconfig.video[0].camera.settings.exposure_time_us = 5e4  # 50 ms\n\n# size of image region of interest on the camera (x, y)\nconfig.video[0].camera.settings.shape = (1920, 1080)\n\n# specify the pixel datatype as a uint8\nconfig.video[0].camera.settings.pixel_type = acquire.SampleType.U8\n\n# Set the max frame count\nconfig.video[0].max_frame_count = 10 # collect 10 frames\n\n# Set the output file to out.zarr\nconfig.video[0].storage.settings.filename = \"out.zarr\"\n
      Below we'll configure the chunking specific settings.

# Chunk size may need to be optimized for each acquisition. \n# See Zarr documentation for further guidance: https://zarr.readthedocs.io/en/stable/tutorial.html#chunk-optimizations\nconfig.video[0].storage.settings.chunking.max_bytes_per_chunk = 32 * 2**20 # 32 MiB\n\n# Set the x, y dimensions of each chunk to 1/2 of the width and height of the image, generating 4 chunks\nconfig.video[0].storage.settings.chunking.tile.width = 1920 // 2\nconfig.video[0].storage.settings.chunking.tile.height = 1080 // 2\n\n# Update the configuration with the chosen parameters\nconfig = runtime.set_configuration(config)\n
      "},{"location":"tutorials/chunked/#collect-and-inspect-the-data","title":"Collect and Inspect the Data","text":"
      # collect data\nruntime.start()\nruntime.stop()\n

      You can inspect the Zarr file directory to check that the data saved as expected. Alternatively, you can inspect the data programmatically with:

      # Utilize the zarr library to open the data\nimport zarr\n\n# create a zarr Group object\ngroup = zarr.open(config.video[0].storage.settings.filename)\n\n# check how many directories are in the zarr container\nassert len(group) == 1\n\n# inspect the characteristics of the data\ngroup[\"0\"]\n

      The output will be:

      <zarr.core.Array '/0' (10, 1, 1080, 1920) uint8>\n
As expected, we have only 1 top-level directory, corresponding to the single array in the group (we would expect more than 1 array only if we were writing multiscale data). The overall array shape is (10, 1, 1080, 1920), corresponding to 10 frames, 1 channel, and a height and width of 1080 and 1920, respectively, per frame.

      "},{"location":"tutorials/compressed/","title":"Writing to Compressed Zarr Files","text":"

      This tutorial will provide an example of writing compressed data to a Zarr file.

      Acquire supports streaming compressed data to the ZarrBlosc1* storage devices. Compression is done via Blosc. Supported codecs are lz4 and zstd, available with ZarrBlosc1Lz4ByteShuffle and ZarrBlosc1ZstdByteShuffle devices, respectively. For a comparison of these codecs, please refer to the Blosc docs. You can learn more about the Zarr capabilities in Acquire here.

      "},{"location":"tutorials/compressed/#configure-runtime","title":"Configure Runtime","text":"

      To start, we'll create a Runtime object and configure the streaming process, selecting ZarrBlosc1ZstdByteShuffle as the storage device to enable compressing the data.

import acquire\n\n# Initialize a Runtime object\nruntime = acquire.Runtime()\n\n# Initialize the device manager\ndm = runtime.device_manager()\n\n# Grab the current configuration\nconfig = runtime.get_configuration() \n\n# Select the radial sine simulated camera as the video source\nconfig.video[0].camera.identifier = dm.select(acquire.DeviceKind.Camera, \"simulated: radial sin\") \n\n# Set the storage to ZarrBlosc1ZstdByteShuffle to compress the data with the zstd codec\nconfig.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, \"ZarrBlosc1ZstdByteShuffle\")\n\n# Set the time for collecting data for each frame\nconfig.video[0].camera.settings.exposure_time_us = 5e4  # 50 ms\n\nconfig.video[0].camera.settings.shape = (1024, 768)\n\n# Set the max frame count\nconfig.video[0].max_frame_count = 100 # collect 100 frames\n\n# Set the output file to out.zarr\nconfig.video[0].storage.settings.filename = \"out.zarr\"\n\n# Update the configuration with the chosen parameters \nconfig = runtime.set_configuration(config) \n
      "},{"location":"tutorials/compressed/#inspect-acquired-data","title":"Inspect Acquired Data","text":"

Now that the configuration is set to utilize the ZarrBlosc1ZstdByteShuffle storage device, we can acquire data, which will be compressed before it is stored to out.zarr. Since we did not specify the size of chunks, the data will be saved as a single chunk that is the size of the image data. You may specify chunk sizes using the TileShape class, for example by setting acquire.StorageProperties.chunking.tile.width to the desired chunk width.
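
For instance, a minimal sketch of setting chunk tile dimensions before updating the configuration (the values here are illustrative, chosen as half of this tutorial's (1024, 768) image):

# illustrative: x, y dimensions, in pixels, of each chunk\nconfig.video[0].storage.settings.chunking.tile.width = 1024 // 2\nconfig.video[0].storage.settings.chunking.tile.height = 768 // 2\n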

      # acquire data\nruntime.start()\nruntime.stop()\n
We'll use the Zarr Python package to read the data in the out.zarr file.
      # We'll utilize the Zarr python package to read the data\nimport zarr\n\n# load from Zarr\ncompressed = zarr.open(config.video[0].storage.settings.filename)\n

We'll print some of the data properties to illustrate how the data was compressed. Since we have not enabled multiscale output, out.zarr will only have one top-level array, \"0\".

      # All of the data is stored in the \"0\" directory since the data was stored as a single chunk.\ndata = compressed[\"0\"]\n\nprint(data.compressor.cname)\nprint(data.compressor.clevel)\nprint(data.compressor.shuffle)\n

      Output:

      zstd\n1\n1\n
      As expected, the data was compressed using the zstd codec.

      "},{"location":"tutorials/configure/","title":"Configure an Acquisition","text":"

This tutorial will provide an in-depth explanation of setting configuration properties and demonstrate the relationships between various Acquire classes, such as CameraProperties and StorageProperties, used in the configuration process. In this example, we'll only configure one video source.

      "},{"location":"tutorials/configure/#initialize-runtime","title":"Initialize Runtime","text":"

      Runtime is the main entry point in Acquire. Through the runtime, you configure your devices, start acquisition, check acquisition status, inspect data as it streams from your cameras, and terminate acquisition. The device_manager method in Runtime creates an instance of the DeviceManager class. The get_configuration method in Runtime creates an instance of the Properties class. To configure the acquisition, we'll use those two methods to grab the configuration and to initialize a DeviceManager to set the attributes of Properties and related classes.

      import acquire\n\n# Initialize a Runtime object\nruntime = acquire.Runtime()\n\n# Initialize the device manager\ndm = runtime.device_manager()\n\n# Grab the current configuration\nconfig = runtime.get_configuration()\n
      "},{"location":"tutorials/configure/#utilize-devicemanager","title":"Utilize DeviceManager","text":"

      DeviceManager contains a devices method which creates a list of DeviceIdentifier objects each representing a discovered camera or storage device. Each DeviceIdentifier has an attribute kind that is a DeviceKind object, which has attributes specifying whether the device is a camera or storage device, as well as Signals and StageAxes attributes. The Signals and StageAxes attributes would apply to device kinds such as stages, which are not yet supported by Acquire.
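
For example, a short sketch that filters the device list by kind:

# print only the discovered cameras\nfor device in dm.devices():\n    if device.kind == acquire.DeviceKind.Camera:\n        print(device)\n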

      DeviceManager has 2 methods for selecting devices for the camera and storage. For more information on these methods, check out the Device Selection tutorial. We'll use the select method in this example to choose a specific device.

      # Select the radial sine simulated camera as the video source\nconfig.video[0].camera.identifier = dm.select(acquire.DeviceKind.Camera, \"simulated: radial sin\") \n\n# Set the storage to Tiff\nconfig.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, \"Tiff\")\n
      "},{"location":"tutorials/configure/#properties-class-explanation","title":"Properties Class Explanation","text":"

Using Runtime's get_configuration method, we created config, an instance of the Properties class. Properties contains only one attribute, video, which is a tuple of VideoStream objects, since Acquire currently supports streaming from up to 2 cameras. To configure the first video stream, we'll index this tuple to select the first VideoStream object, config.video[0].

      VideoStream objects have 2 attributes camera and storage which are instances of the Camera and Storage classes, respectively, and will be used to set the attributes of the selected camera device simulated: radial sin and storage device Tiff. The other attributes of VideoStream are integers that specify the maximum number of frames to collect and how many frames to average, if any, before storing the data. The frame_average_count has a default value of 0, which disables this feature.
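
For instance, a brief sketch of setting those two integer attributes:

# collect 100 frames, with frame averaging disabled (the default)\nconfig.video[0].max_frame_count = 100\nconfig.video[0].frame_average_count = 0\n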

      "},{"location":"tutorials/configure/#configure-camera","title":"Configure Camera","text":"

      Camera class objects have 2 attributes, settings, a CameraProperties object, and an optional attribute identifier, which is a DeviceIdentifier object.

CameraProperties has 5 attributes that are numbers or tuples of numbers: the exposure time and line interval in microseconds, how many pixels, if any, to bin (set to 1 by default to disable binning), and tuples for the image size and location on the camera chip. The other attributes are all instances of different classes. The pixel_type attribute is a SampleType object which indicates the data type of the pixel values in the image, such as Uint8. The readout_direction attribute is a Direction object specifying whether the data is read forwards or backwards from the camera. The input_triggers attribute is an InputTriggers object that details the characteristics of any input triggers in the system. The output_triggers attribute is an OutputTriggers object that details the characteristics of any output triggers in the system. All of the attributes of InputTriggers and OutputTriggers objects are instances of the Trigger class. The Trigger class is described in this tutorial.

      We'll configure some camera settings below.

# Set the time for collecting data for each frame\nconfig.video[0].camera.settings.exposure_time_us = 5e4  # 50 ms\n\n# (x, y) size of the image in pixels\nconfig.video[0].camera.settings.shape = (1024, 768)\n\n# Specify the pixel type as Uint32\nconfig.video[0].camera.settings.pixel_type = acquire.SampleType.U32\n
      "},{"location":"tutorials/configure/#configure-storage","title":"Configure Storage","text":"

      Storage objects have 2 attributes, settings, a StorageProperties object, and an optional attribute identifier, which is an instance of the DeviceIdentifier class described above.

StorageProperties has 2 string attributes, external_metadata_json and filename, which specify, respectively, the file to which metadata is written in JSON format and the file or directory to which image data is written, in whatever format corresponds to the selected storage device. first_frame_id is an integer ID that corresponds to the first frame of the current acquisition and is typically 0. pixel_scale_um is the pixel size in microns. enable_multiscale is a boolean used to specify if the data should be saved as an image pyramid. See the multiscale tutorial for more information. The chunking attribute is an instance of the ChunkingProperties class, used for Zarr storage. See the chunking tutorial for more information.
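
As an illustrative sketch, a couple of these optional fields could be set as follows (the values here are placeholders):

import json\n\n# physical pixel size, in microns (x, y)\nconfig.video[0].storage.settings.pixel_scale_um = (0.65, 0.65)\n\n# free-form acquisition metadata, stored as a JSON string\nconfig.video[0].storage.settings.external_metadata_json = json.dumps({\"instrument\": \"simulated\"})\n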

      We'll specify the name of the output image file below.

      # Set the output file to out.tiff\nconfig.video[0].storage.settings.filename = \"out.tiff\"\n
      "},{"location":"tutorials/configure/#update-configuration-settings","title":"Update Configuration Settings","text":"

      None of the configuration settings are updated in Runtime until the set_configuration method is called. We'll be creating a new Properties object with the set_configuration method. For simplicity, we'll reuse config for the name of that object as well, but note that new_config = runtime.set_configuration(config) also works here.

      # Update the configuration with the chosen parameters \nconfig = runtime.set_configuration(config) \n
      "},{"location":"tutorials/drivers/","title":"Test Camera Drivers","text":"

      This tutorial will cover testing that your camera has been properly identified.

      Acquire supports the following cameras (currently only on Windows):

      • Hamamatsu Orca Fusion BT (C15440-20UP)
      • Vieworks VC-151MX-M6H00
      • FLIR Blackfly USB3 (BFLY-U3-23S6M-C)
      • FLIR Oryx 10GigE (ORX-10GS-51S5M-C)

      Acquire provides the following simulated cameras:

      • simulated: uniform random - Produces uniform random noise for each pixel.
      • simulated: radial sin - Produces an animated radial sine wave pattern.
      • simulated: empty - Produces no data, leaving a blank image. This camera simulates acquiring as fast as possible.

      Acquire will only identify cameras whose drivers are present on your machine. The DeviceManager class manages selection of cameras and storage. We can create a DeviceManager object using the following:

      import acquire \n\n# Instantiate a Runtime object\nruntime = acquire.Runtime()\n\n# Instantiate a DeviceManager object for the Runtime\nmanager = runtime.device_manager()\n

DeviceManager objects have a devices method, which lists the identifiers for discovered devices. You can iterate over this list to determine which cameras were recognized.

      for device in manager.devices():\n    print(device)\n
      The output of this code is below. All identified cameras will be listed, and in the case of this tutorial, no cameras were connected to the machine, so only simulated cameras were found. Note that any storage devices will also print.

      <DeviceIdentifier Camera \"simulated: uniform random\">\n<DeviceIdentifier Camera \"simulated: radial sin\">\n<DeviceIdentifier Camera \"simulated: empty\">\n\n# storage devices will also print\n<DeviceIdentifier Storage \"raw\">\n<DeviceIdentifier Storage \"tiff\">\n<DeviceIdentifier Storage \"trash\">\n<DeviceIdentifier Storage \"tiff-json\">\n<DeviceIdentifier Storage \"Zarr\">\n<DeviceIdentifier Storage \"ZarrBlosc1ZstdByteShuffle\">\n<DeviceIdentifier Storage \"ZarrBlosc1Lz4ByteShuffle\">\n

For cameras that weren't discovered, you will see an error like the one below. These errors will not affect performance and can be ignored.

      ERROR acquire.runtime 2023-10-20 19:03:17,917 runtime.rs:40 C:\\actions-runner\\_work\\acquire-driver-hdcam\\acquire-driver-hdcam\\src\\acquire-core-libs\\src\\acquire-device-hal\\device\\hal\\loader.c:114 - driver_load(): Failed to load driver at \"acquire-driver-hdcam\".\n
      "},{"location":"tutorials/framedata/","title":"Accessing Data from the Video Source","text":"

      This tutorial will provide an example of accessing data from a video source during acquisition.

      "},{"location":"tutorials/framedata/#configure-runtime","title":"Configure Runtime","text":"

      To start, we'll create a Runtime object and configure the streaming process.

import acquire\n\n# Initialize a Runtime object\nruntime = acquire.Runtime()\n\n# Initialize the device manager\ndm = runtime.device_manager()\n\n# Grab the current configuration\nconfig = runtime.get_configuration() \n\n# Select the radial sine simulated camera as the video source\nconfig.video[0].camera.identifier = dm.select(acquire.DeviceKind.Camera, \"simulated: radial sin\") \n\n# Set the storage to trash to avoid saving the data\nconfig.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, \"Trash\")\n\n# Set the time for collecting data for each frame\nconfig.video[0].camera.settings.exposure_time_us = 5e4  # 50 ms\n\nconfig.video[0].camera.settings.shape = (1024, 768)\n\n# Set the max frame count\nconfig.video[0].max_frame_count = 100 # collect 100 frames\n\n# Update the configuration with the chosen parameters \nconfig = runtime.set_configuration(config) \n
      "},{"location":"tutorials/framedata/#working-with-availabledata-objects","title":"Working with AvailableData objects","text":"

During acquisition, the AvailableData object is the streaming interface, and this class has a frames method which iterates over the VideoFrame objects in AvailableData. Once we start acquisition, we'll utilize this iterator method to list the frames.

# To increase the likelihood of `AvailableData` containing data, we'll utilize the time python package to introduce a delay before we create our `AvailableData` object\n\nimport time\n\n# start acquisition\nruntime.start()\n\n# time delay of 0.5 seconds\ntime.sleep(0.5)\n\n# grab the packet of data currently available in memory for video stream 0. This is an AvailableData object.\navailable_data = runtime.get_available_data(0) \n
Once get_available_data() is called, the AvailableData object will be locked into memory, so the circular buffer that stores the available data will overflow if AvailableData isn't released.

      There may not be data available, in which case our variable available_data would be None. To avoid errors associated with this circumstance, we'll only grab data if available_data is not None.

      # NoneType if there is no available data. We can only grab frames if data is available.\nif available_data is not None:\n\n\n    # frames is an iterator over available_data, so we'll use this iterator to make a list of the frames\n    video_frames = list(available_data.frames())\n\nelse:         \n    # delete the available_data variable if there is no data in the packet to free up RAM\n    del available_data\n
      video_frames is a list with each element being an instance of the VideoFrame class. VideoFrame has a data method which provides the frame as an NDArray. The shape of this NDArray corresponds to the image dimensions used internally by Acquire. Since we have a single channel, both the first and the last dimensions will be 1. The interior dimensions will be height and width, respectively.

# grab the first VideoFrame object in video_frames and convert it to an NDArray\nfirst_frame = video_frames[0].data()\n\nprint(first_frame.shape)\n
      Output:
      (1, 768, 1024, 1) \n

To grab the desired NDArray image data from first_frame, we'll squeeze out the singleton dimensions as shown:

image = first_frame.squeeze()\n\nprint(image.shape)\n
      Output:
      (768, 1024)\n
      Finally, delete the available_data to unlock the region in the circular buffer.

# delete the available_data to unlock its region in the circular buffer\ndel available_data\n\n# stop runtime\nruntime.stop()\n
      "},{"location":"tutorials/livestream/","title":"Livestream to napari","text":"

The below script can be used to livestream data to the napari viewer. You may also utilize the Acquire napari plugin, which is provided in the package upon install. You can access the plugin in the napari plugins menu once Acquire is installed. You can review the plugin code here. You may also stream using other packages such as matplotlib.

      \"\"\"\nThis script will livestream data to the [napari viewer](https://napari.org/stable/). You may also utilize the `Acquire` napari plugin, which is provided in the `acquire-imaging` package on PyPI upon install. You can access the plugin in the napari plugins menu once `Acquire` is installed. You can review the [plugin code here](https://github.com/acquire-project/acquire-python/blob/main/python/acquire/__init__.py).\n\"\"\"\n\nimport acquire\nruntime = acquire.Runtime()\n\n# Initialize the device manager\ndm = runtime.device_manager()\n\n# Grab the current configuration\nconfig = runtime.get_configuration() \n\n# Select the uniform random camera as the video source\nconfig.video[0].camera.identifier = dm.select(acquire.DeviceKind.Camera, \".*random.*\")\n\n# Set the storage to trash to avoid saving the data\nconfig.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, \"Trash\")\n\n# Set the time for collecting data for a each frame\nconfig.video[0].camera.settings.exposure_time_us = 5e4  # 500 ms\n\nconfig.video[0].camera.settings.shape = (300, 200)\n\n# Set the max frame count to 100 frames\nconfig.video[0].max_frame_count = 100\n\n# Update the configuration with the chosen parameters \nconfig = runtime.set_configuration(config) \n\n# import napari and open a viewer to stream the data\nimport napari\nviewer = napari.Viewer()\n\nimport time\nfrom napari.qt.threading import thread_worker\n\ndef update_layer(args) -> None:\n    (new_image, stream_id) = args\n    print(f\"update layer: {new_image.shape=}, {stream_id=}\")\n    layer_key = f\"Video {stream_id}\"\n    try:\n        layer = viewer.layers[layer_key]\n        layer._slice.image._view = new_image\n        layer.data = new_image\n        # you can use the private api with layer.events.set_data() to speed up by 1-2 ms/frame\n\n    except KeyError:\n        viewer.add_image(new_image, name=layer_key)\n\n@thread_worker(connect={\"yielded\": update_layer})\ndef do_acquisition():\n    time.sleep(5)\n    runtime.start()\n\n    nframes = [0, 0]\n    stream_id = 0\n\n    def is_not_done() -> bool:\n        return (nframes[0] < config.video[0].max_frame_count) or (\n                nframes[1] < config.video[1].max_frame_count\n                )\n\n    def next_frame(): #-> Optional[npt.NDArray[Any]]:\n        \"\"\"Get the next frame from the current stream.\"\"\"\n        if nframes[stream_id] < config.video[stream_id].max_frame_count:\n            if packet := runtime.get_available_data(stream_id):\n                n = packet.get_frame_count()\n                nframes[stream_id] += n\n                f = next(packet.frames())\n                return f.data().squeeze().copy()\n        return None\n\n    stream = 1\n    # loop to continue to update the data in napari while acquisition is running\n    while is_not_done():  \n        if (frame := next_frame()) is not None:\n            yield frame, stream_id\n        time.sleep(0.1)\n\ndo_acquisition()\n\nnapari.run()\n
      "},{"location":"tutorials/multiscale/","title":"Writing Multiscale Zarr Files","text":"

      This tutorial will provide an example of writing multiscale data to a Zarr file.

      Zarr has additional capabilities relative to Acquire's basic storage devices, namely chunking, compression, and multiscale storage. To enable chunking and multiscale storage, set those attributes in instances of the ChunkingProperties and StorageProperties classes, respectively. You can learn more about the Zarr capabilities in Acquire here.

      "},{"location":"tutorials/multiscale/#configure-runtime","title":"Configure Runtime","text":"

      To start, we'll create a Runtime object and begin to configure the streaming process, selecting Zarr as the storage device so that writing multiscale data is possible.

import acquire\n\n# Initialize a Runtime object\nruntime = acquire.Runtime()\n\n# Initialize the device manager\ndm = runtime.device_manager()\n\n# Grab the current configuration\nconfig = runtime.get_configuration() \n\n# Select the radial sine simulated camera as the video source\nconfig.video[0].camera.identifier = dm.select(acquire.DeviceKind.Camera, \"simulated: radial sin\") \n\n# Set the storage to Zarr to have the option to save multiscale data\nconfig.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, \"Zarr\")\n\n# Set the time for collecting data for each frame\nconfig.video[0].camera.settings.exposure_time_us = 5e4  # 50 ms\n\n# size of image region of interest on the camera (x, y)\nconfig.video[0].camera.settings.shape = (1920, 1080)\n\n# Set the max frame count\nconfig.video[0].max_frame_count = 10 # collect 10 frames\n\n# specify the pixel datatype as a uint8\nconfig.video[0].camera.settings.pixel_type = acquire.SampleType.U8\n\n# set the scale of the pixels\nconfig.video[0].storage.settings.pixel_scale_um = (1, 1) # 1 micron by 1 micron\n\n# Set the output file to out.zarr\nconfig.video[0].storage.settings.filename = \"out.zarr\"\n

      To complete configuration, we'll configure the multiscale specific settings.

# Chunk size may need to be optimized for each acquisition. \n# See Zarr documentation for further guidance: https://zarr.readthedocs.io/en/stable/tutorial.html#chunk-optimizations\nconfig.video[0].storage.settings.chunking.max_bytes_per_chunk = 16 * 2**20 # 16 MiB\n\n# Set the x, y dimensions of each chunk to 1/3 of the width and height of the image, generating 9 chunks\nconfig.video[0].storage.settings.chunking.tile.width = (config.video[0].camera.settings.shape[0] // 3)\nconfig.video[0].storage.settings.chunking.tile.height = (config.video[0].camera.settings.shape[1] // 3)\n\n# turn on multiscale mode\nconfig.video[0].storage.settings.enable_multiscale = True\n\n# Update the configuration with the chosen parameters \nconfig = runtime.set_configuration(config) \n
      "},{"location":"tutorials/multiscale/#collect-and-inspect-the-data","title":"Collect and Inspect the Data","text":"
      # collect data\nruntime.start()\nruntime.stop()\n

      You can inspect the Zarr file directory to check that the data saved as expected. This zarr file should have multiple subdirectories, one for each resolution in the multiscale data. Alternatively, you can inspect the data programmatically with:

      # Utilize the zarr python library to read the data\nimport zarr\n\n# Open the data to create a zarr Group\ngroup = zarr.open(\"out.zarr\")\n
With multiscale mode enabled, an image pyramid will be formed by rescaling the data by a factor of 2 progressively until the rescaled image is smaller than the specified zarr chunk size in both dimensions. In this example, the original image dimensions are (1920, 1080), and we chunked the data using tiles 1/3 of the size of the image, namely (640, 360). To illustrate this point, we'll inspect the sizes of the various levels in the multiscale data and compare them to our specified chunk size.

      group[\"0\"], group[\"1\"], group[\"2\"]\n
      The output will be:
      (<zarr.core.Array '/0' (10, 1, 1080, 1920) uint8>,\n <zarr.core.Array '/1' (5, 1, 540, 960) uint8>,\n <zarr.core.Array '/2' (2, 1, 270, 480) uint8>)\n
      Here, the \"0\" directory contains the full-resolution array of frames of size 1920 x 1080, with a single channel, saving all 10 frames. The \"1\" directory contains the first rescaled array of frames of size 960 x 540, averaging every two frames, taking the frame count from 10 to 5. The \"2\" directory contains a further rescaled array of frames of size 480 x 270, averaging every four frames, taking the frame count from 10 to 2. Notice that both the frame width and frame height are now smaller than the chunk width and chunk height of 640 and 360, respectively, so this should be the last array in the group.

      "},{"location":"tutorials/props_json/","title":"Saving and Loading Properties from a JSON file","text":"

      This tutorial will provide an example of saving and subsequently loading a Properties object from a JSON file.

      "},{"location":"tutorials/props_json/#initialize-runtime","title":"Initialize Runtime","text":"

      To start, we'll import Acquire and create a Runtime object, which coordinates the streaming process.

      import acquire\nruntime = acquire.Runtime()\n
      "},{"location":"tutorials/props_json/#configure-camera","title":"Configure Camera","text":"

      All camera settings are captured by an instance of the Properties class, which will be associated with a given camera acquisition.

      # Instantiate a Properties object for the Runtime\nprops = runtime.get_configuration()\n
You can update any of the settings in this instance of Properties. To save any updated settings, use the set_configuration function. For this tutorial, we'll simply specify a camera, and then save these new settings. Note that more settings must be provided before this Properties object could be used for an acquisition.

      # set the radial sine simulated camera as the first video stream\nprops.video[0].camera.identifier = runtime.device_manager().select(acquire.DeviceKind.Camera, \"simulated: radial sin\")\n\n# save the updated settings\nprops = runtime.set_configuration(props)\n
      "},{"location":"tutorials/props_json/#save-properties-to-a-json-file","title":"Save Properties to a JSON file","text":"

      We'll utilize the json library to write our properties to a JSON file to save for subsequent acquisition.

      import json\n\n# cast the properties to a dictionary\nprops = props.dict()\n\n# convert the dictionary to json with \"human-readable\" formatting\nprops = json.dumps(props, indent=4, sort_keys=True)\n\n# save the properties to file \"sample_props.json\" in the current directory\nwith open(\"sample_props.json\", \"w\") as outfile:\n    outfile.write(props)\n
      "},{"location":"tutorials/props_json/#example-json-file","title":"Example JSON file","text":"

      The resulting sample_props.json file is below:

      {\n  \"video\": [\n    {\n      \"camera\": {\n        \"identifier\": {\n          \"id\": [\n            0,\n            1\n          ],\n          \"kind\": \"Camera\",\n          \"name\": \"simulated: radial sin\"\n        },\n        \"settings\": {\n          \"binning\": 1,\n          \"exposure_time_us\": 0.0,\n          \"input_triggers\": {\n            \"acquisition_start\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            },\n            \"exposure\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            },\n            \"frame_start\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            }\n          },\n          \"line_interval_us\": 0.0,\n          \"offset\": [\n            0,\n            0\n          ],\n          \"output_triggers\": {\n            \"exposure\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            },\n            \"frame_start\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            },\n            \"trigger_wait\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            }\n          },\n          \"pixel_type\": \"U16\",\n          \"readout_direction\": \"Forward\",\n          \"shape\": [\n            1,\n            1\n          ]\n        }\n      },\n      \"frame_average_count\": 0,\n      \"max_frame_count\": 18446744073709551615,\n      \"storage\": {\n        \"identifier\": {\n          \"id\": [\n            0,\n            0\n          ],\n          \"kind\": \"NONE\",\n          \"name\": \"\"\n        },\n        \"settings\": {\n          \"chunking\": {\n            \"max_bytes_per_chunk\": 16777216,\n            \"tile\": {\n              \"height\": 0,\n              \"planes\": 0,\n              \"width\": 0\n            }\n          },\n          \"enable_multiscale\": false,\n          \"external_metadata_json\": \"\",\n          \"filename\": \"\",\n          \"first_frame_id\": 0,\n          \"pixel_scale_um\": [\n            0.0,\n            0.0\n          ]\n        },\n        \"write_delay_ms\": 0.0\n      }\n    },\n    {\n      \"camera\": {\n        \"identifier\": {\n          \"id\": [\n            0,\n            0\n          ],\n          \"kind\": \"NONE\",\n          \"name\": \"\"\n        },\n        \"settings\": {\n          \"binning\": 1,\n          \"exposure_time_us\": 0.0,\n          \"input_triggers\": {\n            \"acquisition_start\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            },\n            \"exposure\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            },\n            \"frame_start\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            }\n          },\n          \"line_interval_us\": 0.0,\n          \"offset\": [\n            0,\n  
          0\n          ],\n          \"output_triggers\": {\n            \"exposure\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            },\n            \"frame_start\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            },\n            \"trigger_wait\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            }\n          },\n          \"pixel_type\": \"U16\",\n          \"readout_direction\": \"Forward\",\n          \"shape\": [\n            0,\n            0\n          ]\n        }\n      },\n      \"frame_average_count\": 0,\n      \"max_frame_count\": 18446744073709551615,\n      \"storage\": {\n        \"identifier\": {\n          \"id\": [\n            0,\n            0\n          ],\n          \"kind\": \"NONE\",\n          \"name\": \"\"\n        },\n        \"settings\": {\n          \"chunking\": {\n            \"max_bytes_per_chunk\": 16777216,\n            \"tile\": {\n              \"height\": 0,\n              \"planes\": 0,\n              \"width\": 0\n            }\n          },\n          \"enable_multiscale\": false,\n          \"external_metadata_json\": \"\",\n          \"filename\": \"\",\n          \"first_frame_id\": 0,\n          \"pixel_scale_um\": [\n            0.0,\n            0.0\n          ]\n        },\n        \"write_delay_ms\": 0.0\n      }\n    }\n  ]\n}\n
      "},{"location":"tutorials/props_json/#load-properties-from-a-json-file","title":"Load Properties from a JSON file","text":"

      You can load the settings in the JSON file to a Properties object and set this configuration for your Runtime as shown below:

      import acquire\nimport json\n\n# create a Runtime object\nruntime = acquire.Runtime()\n\n# Instantiate a `Properties` object from the settings in sample_props.json\nprops = acquire.Properties(**json.load(open('sample_props.json')))\n\n# save the properties for this instance of Runtime\nprops = runtime.set_configuration(props)\n
      "},{"location":"tutorials/select/","title":"Device Selection","text":"

This tutorial illustrates the difference between the select and select_one_of methods in the DeviceManager class. select chooses the first discovered device of a specific kind, camera or storage device. You can also, optionally, select a specific device by passing the device name as a string to select. In contrast, select_one_of requires that you specify both the kind of device to select and a list of possible device names. select_one_of will iterate through the list and select the first device in the list of names that is discovered on your machine.

      To start, instantiate Runtime and DeviceManager objects.

      import acquire \n\n# Instantiate a Runtime object\nruntime = acquire.Runtime()\n\n# Instantiate a DeviceManager object for the Runtime\nmanager = runtime.device_manager()\n\n# List devices discovered by DeviceManager\nfor device in manager.devices():\n    print(device)\n

      The output of the above code is below. All identified devices will be listed, and in the case of this tutorial, no cameras were connected to the machine, so only simulated cameras were found. Note that discovered storage devices will also print.

      <DeviceIdentifier Camera \"simulated: uniform random\">\n<DeviceIdentifier Camera \"simulated: radial sin\">\n<DeviceIdentifier Camera \"simulated: empty\">\n\n# storage devices will also print\n<DeviceIdentifier Storage \"raw\">\n<DeviceIdentifier Storage \"tiff\">\n<DeviceIdentifier Storage \"trash\">\n<DeviceIdentifier Storage \"tiff-json\">\n<DeviceIdentifier Storage \"Zarr\">\n<DeviceIdentifier Storage \"ZarrBlosc1ZstdByteShuffle\">\n<DeviceIdentifier Storage \"ZarrBlosc1Lz4ByteShuffle\">\n
      The order of those printed devices matters. Below are two examples of how the select method works. In the first, without a specific device name provided, select will choose the first device of the specified kind in the list of discovered devices. In the second example, a specific device name is provided, so select will grab that device if it was discovered by Runtime.

      # specify that the device should be a camera and not a storage device\nkind = acquire.DeviceKind.Camera\n\n# 1st example: select the first camera in the list of discovered devices\nselected = manager.select(kind)\n\n# 2nd example: select a specific camera since the name of the device was provided\nspecific = manager.select(kind, \"simulated: empty\")\n\n# print the 2 devices\nprint(selected)\nprint(specific)\n
      The output of the code is below:
      <DeviceIdentifier Camera \"simulated: uniform random\">\n<DeviceIdentifier Camera \"simulated: empty\">\n

      The select_one_of method allows more flexibility since you provide a list of names of acceptable devices for it to iterate through until a discovered device is located.

      # specify that the device should be a camera and not a storage device\nkind = acquire.DeviceKind.Camera\n\nselected = manager.select_one_of(kind, [\"Hamamatsu_DCAMSDK4_v22126552\", \"simulated: radial sin\", \"simulated: empty\"])\n\n# print which camera was selected\nprint(selected)\n
      The output of the code is below. The Hamamatsu camera was not discovered by Runtime, so select_one_of iterates until it finds a device discovered by Runtime. In this case, the next item in the list is a simulated camera that was discovered by Runtime.
      <DeviceIdentifier Camera \"simulated: radial sin\">\n

      "},{"location":"tutorials/setup/","title":"Utilizing the Setup Method","text":"

      This tutorial will provide an example of utilizing the setup method to configure Runtime and specify some basic properties.

      "},{"location":"tutorials/setup/#setup-function-definition","title":"Setup Function Definition","text":"
      def setup(\n    runtime: Runtime,\n    camera: Union[str, List[str]] = \"simulated: radial sin\",\n    storage: Union[str, List[str]] = \"Tiff\",\n    output_filename: Optional[str] = \"out.tif\",\n) -> Properties\n

      The setup function can be used as a shorthand to simplify the Runtime configuration process. setup takes a Runtime object and strings of the camera and storage device names and returns a Properties object. You may also optionally specify the filename for writing the data.

      "},{"location":"tutorials/setup/#example","title":"Example","text":"

      import acquire\n\n# Initialize a Runtime object\nruntime = acquire.Runtime()\n\n# use setup to get configuration and set the camera, storage, and filename\nconfig = acquire.setup(runtime, \"simulated: radial sin\", \"Zarr\", \"out.zarr\")\n
      You can subsequently use config to specify additional settings and set those configurations before beginning acquisition.

Without using setup, the process would take a few additional lines of code. The below code is equivalent to the example above.

      import acquire\n\n# Initialize a Runtime object\nruntime = acquire.Runtime()\n\n# Grab the current configuration\nconfig = runtime.get_configuration() \n\n# Select the radial sine simulated camera as the video source\nconfig.video[0].camera.identifier = runtime.device_manager().select(acquire.DeviceKind.Camera, \"simulated: radial sin\") \n\n# Set the storage to Zarr to have the option to save multiscale data\nconfig.video[0].storage.identifier = runtime.device_manager().select(acquire.DeviceKind.Storage, \"Zarr\")\n\n# Set the output file to out.zarr\nconfig.video[0].storage.settings.filename = \"out.zarr\"\n

      In either case, we can update the configuration settings using:

      config = runtime.set_configuration(config)\n
      "},{"location":"tutorials/start_stop/","title":"Multiple Acquisitions","text":"

      This tutorial will provide an example of starting, stopping, and restarting acquisition, or streaming from a video source.

      "},{"location":"tutorials/start_stop/#configure-streaming","title":"Configure Streaming","text":"

      To start, we'll create a Runtime object and configure the streaming process. To do this, we'll utilize the setup method. More information on that method is detailed in this tutorial.

import acquire\n\n# Initialize a Runtime object\nruntime = acquire.Runtime()\n\n# Use setup to select the video source and storage device and grab the configuration\nconfig = acquire.setup(runtime, \"simulated: radial sin\", \"Tiff\")\n\nconfig.video[0].storage.settings.filename = \"out.tif\"\nconfig.video[0].camera.settings.shape = (192, 108)\nconfig.video[0].camera.settings.exposure_time_us = 10e4\nconfig.video[0].max_frame_count = 10\n\n# Update the configuration with the chosen parameters \nconfig = runtime.set_configuration(config) \n
      "},{"location":"tutorials/start_stop/#start-stop-and-restart-acquisition","title":"Start, Stop, and Restart Acquisition","text":"

During acquisition, the AvailableData object is the streaming interface. Upon shutdown, Runtime deletes all of the objects created during acquisition to free up resources, and you must stop acquisition by calling runtime.stop() between acquisitions. Otherwise, an exception will be raised.

      To understand how acquisition works, we'll start, stop, and repeat acquisition and print the DeviceState, which can be Armed, AwaitingConfiguration, Closed, or Running, and the AvailableData object throughout the process.

      If acquisition has ended, all of the objects are deleted, including AvailableData objects, so those will be None when not acquiring data. In addition, if enough time hasn't elapsed since acquisition started, AvailableData will also be None. We'll utilize the time python package to introduce time delays to account for these facts.

      # package used to introduce time delays\nimport time\n\n# start acquisition\nruntime.start()\n\nprint(runtime.get_state())\nprint(runtime.get_available_data(0))\n\n# wait 0.5 seconds to allow time for data to be acquired\ntime.sleep(0.5)\n\nprint(runtime.get_state())\nprint(runtime.get_available_data(0))\n\n# stop acquisition\nruntime.stop()\n\nprint(runtime.get_state())\nprint(runtime.get_available_data(0))\n\n# start acquisition\nruntime.start()\n\n# time delay of 5 seconds - acquisition only runs for 1 second\ntime.sleep(5)\n\nprint(runtime.get_state())\nprint(runtime.get_available_data(0))\n\n# stop acquisition\nruntime.stop()\n

      The output will be:

      DeviceState.Running\nNone\nDeviceState.Running\n<builtins.AvailableData object at 0x00000218D685E5B0>\nDeviceState.Armed\nNone\nDeviceState.Armed\n<builtins.AvailableData object at 0x00000218D685E3D0>\n
1. The first print happens immediately after we started acquisition, before enough time has elapsed for data to be collected given the exposure time, so the camera is running but there is no data yet.
2. The next print happens after waiting 0.5 seconds, so acquisition is still running and acquired data is now available.
3. The subsequent print follows the call to runtime.stop(), which terminates acquisition after the specified max number of frames is collected, so the device is no longer running, although it is in the Armed state ready for acquisition, and there is no available data.
4. The final print occurs after waiting 5 seconds after starting acquisition, which is longer than the 1 second needed to collect all the frames, so the device is no longer collecting data. However, runtime.stop() hasn't been called, so the AvailableData object has not yet been deleted.

      "},{"location":"tutorials/storage/","title":"Storage Device Selection","text":"

      This tutorial illustrates the storage device options in Acquire.

      "},{"location":"tutorials/storage/#description-of-storage-devices","title":"Description of Storage Devices","text":"

      To start, we'll create a Runtime object and print the storage device options.

      import acquire \n\n# Instantiate a Runtime object\nruntime = acquire.Runtime()\n\n# Instantiate a DeviceManager object for the Runtime\nmanager = runtime.device_manager()\n\n# Print devices in DeviceManager of kind Storage\nfor device in manager.devices():\n    if device.kind == acquire.DeviceKind.Storage:\n        print(device)\n
      The output of that script will be:

      # Storage Devices printed\n\n<DeviceIdentifier Storage \"raw\">\n<DeviceIdentifier Storage \"tiff\">\n<DeviceIdentifier Storage \"trash\">\n<DeviceIdentifier Storage \"tiff-json\">\n<DeviceIdentifier Storage \"Zarr\">\n<DeviceIdentifier Storage \"ZarrBlosc1ZstdByteShuffle\">\n<DeviceIdentifier Storage \"ZarrBlosc1Lz4ByteShuffle\">\n
• raw - Streams to a raw binary file.
• tiff - Streams to a bigtiff file. Metadata is stored in the ImageDescription tag for each frame as a JSON string.
• trash - Writes nothing. Discards incoming data. Useful for live streaming applications.
• tiff-json - Stores the video stream in a bigtiff, and stores metadata in a JSON file. Both are located in a folder identified by the filename property.
• Zarr - Streams data to a Zarr V2 file with associated metadata.
• ZarrBlosc1ZstdByteShuffle - Streams compressed data (zstd codec) to a Zarr V2 file with associated metadata.
• ZarrBlosc1Lz4ByteShuffle - Streams compressed data (lz4 codec) to a Zarr V2 file with associated metadata.

      Acquire supports streaming data to bigtiff and Zarr V2.

      Zarr has additional capabilities relative to the basic storage devices, namely chunking, compression, and multiscale storage. You can learn more about the Zarr capabilities in Acquire here.

      "},{"location":"tutorials/storage/#select-the-storage-device-and-specify-where-to-store-the-data","title":"Select the Storage Device and Specify where to Store the Data","text":"

      We'll use our instance of Runtime and specify that the data from one video source should be streamed to a file out.tif in the example below:

# get the current configuration\nconfig = runtime.get_configuration()\n\n# Select the tiff storage device\nconfig.video[0].storage.identifier = manager.select(acquire.DeviceKind.Storage, \"tiff\")\n\n# Set the data filename to out.tif in your current directory (provide the whole filetree to save to a different directory)\nconfig.video[0].storage.settings.filename = \"out.tif\" \n

      Before proceeding, complete the Camera setup and call set_configuration to save those new configuration settings.
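
A minimal sketch of those remaining steps, reusing the simulated camera from earlier tutorials:

# Select a camera and set a few basic properties\nconfig.video[0].camera.identifier = manager.select(acquire.DeviceKind.Camera, \"simulated: radial sin\")\nconfig.video[0].camera.settings.exposure_time_us = 5e4  # 50 ms\nconfig.video[0].camera.settings.shape = (1024, 768)\nconfig.video[0].max_frame_count = 10 # collect 10 frames\n\n# Save the new settings to the runtime\nconfig = runtime.set_configuration(config)\n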

      "},{"location":"tutorials/trig_json/","title":"Saving and Loading Trigger Settings from a JSON file","text":"

      This tutorial will provide an example of saving and subsequently loading a Trigger object from a JSON file.

      "},{"location":"tutorials/trig_json/#initialize-runtime","title":"Initialize Runtime","text":"

      To start, we'll import Acquire and create a Runtime object, which coordinates the streaming process.

      import acquire\nruntime = acquire.Runtime()\n
      "},{"location":"tutorials/trig_json/#create-a-trigger-object","title":"Create a Trigger Object","text":"

Trigger objects have 4 attributes: edge, enable, line, and kind. In this example, we'll only adjust the edge attribute.

      # Instantiate a Trigger object\ntrig = acquire.Trigger()\n\n# change the edge attribute from the default Rising to Falling\ntrig.edge = acquire.TriggerEdge.Falling\n
      "},{"location":"tutorials/trig_json/#save-properties-to-a-json-file","title":"Save Properties to a JSON file","text":"

      We'll utilize the json library to write our Trigger to a JSON file to save for subsequent acquisition.

import json\n\n# cast the trigger to a dictionary\ntrig = trig.dict()\n\n# convert the dictionary to json with \"human-readable\" formatting\ntrig = json.dumps(trig, indent=4, sort_keys=True)\n\n# save the trigger to file \"sample_trig.json\" in the current directory\nwith open(\"sample_trig.json\", \"w\") as outfile:\n    outfile.write(trig)\n
      "},{"location":"tutorials/trig_json/#example-json-file","title":"Example JSON file","text":"

      The resulting sample_trig.json file is below:

      {\n  \"edge\": \"Falling\",\n  \"enable\": false,\n  \"kind\": \"Input\",\n  \"line\": 0\n}\n
      "},{"location":"tutorials/trig_json/#load-properties-from-a-json-file","title":"Load Properties from a JSON file","text":"

      You can load the trigger attributes in the JSON file to a Trigger object as shown below:

      # Instantiate a `Trigger` object from the settings in sample_trig.json\ntrig = acquire.Trigger(**json.load(open('sample_trig.json')))\n
      "},{"location":"tutorials/trigger/","title":"Finite Triggered Acquisition","text":"

      Acquire (acquire-imaging on PyPI) is a Python package providing a multi-camera video streaming library focused on performant microscopy, with support for up to two simultaneous, independent, video streams.

      This tutorial shows an example of setting up triggered acquisition of a finite number of frames with one of Acquire's supported devices and saving the data to a Zarr file.

      "},{"location":"tutorials/trigger/#initialize-acquisition","title":"Initialize Acquisition","text":"

      To start, we'll import Acquire and create an acquisition Runtime object, which initializes the driver adaptors needed for the supported cameras.

      import acquire\nruntime = acquire.Runtime()\n
      "},{"location":"tutorials/trigger/#configure-camera","title":"Configure Camera","text":"

All camera settings can be captured by an instance of the Properties class, which will be associated with a given camera acquisition. The settings can be stored in a dictionary (e.g. Properties.dict()). Using the json library, these settings can be saved to a JSON file and subsequently loaded (e.g. Properties(**json.load(open('acquire.json')))).

      props = runtime.get_configuration()\n\nimport json\nwith open(\"/path/to/acquire.json\", \"w\") as f:\n    json.dump(props.dict(), f)\n

      The current configuration settings can be checked and assigned to an instance of the Properties class with:

      props = runtime.get_configuration() \n

      Since Acquire supports 2 video streams, each camera, or source, must be configured separately. In this example, we will only use 1 source for the acquisition, so we will only need to configure props.video[0]. To set the first video stream to Hamamatsu Orca Fusion BT (C15440-20UP), you can use the following with a regular expression to grab the Hamamatsu camera:

      props.video[0].camera.identifier = runtime.device_manager().select(acquire.DeviceKind.Camera, 'Hamamatsu C15440.*')\n

      Next we'll choose the settings for the Hamamatsu camera. The CameraProperties class describes the available settings, which include exposure time (in microseconds), binning, pixel data type (e.g. u16), and how many frames to acquire.

      Every property can be set using the following, but in this example, we will only change a few of the available settings.

      props.video[0].camera.settings.binning = 1 # no pixels will be combined\nprops.video[0].camera.settings.shape = (1700, 512) # shape of the image to be acquired in pixels\nprops.video[0].camera.settings.offset = (302, 896) # centers the image region of interest on the camera sensor\nprops.video[0].camera.settings.pixel_type = acquire.SampleType.U16 # sets the pixel data type to a 16-bit unsigned integer\nprops.video[0].max_frame_count = 10 # finite acquisition of 10 frames. Use 0 for infinite acquisition.\n

Triggers can also be set in the CameraProperties object. The parameters can be stored in a dictionary (e.g., Trigger.dict()). You can construct a Trigger from a JSON file (e.g., acquire.Trigger(**json.load(open('trigger.json')))), using the json library.

      trig = acquire.Trigger()\n\nimport json\nwith open(\"/path/to/trigger.json\", \"w\") as f:\n    json.dump(trig.dict(), f)\n

      In this example, we'll only utilize output triggers. By default, the camera's internal triggering is used, but you may explicitly disable external input triggers using:

      props.video[0].camera.settings.input_triggers = acquire.InputTriggers() # default: disabled\n

      Output triggers can be set to begin exposure, start a new frame, or wait before acquiring. We can enable an exposure trigger to start on the rising edge with:

      props.video[0].camera.settings.output_triggers.exposure = acquire.Trigger(\n    enable=True, line=1, edge=\"Rising\"\n)\n
      "},{"location":"tutorials/trigger/#select-storage","title":"Select Storage","text":"

Storage objects have identifiers which specify the file type (e.g., Zarr or Tiff) and settings described by an instance of the StorageProperties class. We can set the file type to Zarr and the filename to \"out.zarr\" with:

props.video[0].storage.identifier = runtime.device_manager().select(acquire.DeviceKind.Storage, 'zarr')\nprops.video[0].storage.settings.filename = \"out.zarr\"\n
      "},{"location":"tutorials/trigger/#save-configuration","title":"Save configuration","text":"

None of these settings take effect on the device until you call the set_configuration method, which applies the provided properties to the device and returns the configuration as it was actually applied.

      We'll set the configuration with:

      props = runtime.set_configuration(props)\n

You can optionally print these settings, to save for your records, using the Rich Python library:

      from rich.pretty import pprint\npprint(props.dict())\n
      "},{"location":"tutorials/trigger/#acquire-data","title":"Acquire data","text":"

      To begin acquisition:

      runtime.start()\n

You can stop acquisition with runtime.stop(), which waits until the specified number of frames has been collected, or with runtime.abort(), which stops immediately.
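
As a minimal sketch, the two options look like this (only one would be called in practice):

runtime.stop()   # blocks until the configured number of frames has been collected\n# or:\nruntime.abort()  # ends acquisition immediately\n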

      "}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Acquire Docs","text":""},{"location":"#guides","title":"Guides","text":"Get Started

      Install Acquire and use simulated cameras

      Get Started API Reference

      Information on classes and methods

      API Reference Tutorials

      Guides on using Acquire for specific tasks

      Tutorials For contributors

      Learn how to contribute code or documentation to Acquire

      For contributors"},{"location":"#about-acquire","title":"About Acquire","text":"

Acquire (acquire-imaging on PyPI) provides high-speed, multi-camera video streaming and image acquisition, with a programming interface for streaming video data directly to napari and to Python- and cloud-friendly file formats.

      "},{"location":"#installation","title":"Installation","text":"

      To install Acquire on Windows, macOS, or Ubuntu, simply run the following command:

      python -m pip install acquire-imaging\n
      "},{"location":"#supported-cameras-and-file-formats","title":"Supported Cameras and File Formats","text":"

      Acquire supports the following cameras (currently only on Windows):

      • Hamamatsu Orca Fusion BT (C15440-20UP)
      • Vieworks VC-151MX-M6H00
      • FLIR Blackfly USB3 (BFLY-U3-23S6M-C)
      • FLIR Oryx 10GigE (ORX-10GS-51S5M-C)

      Acquire also supports the following output file formats:

      • Tiff
      • Zarr

      For testing and demonstration purposes, Acquire provides a few simulated cameras, as well as raw and trash output devices.

      "},{"location":"#citing-acquire","title":"Citing Acquire","text":"
      cff-version: 1.2.0\ntitle: Acquire: a multi-camera video streaming software focusing on microscopy\nmessage: >-\n  If you use this software, please cite it using the\n  metadata from this file.\ntype: software\nauthors:\n  - given-names: Nathan\n    family-names: Clack\n    email: nclack@chanzuckerberg.com\n    affiliation: Chan-Zuckerberg Initiative Foundation\n    orcid: 'https://orcid.org/0000-0001-6236-9282'\n  - given-names: Alan\n    family-names: Liddell\n    email: aliddell@chanzuckerberg.com\n    affiliation: Chan-Zuckerberg Initiative Foundation\n  - given-names: Andrew\n    family-names: Sweet\n    email: andrewdsweet@gmail.com\n    affiliation: Chan-Zuckerberg Initiative Foundation\nrepository-code: 'https://github.com/acquire-project/acquire-python'\nrepository-artifact: 'https://pypi.org/project/acquire-imaging/'\nabstract: >-\n  acquire-imaging is a library focusing on multi-camera video\n  streaming for microscopy.\nlicense: Apache-2.0\n
      "},{"location":"#acquire-license","title":"Acquire License","text":"

      Acquire is provided under an Apache 2.0 license. You can learn more about the Apache license in the documentation here.

      "},{"location":"api_reference/","title":"API Reference","text":"

      Information on the classes in acquire-imaging along with the attributes and methods associated with them.

      "},{"location":"api_reference/#class-availabledata","title":"Class AvailableData","text":"

The AvailableData class represents the collection of frames that have been captured since the last call to runtime.get_available_data(). AvailableData objects should have a short lifetime, since they reserve space on the video queue and will eventually block camera acquisition so that no data is overwritten before it can be processed.

      class AvailableData:\n    def frames(self) -> Iterator[VideoFrame]:\n        \"\"\"Returns an iterator over the video frames in the available data.\"\"\"\n\n    def get_frame_count(self) -> int:\n        \"\"\"Returns the total number of video frames in the available data.\"\"\"\n\n    def __iter__(self) -> Iterator[VideoFrame]:\n        \"\"\"Returns an iterator over the video frames in the available data.\"\"\"\n
      • The frames method provides an iterator over these frames.

      • Call get_frame_count() to query the number of frames in an AvailableData object.

      • The __iter__ method enables AvailableData objects to be iterated.
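
For example, a minimal sketch of draining frames during acquisition (assuming a configured, running Runtime named runtime and video stream 0):

available = runtime.get_available_data(0)  # frames captured since the last call\nprint(available.get_frame_count())  # number of frames in this batch\nfor frame in available.frames():  # frames() yields VideoFrame objects\n    image = frame.data()  # NDArray of pixel data\ndel available  # release the batch promptly so the video queue can be reused\n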

      "},{"location":"api_reference/#class-camera","title":"Class Camera","text":"

      The Camera class is used to describe cameras or other video sources.

      class Camera:\n    identifier: Optional[DeviceIdentifier]\n    settings: CameraProperties\n\n    def __init__(self, *args: None, **kwargs: Any) -> None: ...\n    \"\"\"Initializes a Camera object with optional arguments.\"\"\"\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the Camera attributes.\"\"\"\n
      • identifier: An optional attribute which contains an instance of the DeviceIdentifier class. DeviceIdentifier has id and kind attributes assigned by Acquire if the device is natively supported. Otherwise, it is of type None.

      • settings: An instance of the CameraProperties class which contains the settings for the camera.

      • The dict method creates a dictionary of a Camera object's attributes.

      "},{"location":"api_reference/#class-cameraproperties","title":"Class CameraProperties","text":"

      The CameraProperties class is used to set the desired camera properties for acquisition.

      class CameraProperties:\n    exposure_time_us: float\n    line_interval_us: float\n    binning: float\n    pixel_type: SampleType\n    readout_direction: Direction\n    offset: Tuple[int, int]\n    shape: Tuple[int, int]\n    input_triggers: InputTriggers\n    output_triggers: OutputTriggers\n\n    def __init__(self, *args: None, **kwargs: Any) -> None: ...\n    \"\"\"Initializes a CameraProperties object with optional arguments.\"\"\"\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the CameraProperties attributes.\"\"\"\n
• exposure_time_us: How long, in microseconds, your camera should collect light from the sample. For simulated cameras, this is simply a waiting period before generating the next frame.

      • line_interval_us: The time to scan one line in microseconds in a rolling shutter camera.

      • binning: How many adjacent pixels in each direction to combine by averaging. For example, if binning is set to 2, a 2x2 square of pixels will be combined by averaging. If binning is set to 1, no pixels will be combined.

      • pixel_type: An instance of the SampleType class which specifies the numerical data type, for example Uint16, a 16-bit unsigned integer type.

• readout_direction: An instance of the Direction class which specifies whether the data is read out forwards or backwards.

      • offset: A tuple of two integers representing the (x, y) offset in pixels of the image region of interest on the camera.

• shape: A tuple of two integers representing the (x, y) size in pixels of the image region of interest on the camera.

      • input_triggers: An instance of the InputTriggers class, which describes the trigger signals for starting acquisition, camera exposure, and acquiring a frame.

      • output_triggers: An instance of the OutputTriggers class, which describes the trigger signals for the camera exposure, acquiring a frame, as well as any wait times for sending the trigger signal.

• The dict method creates a dictionary of a CameraProperties object's attributes.
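
As a brief sketch (assuming props was returned by runtime.get_configuration()), the rolling-shutter-related attributes might be set like this:

props.video[0].camera.settings.exposure_time_us = 2e4  # 20 ms exposure\nprops.video[0].camera.settings.line_interval_us = 10.0  # 10 us to scan one sensor line\nprops.video[0].camera.settings.readout_direction = acquire.Direction.Forward  # read lines front to back\n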

      "},{"location":"api_reference/#class-chunkingproperties","title":"Class ChunkingProperties","text":"

      The ChunkingProperties class represents properties related to data chunking for storage in a Zarr container.

      class ChunkingProperties:\n    max_bytes_per_chunk: int\n    tile: TileShape\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the ChunkingProperties attributes.\"\"\"\n
      • max_bytes_per_chunk: The maximum number of bytes per data chunk.

      • tile: An instance of the TileShape class representing the shape of the data chunk tile.

      • The dict method creates a dictionary of a ChunkingProperties object's attributes.

      "},{"location":"api_reference/#class-deviceidentifier","title":"Class DeviceIdentifier","text":"

      The DeviceIdentifier class represents an identifier for a supported device, including its unique id and type, such as a camera or storage.

      class DeviceIdentifier:\n    id: Tuple[int, int]\n    kind: DeviceKind\n    name: str\n\n    def __init__(self, *args: None, **kwargs: Any) -> None: ...\n    \"\"\"Initializes a DeviceIdentifier object with optional arguments.\"\"\"\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the DeviceIdentifier attributes.\"\"\"\n\n    @staticmethod\n    def none() -> DeviceIdentifier: ...\n    \"\"\"Returns a \"None\" type DeviceIdentifier.\n    Useful when a DeviceIdentifier is not needed.\"\"\"\n\n    def __eq__(self, other: object) -> bool:\n        \"\"\"Checks if two DeviceIdentifier objects are equal.\"\"\"\n\n    def __ge__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceIdentifier is greater than or equal to another.\"\"\"\n\n    def __gt__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceIdentifier is greater than another.\"\"\"\n\n    def __le__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceIdentifier is less than or equal to another.\"\"\"\n\n    def __lt__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceIdentifier is less than another.\"\"\"\n\n    def __ne__(self, other: object) -> bool:\n        \"\"\"Checks if two DeviceIdentifier objects are not equal.\"\"\"\n
      • id: A tuple (driver_id, device_id) containing two Uint8 integers that serve to identify each driver and device uniquely for a given run.

      • kind: An instance of the DeviceKind class that represents the type or kind of the device.

      • name: A string representing the name or label of the device.

      • The dict method creates a dictionary of a DeviceIdentifier object's attributes.
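
For illustration, a short sketch that inspects each discovered device's identifier (assuming a Runtime named runtime):

for ident in runtime.device_manager().devices():\n    # each entry is a DeviceIdentifier with id, kind, and name attributes\n    driver_id, device_id = ident.id\n    print(ident.kind, ident.name, driver_id, device_id)\n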

      "},{"location":"api_reference/#class-devicekind","title":"Class DeviceKind","text":"

      The DeviceKind class represents the types of devices in a given system.

      class DeviceKind:\n    Camera: ClassVar[DeviceKind] = DeviceKind.Camera\n    NONE: ClassVar[DeviceKind] = DeviceKind.NONE\n    Signals: ClassVar[DeviceKind] = DeviceKind.Signals\n    StageAxis: ClassVar[DeviceKind] = DeviceKind.StageAxis\n    Storage: ClassVar[DeviceKind] = DeviceKind.Storage\n\n    def __init__(self, *args: None, **kwargs: Any) -> None:\n        \"\"\"Initializes the DeviceKind class.\"\"\"\n\n    def __eq__(self, other: object) -> bool:\n        \"\"\"Checks if two DeviceKind objects are equal.\"\"\"\n\n    def __ge__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceKind is greater than or equal to another.\"\"\"\n\n    def __gt__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceKind is greater than another.\"\"\"\n\n    def __int__(self) -> int:\n        \"\"\"Converts the DeviceKind to an integer.\"\"\"\n\n    def __le__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceKind is less than or equal to another.\"\"\"\n\n    def __lt__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceKind is less than another.\"\"\"\n\n    def __ne__(self, other: object) -> bool:\n        \"\"\"Checks if two DeviceKind objects are not equal.\"\"\"\n
      • Camera: Enum-type class variable of DeviceKind that specifies a device is a camera.

• NONE: Enum-type class variable of DeviceKind for when a device's kind is unavailable.

      • Signals: Enum-type class variable of DeviceKind that specifies a device is a signal.

      • StageAxis: Enum-type class variable of DeviceKind that specifies a device is a stage.

      • Storage: Enum-type class variable of DeviceKind that specifies a device is for storage.

      "},{"location":"api_reference/#class-devicemanager","title":"Class DeviceManager","text":"

      The DeviceManager class manages selection of available devices in the system. Regular expressions are accepted for the name argument.

class DeviceManager:\n    def devices(self) -> List[DeviceIdentifier]:\n        \"\"\"Returns a list of all available device identifiers.\"\"\"\n\n    @overload\n    def select(self, kind: DeviceKind, name: Optional[str]) -> Optional[DeviceIdentifier]:\n        \"\"\"Selects a specified device.\n\n        Args:\n            kind (DeviceKind): The type of device to select.\n            name (Optional[str]): The name of the device to select. Regular expressions supported.\n\n        Returns:\n            Optional[DeviceIdentifier]: The selected device identifier, or None if the specified device is not available.\n        \"\"\"\n\n    def select_one_of(self, kind: DeviceKind, names: List[str]) -> Optional[DeviceIdentifier]:\n        \"\"\"Selects the first available device of the given kind whose name matches one of the specified names.\n\n        Args:\n            kind (DeviceKind): The type of device to select.\n            names (List[str]): A list of device names to choose from. Regular expressions supported.\n\n        Returns:\n            Optional[DeviceIdentifier]: The selected device identifier, or None if none of the specified devices are available.\n        \"\"\"\n
      • Call devices to list the DeviceIdentifier of each available device.

      • Call select to choose the first available device of a given type or to select a specific device by name.

      • Call select_one_of to choose one device from a list of acceptable devices of a given kind.
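
A minimal sketch of these methods (the device names below are illustrative):

dm = runtime.device_manager()\n\nprint(dm.devices())  # every available DeviceIdentifier\n\n# first available camera, regardless of name\ncamera = dm.select(acquire.DeviceKind.Camera)\n\n# first storage device whose name matches one of these; regular expressions are accepted\nstorage = dm.select_one_of(acquire.DeviceKind.Storage, [\"Zarr\", \"Tiff\"])\n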

      "},{"location":"api_reference/#class-devicestate","title":"Class DeviceState","text":"

      The DeviceState class represents the acquisition status of a device.

      class DeviceState:\n    Closed: ClassVar[DeviceState] = DeviceState.Closed\n    AwaitingConfiguration: ClassVar[DeviceState] = DeviceState.AwaitingConfiguration\n    Armed: ClassVar[DeviceState] = DeviceState.Armed\n    Running: ClassVar[DeviceState] = DeviceState.Running\n\n    def __eq__(self, other: object) -> bool:\n        \"\"\"Checks if two DeviceState objects are equal.\"\"\"\n\n    def __ge__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceState is greater than or equal to another.\"\"\"\n\n    def __gt__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceState is greater than another.\"\"\"\n\n    def __int__(self) -> int:\n        \"\"\"Converts the DeviceState to an integer.\"\"\"\n\n    def __le__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceState is less than or equal to another.\"\"\"\n\n    def __lt__(self, other: object) -> bool:\n        \"\"\"Checks if this DeviceState is less than another.\"\"\"\n\n    def __ne__(self, other: object) -> bool:\n        \"\"\"Checks if two DeviceState objects are not equal.\"\"\"\n
• Closed: Enum-type class variable of DeviceState that specifies when a device is not ready for configuration.

• AwaitingConfiguration: Enum-type class variable of DeviceState that specifies when a device is ready for configuration.

• Armed: Enum-type class variable of DeviceState that specifies when a device is ready to stream data.

• Running: Enum-type class variable of DeviceState that specifies when a device is streaming data.

      "},{"location":"api_reference/#class-direction","title":"Class Direction","text":"

      The Direction class represents the direction that data is read for streaming.

      class Direction:\n    Backward: ClassVar[Direction] = Direction.Backward\n    Forward: ClassVar[Direction] = Direction.Forward\n\n    def __eq__(self, other: object) -> bool:\n        \"\"\"Checks if two Direction objects are equal.\"\"\"\n\n    def __ge__(self, other: object) -> bool:\n        \"\"\"Checks if this Direction is greater than or equal to another.\"\"\"\n\n    def __gt__(self, other: object) -> bool:\n        \"\"\"Checks if this Direction is greater than another.\"\"\"\n\n    def __int__(self) -> int:\n        \"\"\"Converts the Direction to an integer.\"\"\"\n\n    def __le__(self, other: object) -> bool:\n        \"\"\"Checks if this Direction is less than or equal to another.\"\"\"\n\n    def __lt__(self, other: object) -> bool:\n        \"\"\"Checks if this Direction is less than another.\"\"\"\n\n    def __ne__(self, other: object) -> bool:\n        \"\"\"Checks if two Direction objects are not equal.\"\"\"\n
• Backward: Enum-type class variable of Direction that specifies when data is streamed backward.

• Forward: Enum-type class variable of Direction that specifies when data is streamed forward.

      "},{"location":"api_reference/#class-inputtriggers","title":"Class InputTriggers","text":"

      The InputTriggers class represents input triggers for a camera device.

      class InputTriggers:\n    acquisition_start: Trigger\n    exposure: Trigger\n    frame_start: Trigger\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the InputTriggers attributes.\"\"\"\n
      • acquisition_start: An instance of the Trigger class representing the trigger for starting acquisition.

      • exposure: An instance of the Trigger class representing the trigger for exposure.

      • frame_start: An instance of the Trigger class representing the trigger for starting a frame.

• The dict method creates a dictionary of an InputTriggers object's attributes.

      "},{"location":"api_reference/#class-outputtriggers","title":"Class OutputTriggers","text":"

      The OutputTriggers class represents output triggers for a camera device.

      class OutputTriggers:\n    exposure: Trigger\n    frame_start: Trigger\n    trigger_wait: Trigger\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the OutputTriggers attributes.\"\"\"\n
      • exposure: An instance of the Trigger class representing the trigger for exposure.

      • frame_start: An instance of the Trigger class representing the trigger for starting a frame.

      • trigger_wait: An instance of the Trigger class representing the trigger for waiting before continuing acquisition.

• The dict method creates a dictionary of an OutputTriggers object's attributes.

      "},{"location":"api_reference/#class-pid","title":"Class PID","text":"

      The PID class represents proportional-integral-derivative (PID) values.

      class PID:\n    derivative: float\n    integral: float\n    proportional: float\n\n    def __init__(self, *args: None, **kwargs: Any) -> None: ...\n    \"\"\"Initializes a PID object with optional arguments.\"\"\"\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the PID attributes.\"\"\"\n
      • derivative: The derivative value for the PID.

      • integral: The integral value for the PID.

      • proportional: The proportional value for the PID.

      • The dict method creates a dictionary of a PID object's attributes.

      "},{"location":"api_reference/#class-properties","title":"Class Properties","text":"

      The Properties class represents properties related to video streams.

class Properties:\n    video: Tuple[VideoStream, VideoStream]\n\n    def __init__(self, *args: None, **kwargs: Any) -> None: ...\n    \"\"\"Initializes a Properties object with optional arguments.\"\"\"\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the Properties attributes.\"\"\"\n
• video: A tuple containing two VideoStream instances, since Acquire supports simultaneous streaming from 2 video sources. VideoStream objects have 2 attributes, camera and storage, which set the source and sink for the stream.

      • The dict method creates a dictionary of a Properties object's attributes.

      "},{"location":"api_reference/#class-runtime","title":"Class Runtime","text":"

The Runtime class coordinates the devices with storage, including selecting the devices, setting their properties, and starting and stopping acquisition.

      class Runtime:\n    def __init__(self, *args: None, **kwargs: Any) -> None:\n        \"\"\"Initializes the Runtime object with optional arguments.\"\"\"\n\n    def device_manager(self) -> DeviceManager:\n        \"\"\"Returns the DeviceManager instance associated with this Runtime.\"\"\"\n\n    def get_available_data(self, stream_id: int) -> AvailableData:\n        \"\"\"Returns the AvailableData instance for the given stream ID.\n\n        Args:\n            stream_id (int): The ID of the stream for which available data is requested.\n\n        Returns:\n            AvailableData: The AvailableData instance for the given VideoStream ID.\n        \"\"\"\n\n    def get_configuration(self) -> Properties:\n        \"\"\"Returns the current configuration properties of the runtime.\"\"\"\n\n    def get_state(self) -> DeviceState:\n        \"\"\"Returns the current state of the device.\"\"\"\n\n    def set_configuration(self, properties: Properties) -> Properties:\n        \"\"\"Applies the provided configuration properties to the runtime.\n\n        Args:\n            properties (Properties): The properties to be set.\n\n        Returns:\n            Properties: The updated configuration properties.\n        \"\"\"\n\n    def start(self) -> None:\n        \"\"\"Starts the runtime, allowing it to collect data.\"\"\"\n\n    def stop(self) -> None:\n        \"\"\"Stops the runtime, ending data collection after the max number of frames is collected.\"\"\"\n\n    def abort(self) -> None:\n        \"\"\"Aborts the runtime, terminating it immediately.\"\"\"\n
      • Call device_manager() to return the DeviceManager object associated with this Runtime instance.

• Call get_available_data with a specific stream_id, 0 or 1, to return the AvailableData associated with the first or second video source, respectively.

      • Call get_configuration() to return the Properties object associated with this Runtime instance.

      • Call get_state() to return the DeviceState object associated with this Runtime instance.

      • Call set_configuration with a Properties object to change the properties of this Runtime instance.

      • Call start() to begin data acquisition.

      • Call stop() to end data acquisition once the max number of frames specified in acquire.VideoStream.max_frame_count is collected. All objects are deleted to free up disk space upon shutdown of Runtime.

• Call abort() to immediately end data acquisition. All objects are deleted to free up disk space upon shutdown of Runtime.
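
Putting these together, a hedged end-to-end sketch (the simulated camera name and output filename are illustrative):

import acquire\n\nruntime = acquire.Runtime()\ndm = runtime.device_manager()\n\nprops = runtime.get_configuration()\nprops.video[0].camera.identifier = dm.select(acquire.DeviceKind.Camera, \"simulated.*\")\nprops.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, \"Tiff\")\nprops.video[0].storage.settings.filename = \"out.tif\"\nprops.video[0].max_frame_count = 10\nprops = runtime.set_configuration(props)\n\nruntime.start()  # acquisition happens on a separate thread\nprint(runtime.get_state())  # expect DeviceState.Running while streaming\nruntime.stop()  # wait for all 10 frames, then shut down\n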

      "},{"location":"api_reference/#class-sampleratehz","title":"Class SampleRateHz","text":"

      The SampleRateHz class represents the sampling rate in hertz.

      class SampleRateHz:\n    numerator: int\n    denominator: int\n\n    def __init__(self, *args: None, **kwargs: Any) -> None: ...\n    \"\"\"Initializes a SampleRateHz object with optional arguments.\"\"\"\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the SampleRateHz attributes.\"\"\"\n
      • numerator: The numerator part of the sampling rate fraction.

      • denominator: The denominator part of the sampling rate fraction.

      • The dict method creates a dictionary of a SampleRateHz object's attributes.

      "},{"location":"api_reference/#class-sampletype","title":"Class SampleType","text":"

      The SampleType class defines the type of the values in the streamed data.

      class SampleType:\n    F32: ClassVar[SampleType] = SampleType.F32\n    I16: ClassVar[SampleType] = SampleType.I16\n    I8: ClassVar[SampleType] = SampleType.I8\n    U16: ClassVar[SampleType] = SampleType.U16\n    U8: ClassVar[SampleType] = SampleType.U8\n    U10: ClassVar[SampleType] = SampleType.U10\n    U12: ClassVar[SampleType] = SampleType.U12\n    U14: ClassVar[SampleType] = SampleType.U14\n\n    def __eq__(self, other: object) -> bool:\n        \"\"\"Checks if two SampleType objects are equal.\"\"\"\n\n    def __ge__(self, other: object) -> bool:\n        \"\"\"Checks if this SampleType is greater than or equal to another.\"\"\"\n\n    def __gt__(self, other: object) -> bool:\n        \"\"\"Checks if this SampleType is greater than another.\"\"\"\n\n    def __int__(self) -> int:\n        \"\"\"Converts the SampleType to an integer.\"\"\"\n\n    def __le__(self, other: object) -> bool:\n        \"\"\"Checks if this SampleType is less than or equal to another.\"\"\"\n\n    def __lt__(self, other: object) -> bool:\n        \"\"\"Checks if this SampleType is less than another.\"\"\"\n\n    def __ne__(self, other: object) -> bool:\n        \"\"\"Checks if two SampleType objects are not equal.\"\"\"\n
      • F32: Enum-type class variable of SampleType that specifies values of 32-bit floating point type.

      • I16: Enum-type class variable of SampleType that specifies values of 16-bit signed integer type.

      • I8: Enum-type class variable of SampleType that specifies values of 8-bit signed integer type.

      • U16: Enum-type class variable of SampleType that specifies values of 16-bit unsigned integer type.

      • U8: Enum-type class variable of SampleType that specifies values of 8-bit unsigned integer type.

      • U10: Enum-type class variable of SampleType that specifies values of 10-bit unsigned integer type.

      • U12: Enum-type class variable of SampleType that specifies values of 12-bit unsigned integer type.

      • U14: Enum-type class variable of SampleType that specifies values of 14-bit unsigned integer type.

      "},{"location":"api_reference/#class-signaliokind","title":"Class SignalIOKind","text":"

      The SignalIOKind class defines the signal type, input or output, for a trigger.

      class SignalIOKind:\n    Input: ClassVar[SignalIOKind] = SignalIOKind.Input\n    Output: ClassVar[SignalIOKind] = SignalIOKind.Output\n\n    def __eq__(self, other: object) -> bool:\n        \"\"\"Checks if two SignalIOKind objects are equal.\"\"\"\n\n    def __ge__(self, other: object) -> bool:\n        \"\"\"Checks if this SignalIOKind is greater than or equal to another.\"\"\"\n\n    def __gt__(self, other: object) -> bool:\n        \"\"\"Checks if this SignalIOKind is greater than another.\"\"\"\n\n    def __int__(self) -> int:\n        \"\"\"Converts the SignalIOKind to an integer.\"\"\"\n\n    def __le__(self, other: object) -> bool:\n        \"\"\"Checks if this SignalIOKind is less than or equal to another.\"\"\"\n\n    def __lt__(self, other: object) -> bool:\n        \"\"\"Checks if this SignalIOKind is less than another.\"\"\"\n\n    def __ne__(self, other: object) -> bool:\n        \"\"\"Checks if two SignalIOKind objects are not equal.\"\"\"\n
      • Input: Enum-type class variable of SignalIOKind that specifies signal coming in to the device.

      • Output: Enum-type class variable of SignalIOKind that specifies signal sent out of the device.

      "},{"location":"api_reference/#class-signaltype","title":"Class SignalType","text":"

      The SignalType class specifies whether a signal is analog or digital.

class SignalType:\n    Analog: ClassVar[SignalType] = SignalType.Analog\n    Digital: ClassVar[SignalType] = SignalType.Digital\n\n    def __eq__(self, other: object) -> bool:\n        \"\"\"Checks if two SignalType objects are equal.\"\"\"\n\n    def __ge__(self, other: object) -> bool:\n        \"\"\"Checks if this SignalType is greater than or equal to another.\"\"\"\n\n    def __gt__(self, other: object) -> bool:\n        \"\"\"Checks if this SignalType is greater than another.\"\"\"\n\n    def __int__(self) -> int:\n        \"\"\"Converts the SignalType to an integer.\"\"\"\n\n    def __le__(self, other: object) -> bool:\n        \"\"\"Checks if this SignalType is less than or equal to another.\"\"\"\n\n    def __lt__(self, other: object) -> bool:\n        \"\"\"Checks if this SignalType is less than another.\"\"\"\n\n    def __ne__(self, other: object) -> bool:\n        \"\"\"Checks if two SignalType objects are not equal.\"\"\"\n
      • Analog: Enum-type class variable of SignalType that specifies a signal is analog.

• Digital: Enum-type class variable of SignalType that specifies a signal is digital.

      "},{"location":"api_reference/#class-storage","title":"Class Storage","text":"

      The Storage class represents storage devices and their settings.

      class Storage:\n    identifier: Optional[DeviceIdentifier]\n    settings: StorageProperties\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the Storage attributes.\"\"\"\n
      • identifier: An optional attribute which contains an instance of the DeviceIdentifier class that describes the storage device if that device is natively supported. Otherwise, it is of type None.

      • settings: An instance of the StorageProperties class which contains the settings for the data storage.

      • The dict method creates a dictionary of a Storage object's attributes.

      "},{"location":"api_reference/#class-storageproperties","title":"Class StorageProperties","text":"

      The StorageProperties class represents properties for data storage.

      class StorageProperties:\n    external_metadata_json: Optional[str]\n    filename: Optional[str]\n    first_frame_id: int\n    pixel_scale_um: Tuple[float, float]\n    chunking: ChunkingProperties\n    enable_multiscale: bool\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the StorageProperties attributes.\"\"\"\n
      • external_metadata_json: An optional attribute of the metadata JSON filename as a string.

      • filename: An optional attribute representing the filename for storing the image data.

      • first_frame_id: An integer representing the ID of the first frame for a given acquisition.

      • pixel_scale_um: A tuple of two floats representing the pixel size of the camera in micrometers.

      • chunking: An instance of the ChunkingProperties class representing data chunking settings for Zarr storage.

      • enable_multiscale: A boolean indicating whether multiscale storage is enabled.

      • The dict method creates a dictionary of a StorageProperties object's attributes.
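
A short sketch of configuring these attributes for a Zarr sink (the filename and values are illustrative):

props.video[0].storage.settings.filename = \"dataset.zarr\"  # where to write the data\nprops.video[0].storage.settings.pixel_scale_um = (0.65, 0.65)  # camera pixel size in micrometers\nprops.video[0].storage.settings.chunking.max_bytes_per_chunk = 32 * 2**20  # 32 MiB per chunk\nprops.video[0].storage.settings.enable_multiscale = False  # single-resolution output\n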

      "},{"location":"api_reference/#class-tileshape","title":"Class TileShape","text":"

      The TileShape class represents the shape of data chunks for storage in Zarr containers.

      class TileShape:\n    width: int\n    height: int\n    planes: int\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the TileShape attributes.\"\"\"\n
      • width: The width of the chunk.

      • height: The height of the chunk.

      • planes: The number of planes in the chunk.

      • The dict method creates a dictionary of a TileShape object's attributes.

      "},{"location":"api_reference/#class-trigger","title":"Class Trigger","text":"

      The Trigger class represents a trigger signal.

      class Trigger:\n    edge: TriggerEdge\n    enable: bool\n    line: int\n    kind: SignalIOKind\n\n    def __init__(self, *args: None, **kwargs: Any) -> None: ...\n    \"\"\"Initializes a Trigger object with optional arguments.\"\"\"\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the Trigger attributes.\"\"\"\n
      • edge: An instance of the TriggerEdge class specifying if the trigger is on the rising or falling edge trigger signal.

      • enable: A boolean indicating whether the trigger is enabled.

• line: An integer representing the hardware line on which the trigger signal is sent or received.

      • kind: An instance of the SignalIOKind class specifying if the signal is input or output.

      • The dict method creates a dictionary of a Trigger object's attributes.

      "},{"location":"api_reference/#class-triggeredge","title":"Class TriggerEdge","text":"

The TriggerEdge class represents which edge of the trigger signal initiates the trigger.

      class TriggerEdge:\n    Falling: ClassVar[TriggerEdge] = TriggerEdge.Falling\n    NotApplicable: ClassVar[TriggerEdge] = TriggerEdge.NotApplicable\n    Rising: ClassVar[TriggerEdge] = TriggerEdge.Rising\n\n    def __eq__(self, other: object) -> bool:\n        \"\"\"Checks if two TriggerEdge objects are equal.\"\"\"\n\n    def __ge__(self, other: object) -> bool:\n        \"\"\"Checks if this TriggerEdge is greater than or equal to another.\"\"\"\n\n    def __gt__(self, other: object) -> bool:\n        \"\"\"Checks if this TriggerEdge is greater than another.\"\"\"\n\n    def __int__(self) -> int:\n        \"\"\"Converts the TriggerEdge to an integer.\"\"\"\n\n    def __le__(self, other: object) -> bool:\n        \"\"\"Checks if this TriggerEdge is less than or equal to another.\"\"\"\n\n    def __lt__(self, other: object) -> bool:\n        \"\"\"Checks if this TriggerEdge is less than another.\"\"\"\n\n    def __ne__(self, other: object) -> bool:\n        \"\"\"Checks if two TriggerEdge objects are not equal.\"\"\"\n
      • Falling: Enum-type class variable of TriggerEdge that defines the falling edge of the trigger.

• NotApplicable: Enum-type class variable of TriggerEdge for when a trigger does not have a rising or falling edge.

      • Rising: Enum-type class variable of TriggerEdge that defines the rising edge of the trigger.

      "},{"location":"api_reference/#class-videoframe","title":"Class VideoFrame","text":"

      The VideoFrame class represents data from acquisition of a frame.

      class VideoFrame:\n    def data(self) -> NDArray[Any]:\n        \"\"\"Returns the data of the video frame as an NDArray.\"\"\"\n\n    def metadata(self) -> VideoFrameMetadata:\n        \"\"\"Returns the metadata associated with the video frame.\"\"\"\n
      • Call data() to create an NDArray of the VideoFrame data.

      • Call metadata() to create a VideoFrameMetadata object containing the metadata of VideoFrame.
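
For example, a small sketch reading each frame's pixels and metadata (assuming an AvailableData object named available):

for frame in available.frames():\n    image = frame.data()  # NDArray of pixel values\n    meta = frame.metadata()  # VideoFrameMetadata for this frame\n    print(meta.frame_id, image.shape)\n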

      "},{"location":"api_reference/#class-videoframemetadata","title":"Class VideoFrameMetadata","text":"

      The VideoFrameMetadata class represents metadata related to a video frame.

      class VideoFrameMetadata:\n    frame_id: int\n    timestamps: VideoFrameTimestamps\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the VideoFrameMetadata attributes.\"\"\"\n
      • frame_id: An integer representing the ID of the video frame.

      • timestamps: An instance of the VideoFrameTimestamps class specifying the video timestamps based on the hardware clock and the acquisition clock.

• The dict method creates a dictionary of a VideoFrameMetadata object's attributes.

      "},{"location":"api_reference/#class-videoframetimestamps","title":"Class VideoFrameTimestamps","text":"

      The VideoFrameTimestamps class represents timestamps related to a video frame.

      class VideoFrameTimestamps:\n    hardware: int\n    acq_thread: int\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the VideoFrameTimestamps attributes.\"\"\"\n
      • hardware: An integer representing hardware timestamps.

      • acq_thread: An integer representing timestamps from the acquisition thread.

      • The dict method creates a dictionary of a VideoFrameTimestamps object's attributes.

      "},{"location":"api_reference/#class-videostream","title":"Class VideoStream","text":"

      The VideoStream class represents a video stream.

      class VideoStream:\n    camera: Camera\n    storage: Storage\n    max_frame_count: int\n    frame_average_count: int\n\n    def dict(self) -> Dict[str, Any]: ...\n    \"\"\"Returns a dictionary of the VideoStream attributes.\"\"\"\n
      • camera: An instance of the Camera class representing the camera device for the video stream.

      • storage: An instance of the Storage class representing the storage device for the video stream.

      • max_frame_count: An integer representing the maximum number of frames to acquire.

      • frame_average_count: An integer representing the number of frames to average, if any, before streaming. The default value is 0, which disables this feature. Setting this to 1 will also prevent averaging.

      • The dict method creates a dictionary of a VideoStream object's attributes.

      "},{"location":"api_reference/#class-voltagerange","title":"Class VoltageRange","text":"

      The VoltageRange class represents a range of voltage values.

class VoltageRange:\n    mn: float\n    mx: float\n\n    @overload\n    def __init__(self) -> None: ...\n    \"\"\"Initializes a VoltageRange object.\"\"\"\n\n    @overload\n    def __init__(self, mn: float, mx: float) -> None: ...\n    \"\"\"Initializes a VoltageRange object with mn and mx provided.\"\"\"\n\n    def dict(self) -> Dict[str, float]: ...\n    \"\"\"Returns a dictionary of the VoltageRange attributes.\"\"\"\n
      • mn: A float representing the minimum voltage value.

      • mx: A float representing the maximum voltage value.

      • The dict method creates a dictionary of a VoltageRange object's attributes.

      "},{"location":"get_started/","title":"Getting Started with Acquire","text":"

Acquire (acquire-imaging on PyPI) is a Python package providing a multi-camera video streaming library focused on performant microscopy, with support for up to two simultaneous, independent video streams.

      This tutorial covers Acquire installation and shows an example of using Acquire with its provided simulated cameras to demonstrate the acquisition process.

      "},{"location":"get_started/#installation","title":"Installation","text":"

      To install Acquire on Windows, macOS, or Ubuntu, simply run the following command:

      python -m pip install acquire-imaging\n

      You will probably want to have a fresh conda environment or virtualenv. For example, with conda:

      conda create -n acquire python=3.10 # follow the prompts and proceed with the defaults\nconda activate acquire\npython -m pip install acquire-imaging\n

      or with virtualenv:

      $ python -m venv venv\n$ . ./venv/bin/activate # or on Windows: .\\venv\\Scripts\\Activate.bat or .\\venv\\Scripts\\Activate.ps1\n(venv) $ python -m pip install acquire-imaging\n

      Once you have Acquire installed, simply call import acquire in your script, notebook, or module to start utilizing the package.

      import acquire\n
      "},{"location":"get_started/#supported-cameras-and-file-formats","title":"Supported Cameras and File Formats","text":"

      Acquire supports the following cameras (currently only on Windows):

      • Hamamatsu Orca Fusion BT (C15440-20UP)
      • Vieworks VC-151MX-M6H00
      • FLIR Blackfly USB3 (BFLY-U3-23S6M-C)
      • FLIR Oryx 10GigE (ORX-10GS-51S5M-C)

      Acquire also supports the following output file formats:

      • Tiff
      • Zarr

      For testing and demonstration purposes, Acquire provides a few simulated cameras, as well as raw and trash output devices. To see all the devices that Acquire supports, you can run the following script:

      import acquire\n\nfor device in acquire.Runtime().device_manager().devices():\n    print(device)\n
      "},{"location":"get_started/#tutorial-prerequisites","title":"Tutorial Prerequisites","text":"

      We will be writing to and reading from the Zarr format, using the Dask library to load and inspect the data, and visualizing the data using napari.

      You can install these prerequisites with:

      python -m pip install dask \"napari[all]\" zarr\n
      "},{"location":"get_started/#setup-for-acquisition","title":"Setup for Acquisition","text":"

      We will use one of Acquire's simulated cameras to generate data for us and use Zarr for our output file format.

      Let's set up our runtime and device manager, then list the currently supported devices.

      import acquire\n\nruntime = acquire.Runtime()\ndm = runtime.device_manager()\n\nfor device in dm.devices():\n    print(device)\n
      The runtime is the main entry point in Acquire. Through the runtime, you configure your devices, start acquisition, check acquisition status, inspect data as it streams from your cameras, and terminate acquisition.

Let's configure our devices now. To do this, we'll get a copy of the current runtime configuration. We can update the configuration with identifiers from the runtime's device manager, but we won't actually instantiate these devices until we start acquiring.

      Acquire supports up to two video streams. These streams consist of a source (i.e., a camera), optionally a filter, and a sink (an output, like a Zarr dataset or a Tiff file). Before configuring the streams, grab the current configuration of the Runtime object with:

      config = runtime.get_configuration()\n

      Video streams are configured independently. Configure the first video stream by setting properties on config.video[0] and the second video stream with config.video[1]. We'll be using simulated cameras, one generating a radial sine pattern and one generating a random pattern.

      config.video[0].camera.identifier = dm.select(acquire.DeviceKind.Camera, \"simulated: radial sin\")\n\n# how many adjacent pixels in each direction to combine by averaging; here, 1 means not to combine\nconfig.video[0].camera.settings.binning = 1\n\n# how long (in microseconds) your camera should collect light from the sample; for simulated cameras,\n# this is just a waiting period before generating the next frame\nconfig.video[0].camera.settings.exposure_time_us = 5e4  # 50 ms\n\n# the data type representing each pixel; here we choose unsigned 8-bit integer\nconfig.video[0].camera.settings.pixel_type = acquire.SampleType.U8\n\n# the shape, in pixels, of the image; width first, then height\nconfig.video[0].camera.settings.shape = (1024, 768)\n
      config.video[1].camera.identifier = dm.select(acquire.DeviceKind.Camera, \"simulated: uniform random\")\n\n# how many adjacent pixels in each direction to combine by averaging; here, 1 means not to combine\nconfig.video[1].camera.settings.binning = 1\n\n# how long (in microseconds) your camera should collect light from the sample; for simulated cameras,\n# this is just a waiting period before generating the next frame\nconfig.video[1].camera.settings.exposure_time_us = 1e4  # 10 ms\n\n# the data type representing each pixel; here we choose unsigned 8-bit integer\nconfig.video[1].camera.settings.pixel_type = acquire.SampleType.U8\n\n# the shape, in pixels, of the image; width first, then height\nconfig.video[1].camera.settings.shape = (1280, 720)\n

      Now we'll configure each output, or sink device. For both simulated cameras, we'll be writing to Zarr, a format which supports chunked arrays.

      config.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, \"Zarr\")\n\n# what file or directory to write the data to\nconfig.video[0].storage.settings.filename = \"output1.zarr\"\n\n# where applicable, how large should a chunk file get before opening the next chunk file\nconfig.video[0].storage.settings.chunking.max_bytes_per_chunk = 32 * 2**20  # 32 MiB chunk sizes\n
      config.video[1].storage.identifier = dm.select(acquire.DeviceKind.Storage, \"Zarr\")\n\n# what file or directory to write the data to\nconfig.video[1].storage.settings.filename = \"output2.zarr\"\n\n# where applicable, how large should a chunk file get before opening the next chunk file\nconfig.video[1].storage.settings.chunking.max_bytes_per_chunk = 64 * 2**20  # 64 MiB chunk sizes\n

      Finally, let's specify how many frames to generate for each camera before stopping our simulated acquisition. We also need to register our configuration with the runtime.

      If you want to let the runtime just keep acquiring effectively forever, you can set max_frame_count to 2**64 - 1.

      config.video[0].max_frame_count = 100 # collect 100 frames\nconfig.video[1].max_frame_count = 150 # collect 150 frames\n\nconfig = runtime.set_configuration(config)\n

      Note

      If you run this tutorial multiple times, you can clear output from previous runs with:

      import os\nimport shutil\n\nif config.video[0].storage.settings.filename in os.listdir(\".\"):\n    shutil.rmtree(config.video[0].storage.settings.filename)\n\nif config.video[1].storage.settings.filename in os.listdir(\".\"):\n    shutil.rmtree(config.video[1].storage.settings.filename)\n
      "},{"location":"get_started/#acquire-data","title":"Acquire Data","text":"

To start acquiring data:

      runtime.start()\n

      Acquisition happens in a separate thread, so at any point we can check on the status by calling runtime.get_state().

      runtime.get_state()\n

      Finally, once we're done acquiring, we call runtime.stop(). This method will wait until you've reached the number of frames specified in config.video[0].max_frame_count or config.video[1].max_frame_count, whichever is larger.

      runtime.stop()\n
      "},{"location":"get_started/#visualizing-the-data-with-napari","title":"Visualizing the Data with napari","text":"

      Let's take a look at what we've written. We'll load each Zarr dataset as a Dask array and inspect its dimensions, then we'll use napari to view it.

      import dask.array as da\nimport napari\n
      data1 = da.from_zarr(config.video[0].storage.settings.filename, component=\"0\")\ndata1\n
      data2 = da.from_zarr(config.video[1].storage.settings.filename, component=\"0\")\ndata2\n
      viewer1 = napari.view_image(data1)\n
      viewer2 = napari.view_image(data2)\n
      "},{"location":"get_started/#conclusion","title":"Conclusion","text":"

For more examples of using Acquire, check out our tutorials page.

      "},{"location":"for_contributors/","title":"For contributors","text":"

      Documentation for those looking to contribute to the Acquire project.

      GitHub repositories: https://github.com/acquire-project

      "},{"location":"for_contributors/docs_contribution_quickstart/","title":"Quick Start Acquire Docs Contribution Guide","text":"
      1. Make sure you have a fresh environment with the latest mkdocs and mkdocs-material installed. You can install them with pip install -r requirements.txt from the root of the repository.
      2. Your pages should be written as markdown files, using the basic markdown syntax or following the mkdocs or material for mkdocs syntax.
      3. Pages can be added to the top level menu or submenus by editing the mkdocs.yml file. The order of the pages in the menu is determined by the order of the pages in the mkdocs.yml file. Subpages can be added by creating subfolders in the docs/ folder (see, for example, the docs/tutorials/ folder).
      4. To add images, place them in the docs/images/ folder and reference them in your markdown files using the relative path ../images/your_image.png.
      5. Custom CSS configuration goes into the docs/stylesheets/custom.css file.
      6. To build the website locally, after activating your environment (either using conda activate <your-environment> or source activate <your-env>, for example), run mkdocs serve to start a local server. You can then view the website at the URL indicated on your console.
      "},{"location":"tutorials/","title":"Tutorials","text":"

These tutorials will help you explore the main use cases of Acquire and show examples of using the API. Please submit an issue on GitHub if you'd like to request a tutorial. If you are interested in contributing a tutorial to this documentation, please visit our contribution guide.

      "},{"location":"tutorials/chunked/","title":"Chunking Data for Zarr Storage","text":"

      This tutorial will provide an example of writing chunked data to a Zarr storage device.

      Zarr has additional capabilities relative to the basic storage devices, namely chunking, compression, and multiscale storage. To enable chunking, set the attributes in an instance of the ChunkingProperties class. You can learn more about the Zarr capabilities in Acquire here.

      "},{"location":"tutorials/chunked/#configure-runtime","title":"Configure Runtime","text":"

      To start, we'll create a Runtime object and configure the streaming process, selecting Zarr as the storage device to enable chunking the data.

import acquire\n\n# Initialize a Runtime object\nruntime = acquire.Runtime()\n\n# Initialize the device manager\ndm = runtime.device_manager()\n\n# Grab the current configuration\nconfig = runtime.get_configuration()\n\n# Select the radial sine simulated camera as the video source\nconfig.video[0].camera.identifier = dm.select(acquire.DeviceKind.Camera, \"simulated: radial sin\")\n\n# Set the storage to Zarr to take advantage of chunking\nconfig.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, \"Zarr\")\n\n# Set the time for collecting data for each frame\nconfig.video[0].camera.settings.exposure_time_us = 5e4  # 50 ms\n\n# Size of the image region of interest on the camera (x, y)\nconfig.video[0].camera.settings.shape = (1920, 1080)\n\n# Specify the pixel datatype as uint8\nconfig.video[0].camera.settings.pixel_type = acquire.SampleType.U8\n\n# Set the max frame count\nconfig.video[0].max_frame_count = 10 # collect 10 frames\n\n# Set the output file to out.zarr\nconfig.video[0].storage.settings.filename = \"out.zarr\"\n
Below we'll configure the chunking-specific settings.

# Chunk size may need to be optimized for each acquisition.\n# See the Zarr documentation for further guidance: https://zarr.readthedocs.io/en/stable/tutorial.html#chunk-optimizations\nconfig.video[0].storage.settings.chunking.max_bytes_per_chunk = 32 * 2**20 # 32 MiB\n\n# Set the x, y dimensions of each chunk to 1/2 of the width and height of the image, generating 4 chunks\nconfig.video[0].storage.settings.chunking.tile.width = 1920 // 2\nconfig.video[0].storage.settings.chunking.tile.height = 1080 // 2\n\n# Update the configuration with the chosen parameters\nconfig = runtime.set_configuration(config)\n
      "},{"location":"tutorials/chunked/#collect-and-inspect-the-data","title":"Collect and Inspect the Data","text":"
      # collect data\nruntime.start()\nruntime.stop()\n

You can inspect the Zarr file directory to check that the data was saved as expected. Alternatively, you can inspect the data programmatically with:

      # Utilize the zarr library to open the data\nimport zarr\n\n# create a zarr Group object\ngroup = zarr.open(config.video[0].storage.settings.filename)\n\n# check how many directories are in the zarr container\nassert len(group) == 1\n\n# inspect the characteristics of the data\ngroup[\"0\"]\n

      The output will be:

      <zarr.core.Array '/0' (10, 1, 1080, 1920) uint8>\n
As expected, we have only 1 top-level directory, corresponding to the single array in the group (we would expect more than 1 array only if we were writing multiscale data). The overall array shape is (10, 1, 1080, 1920), corresponding to 10 frames, 1 channel, and a height and width of 1080 and 1920, respectively, per frame.

      "},{"location":"tutorials/compressed/","title":"Writing to Compressed Zarr Files","text":"

      This tutorial will provide an example of writing compressed data to a Zarr file.

      Acquire supports streaming compressed data to the ZarrBlosc1* storage devices. Compression is done via Blosc. Supported codecs are lz4 and zstd, available with ZarrBlosc1Lz4ByteShuffle and ZarrBlosc1ZstdByteShuffle devices, respectively. For a comparison of these codecs, please refer to the Blosc docs. You can learn more about the Zarr capabilities in Acquire here.

      "},{"location":"tutorials/compressed/#configure-runtime","title":"Configure Runtime","text":"

      To start, we'll create a Runtime object and configure the streaming process, selecting ZarrBlosc1ZstdByteShuffle as the storage device to enable compressing the data.

import acquire\n\n# Initialize a Runtime object\nruntime = acquire.Runtime()\n\n# Initialize the device manager\ndm = runtime.device_manager()\n\n# Grab the current configuration\nconfig = runtime.get_configuration()\n\n# Select the radial sine simulated camera as the video source\nconfig.video[0].camera.identifier = dm.select(acquire.DeviceKind.Camera, \"simulated: radial sin\")\n\n# Set the storage to ZarrBlosc1ZstdByteShuffle to compress the data as it is saved\nconfig.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, \"ZarrBlosc1ZstdByteShuffle\")\n\n# Set the time for collecting data for each frame\nconfig.video[0].camera.settings.exposure_time_us = 5e4  # 50 ms\n\nconfig.video[0].camera.settings.shape = (1024, 768)\n\n# Set the max frame count\nconfig.video[0].max_frame_count = 100 # collect 100 frames\n\n# Set the output file to out.zarr\nconfig.video[0].storage.settings.filename = \"out.zarr\"\n\n# Update the configuration with the chosen parameters\nconfig = runtime.set_configuration(config)\n
      "},{"location":"tutorials/compressed/#inspect-acquired-data","title":"Inspect Acquired Data","text":"

      Now that the configuration is set to utilize the ZarrBlosc1ZstdByteShuffle storage device, we can acquire data, which will be compressed before it is stored to out.zarr. Since we did not specify the size of chunks, the data will be saved as a single chunk that is the size of the image data. You may specify chunk sizes using the TileShape class. For example, using acquire.StorageProperties.chunking.tile.width to set the width of the chunks.
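
If you did want to control chunk sizes, a hedged sketch (the tile values are illustrative) would be applied before set_configuration:

# Split each 1024 x 768 frame into 2x2 chunk tiles\nconfig.video[0].storage.settings.chunking.tile.width = 1024 // 2\nconfig.video[0].storage.settings.chunking.tile.height = 768 // 2\n\nconfig = runtime.set_configuration(config)\n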

      # acquire data\nruntime.start()\nruntime.stop()\n
We'll use the Zarr Python package to read the data in the out.zarr file.
      # We'll utilize the Zarr python package to read the data\nimport zarr\n\n# load from Zarr\ncompressed = zarr.open(config.video[0].storage.settings.filename)\n

We'll print some of the data properties to illustrate how the data was compressed. Since we have not enabled multiscale output, out.zarr will only have one top-level array, \"0\".

      # All of the data is stored in the \"0\" directory since the data was stored as a single chunk.\ndata = compressed[\"0\"]\n\nprint(data.compressor.cname)\nprint(data.compressor.clevel)\nprint(data.compressor.shuffle)\n

      Output:

      zstd\n1\n1\n
      As expected, the data was compressed using the zstd codec.

      "},{"location":"tutorials/configure/","title":"Configure an Acquisition","text":"

This tutorial will provide an in-depth explanation of setting configuration properties and demonstrate the relationships between various Acquire classes, such as CameraProperties and StorageProperties, used in the configuration process. In this example, we'll only configure one video source.

      "},{"location":"tutorials/configure/#initialize-runtime","title":"Initialize Runtime","text":"

      Runtime is the main entry point in Acquire. Through the runtime, you configure your devices, start acquisition, check acquisition status, inspect data as it streams from your cameras, and terminate acquisition. The device_manager method in Runtime creates an instance of the DeviceManager class. The get_configuration method in Runtime creates an instance of the Properties class. To configure the acquisition, we'll use those two methods to grab the configuration and to initialize a DeviceManager to set the attributes of Properties and related classes.

      import acquire\n\n# Initialize a Runtime object\nruntime = acquire.Runtime()\n\n# Initialize the device manager\ndm = runtime.device_manager()\n\n# Grab the current configuration\nconfig = runtime.get_configuration()\n
      "},{"location":"tutorials/configure/#utilize-devicemanager","title":"Utilize DeviceManager","text":"

      DeviceManager contains a devices method which creates a list of DeviceIdentifier objects each representing a discovered camera or storage device. Each DeviceIdentifier has an attribute kind that is a DeviceKind object, which has attributes specifying whether the device is a camera or storage device, as well as Signals and StageAxes attributes. The Signals and StageAxes attributes would apply to device kinds such as stages, which are not yet supported by Acquire.
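For instance, a quick sketch that prints the kind of each discovered device:

# print the kind of every device the manager discovered\nfor d in dm.devices():\n    print(d.kind, d)\n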

      DeviceManager has 2 methods for selecting devices for the camera and storage. For more information on these methods, check out the Device Selection tutorial. We'll use the select method in this example to choose a specific device.

      # Select the radial sine simulated camera as the video source\nconfig.video[0].camera.identifier = dm.select(acquire.DeviceKind.Camera, \"simulated: radial sin\") \n\n# Set the storage to Tiff\nconfig.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, \"Tiff\")\n
      "},{"location":"tutorials/configure/#properties-class-explanation","title":"Properties Class Explanation","text":"

Using Runtime's get_configuration method we created config, an instance of the Properties class. Properties contains only one attribute, video, which is a tuple of VideoStream objects, since Acquire currently supports two simultaneous video streams. To configure the first video stream, we'll index this tuple to select the first VideoStream object config.video[0].

VideoStream objects have 2 attributes, camera and storage, which are instances of the Camera and Storage classes, respectively, and will be used to set the attributes of the selected camera device simulated: radial sin and storage device Tiff. The other attributes of VideoStream are integers that specify the maximum number of frames to collect and how many frames to average, if any, before storing the data. The frame_average_count attribute has a default value of 0, which disables this feature.
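For instance, those two integers can be set directly on the VideoStream; a minimal sketch:

# collect at most 100 frames; 0 disables frame averaging (the default)\nconfig.video[0].max_frame_count = 100\nconfig.video[0].frame_average_count = 0\n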

      "},{"location":"tutorials/configure/#configure-camera","title":"Configure Camera","text":"

      Camera class objects have 2 attributes, settings, a CameraProperties object, and an optional attribute identifier, which is a DeviceIdentifier object.

CameraProperties has 5 attributes that specify the exposure time and line interval in microseconds, how many pixels, if any, to bin (set to 1 by default to disable binning), and tuples for the image size and location on the camera chip. The other attributes are all instances of different classes. The pixel_type attribute is a SampleType object which indicates the data type of the pixel values in the image, such as Uint8. The readout_direction attribute is a Direction object specifying whether the data is read forwards or backwards from the camera. The input_triggers attribute is an InputTriggers object that details the characteristics of any input triggers in the system. The output_triggers attribute is an OutputTriggers object that details the characteristics of any output triggers in the system. All of the attributes of InputTriggers and OutputTriggers objects are instances of the Trigger class. The Trigger class is described in this tutorial.

      We'll configure some camera settings below.

# Set the time for collecting data for each frame\nconfig.video[0].camera.settings.exposure_time_us = 5e4  # 50 ms\n\n# (x, y) size of the image in pixels\nconfig.video[0].camera.settings.shape = (1024, 768)\n\n# Specify the pixel type as Uint32\nconfig.video[0].camera.settings.pixel_type = acquire.SampleType.U32\n
      "},{"location":"tutorials/configure/#configure-storage","title":"Configure Storage","text":"

      Storage objects have 2 attributes, settings, a StorageProperties object, and an optional attribute identifier, which is an instance of the DeviceIdentifier class described above.

StorageProperties has 2 string attributes, external_metadata_json and filename, which specify the output metadata (in JSON format) and the name of the output image data file (in whatever format corresponds to the selected storage device), respectively. first_frame_id is an integer ID that corresponds to the first frame of the current acquisition and is typically 0. pixel_scale_um is the pixel size in microns. enable_multiscale is a boolean used to specify if the data should be saved as an image pyramid. See the multiscale tutorial for more information. The chunking attribute is an instance of the ChunkingProperties class, used for Zarr storage. See the chunking tutorial for more information.

      We'll specify the name of the output image file below.

      # Set the output file to out.tiff\nconfig.video[0].storage.settings.filename = \"out.tiff\"\n
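The other StorageProperties fields are set the same way. A brief sketch with an illustrative value:

# optionally record the pixel size in microns\nconfig.video[0].storage.settings.pixel_scale_um = (0.5, 0.5)  # illustrative value\n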
      "},{"location":"tutorials/configure/#update-configuration-settings","title":"Update Configuration Settings","text":"

      None of the configuration settings are updated in Runtime until the set_configuration method is called. We'll be creating a new Properties object with the set_configuration method. For simplicity, we'll reuse config for the name of that object as well, but note that new_config = runtime.set_configuration(config) also works here.

      # Update the configuration with the chosen parameters \nconfig = runtime.set_configuration(config) \n
      "},{"location":"tutorials/drivers/","title":"Test Camera Drivers","text":"

      This tutorial will cover testing that your camera has been properly identified.

      Acquire supports the following cameras (currently only on Windows):

      • Hamamatsu Orca Fusion BT (C15440-20UP)
      • Vieworks VC-151MX-M6H00
      • FLIR Blackfly USB3 (BFLY-U3-23S6M-C)
      • FLIR Oryx 10GigE (ORX-10GS-51S5M-C)

      Acquire provides the following simulated cameras:

      • simulated: uniform random - Produces uniform random noise for each pixel.
      • simulated: radial sin - Produces an animated radial sine wave pattern.
      • simulated: empty - Produces no data, leaving a blank image. This camera simulates acquiring as fast as possible.

      Acquire will only identify cameras whose drivers are present on your machine. The DeviceManager class manages selection of cameras and storage. We can create a DeviceManager object using the following:

      import acquire \n\n# Instantiate a Runtime object\nruntime = acquire.Runtime()\n\n# Instantiate a DeviceManager object for the Runtime\nmanager = runtime.device_manager()\n

DeviceManager objects have a devices method which lists the identifiers for discovered devices. You can iterate over this list to determine which cameras were recognized.

      for device in manager.devices():\n    print(device)\n
      The output of this code is below. All identified cameras will be listed, and in the case of this tutorial, no cameras were connected to the machine, so only simulated cameras were found. Note that any storage devices will also print.

      <DeviceIdentifier Camera \"simulated: uniform random\">\n<DeviceIdentifier Camera \"simulated: radial sin\">\n<DeviceIdentifier Camera \"simulated: empty\">\n\n# storage devices will also print\n<DeviceIdentifier Storage \"raw\">\n<DeviceIdentifier Storage \"tiff\">\n<DeviceIdentifier Storage \"trash\">\n<DeviceIdentifier Storage \"tiff-json\">\n<DeviceIdentifier Storage \"Zarr\">\n<DeviceIdentifier Storage \"ZarrBlosc1ZstdByteShuffle\">\n<DeviceIdentifier Storage \"ZarrBlosc1Lz4ByteShuffle\">\n

For cameras that weren't discovered, you will see an error like the one below. These errors will not affect performance and can be ignored.

      ERROR acquire.runtime 2023-10-20 19:03:17,917 runtime.rs:40 C:\\actions-runner\\_work\\acquire-driver-hdcam\\acquire-driver-hdcam\\src\\acquire-core-libs\\src\\acquire-device-hal\\device\\hal\\loader.c:114 - driver_load(): Failed to load driver at \"acquire-driver-hdcam\".\n
      "},{"location":"tutorials/framedata/","title":"Accessing Data from the Video Source","text":"

      This tutorial will provide an example of accessing data from a video source during acquisition.

      "},{"location":"tutorials/framedata/#configure-runtime","title":"Configure Runtime","text":"

      To start, we'll create a Runtime object and configure the streaming process.

import acquire\n\n# Initialize a Runtime object\nruntime = acquire.Runtime()\n\n# Initialize the device manager\ndm = runtime.device_manager()\n\n# Grab the current configuration\nconfig = runtime.get_configuration() \n\n# Select the radial sine simulated camera as the video source\nconfig.video[0].camera.identifier = dm.select(acquire.DeviceKind.Camera, \"simulated: radial sin\") \n\n# Set the storage to trash to avoid saving the data\nconfig.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, \"Trash\")\n\n# Set the time for collecting data for each frame\nconfig.video[0].camera.settings.exposure_time_us = 5e4  # 50 ms\n\nconfig.video[0].camera.settings.shape = (1024, 768)\n\n# Set the max frame count\nconfig.video[0].max_frame_count = 100 # collect 100 frames\n\n# Update the configuration with the chosen parameters \nconfig = runtime.set_configuration(config) \n
      "},{"location":"tutorials/framedata/#working-with-availabledata-objects","title":"Working with AvailableData objects","text":"

During acquisition, the AvailableData object is the streaming interface, and this class has a frames method which iterates over the VideoFrame objects in AvailableData. Once we start acquisition, we'll utilize this iterator method to list the frames.

# To increase the likelihood of `AvailableData` containing data, we'll utilize the time python package to introduce a delay before we create our `AvailableData` object\n\nimport time\n\n# start acquisition\nruntime.start()\n\n# time delay of 0.5 seconds\ntime.sleep(0.5)\n\n# grab the packet of data available for video stream 0. This is an AvailableData object.\navailable_data = runtime.get_available_data(0) \n
Once get_available_data() is called, the AvailableData object will be locked into memory, so the circular buffer that stores the available data will overflow if AvailableData isn't released.

      There may not be data available, in which case our variable available_data would be None. To avoid errors associated with this circumstance, we'll only grab data if available_data is not None.

# NoneType if there is no available data. We can only grab frames if data is available.\nif available_data is not None:\n    # frames is an iterator over available_data, so we'll use this iterator to make a list of the frames\n    video_frames = list(available_data.frames())\nelse:\n    # no data was available; delete the variable rather than holding a stale reference\n    del available_data\n
      video_frames is a list with each element being an instance of the VideoFrame class. VideoFrame has a data method which provides the frame as an NDArray. The shape of this NDArray corresponds to the image dimensions used internally by Acquire. Since we have a single channel, both the first and the last dimensions will be 1. The interior dimensions will be height and width, respectively.

# grab the first VideoFrame object in video_frames and convert it to an NDArray\nfirst_frame = video_frames[0].data()\n\nprint(first_frame.shape)\n
      Output:
      (1, 768, 1024, 1) \n

To grab the desired NDArray image data from first_frame, we'll squeeze out the singleton dimensions as shown:

image = first_frame.squeeze()\n\nprint(image.shape)\n
      Output:
      (768, 1024)\n
      Finally, delete the available_data to unlock the region in the circular buffer.

# delete the available_data to unlock its region in the circular buffer\ndel available_data\n\n# stop runtime\nruntime.stop()\n
      "},{"location":"tutorials/livestream/","title":"Livestream to napari","text":"

The below script can be used to livestream data to the napari viewer. You may also utilize the Acquire napari plugin, which is provided in the package upon install. You can access the plugin in the napari plugins menu once Acquire is installed. You can review the plugin code here. You may also stream using other packages such as matplotlib.

      \"\"\"\nThis script will livestream data to the [napari viewer](https://napari.org/stable/). You may also utilize the `Acquire` napari plugin, which is provided in the `acquire-imaging` package on PyPI upon install. You can access the plugin in the napari plugins menu once `Acquire` is installed. You can review the [plugin code here](https://github.com/acquire-project/acquire-python/blob/main/python/acquire/__init__.py).\n\"\"\"\n\nimport acquire\nruntime = acquire.Runtime()\n\n# Initialize the device manager\ndm = runtime.device_manager()\n\n# Grab the current configuration\nconfig = runtime.get_configuration() \n\n# Select the uniform random camera as the video source\nconfig.video[0].camera.identifier = dm.select(acquire.DeviceKind.Camera, \".*random.*\")\n\n# Set the storage to trash to avoid saving the data\nconfig.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, \"Trash\")\n\n# Set the time for collecting data for a each frame\nconfig.video[0].camera.settings.exposure_time_us = 5e4  # 500 ms\n\nconfig.video[0].camera.settings.shape = (300, 200)\n\n# Set the max frame count to 100 frames\nconfig.video[0].max_frame_count = 100\n\n# Update the configuration with the chosen parameters \nconfig = runtime.set_configuration(config) \n\n# import napari and open a viewer to stream the data\nimport napari\nviewer = napari.Viewer()\n\nimport time\nfrom napari.qt.threading import thread_worker\n\ndef update_layer(args) -> None:\n    (new_image, stream_id) = args\n    print(f\"update layer: {new_image.shape=}, {stream_id=}\")\n    layer_key = f\"Video {stream_id}\"\n    try:\n        layer = viewer.layers[layer_key]\n        layer._slice.image._view = new_image\n        layer.data = new_image\n        # you can use the private api with layer.events.set_data() to speed up by 1-2 ms/frame\n\n    except KeyError:\n        viewer.add_image(new_image, name=layer_key)\n\n@thread_worker(connect={\"yielded\": update_layer})\ndef do_acquisition():\n    time.sleep(5)\n    runtime.start()\n\n    nframes = [0, 0]\n    stream_id = 0\n\n    def is_not_done() -> bool:\n        return (nframes[0] < config.video[0].max_frame_count) or (\n                nframes[1] < config.video[1].max_frame_count\n                )\n\n    def next_frame(): #-> Optional[npt.NDArray[Any]]:\n        \"\"\"Get the next frame from the current stream.\"\"\"\n        if nframes[stream_id] < config.video[stream_id].max_frame_count:\n            if packet := runtime.get_available_data(stream_id):\n                n = packet.get_frame_count()\n                nframes[stream_id] += n\n                f = next(packet.frames())\n                return f.data().squeeze().copy()\n        return None\n\n    stream = 1\n    # loop to continue to update the data in napari while acquisition is running\n    while is_not_done():  \n        if (frame := next_frame()) is not None:\n            yield frame, stream_id\n        time.sleep(0.1)\n\ndo_acquisition()\n\nnapari.run()\n
      "},{"location":"tutorials/multiscale/","title":"Writing Multiscale Zarr Files","text":"

      This tutorial will provide an example of writing multiscale data to a Zarr file.

      Zarr has additional capabilities relative to Acquire's basic storage devices, namely chunking, compression, and multiscale storage. To enable chunking and multiscale storage, set those attributes in instances of the ChunkingProperties and StorageProperties classes, respectively. You can learn more about the Zarr capabilities in Acquire here.

      "},{"location":"tutorials/multiscale/#configure-runtime","title":"Configure Runtime","text":"

      To start, we'll create a Runtime object and begin to configure the streaming process, selecting Zarr as the storage device so that writing multiscale data is possible.

import acquire\n\n# Initialize a Runtime object\nruntime = acquire.Runtime()\n\n# Initialize the device manager\ndm = runtime.device_manager()\n\n# Grab the current configuration\nconfig = runtime.get_configuration() \n\n# Select the radial sine simulated camera as the video source\nconfig.video[0].camera.identifier = dm.select(acquire.DeviceKind.Camera, \"simulated: radial sin\") \n\n# Set the storage to Zarr to have the option to save multiscale data\nconfig.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, \"Zarr\")\n\n# Set the time for collecting data for each frame\nconfig.video[0].camera.settings.exposure_time_us = 5e4  # 50 ms\n\n# size of image region of interest on the camera (x, y)\nconfig.video[0].camera.settings.shape = (1920, 1080)\n\n# Set the max frame count\nconfig.video[0].max_frame_count = 10 # collect 10 frames\n\n# specify the pixel datatype as a uint8\nconfig.video[0].camera.settings.pixel_type = acquire.SampleType.U8\n\n# set the scale of the pixels\nconfig.video[0].storage.settings.pixel_scale_um = (1, 1) # 1 micron by 1 micron\n\n# Set the output file to out.zarr\nconfig.video[0].storage.settings.filename = \"out.zarr\"\n

      To complete configuration, we'll configure the multiscale specific settings.

# Chunk size may need to be optimized for each acquisition.\n# See Zarr documentation for further guidance: https://zarr.readthedocs.io/en/stable/tutorial.html#chunk-optimizations\nconfig.video[0].storage.settings.chunking.max_bytes_per_chunk = 16 * 2**20 # 16 MB\n\n# x, y dimensions of each chunk to 1/3 of the width and height of the image, generating 9 chunks\nconfig.video[0].storage.settings.chunking.tile.width = (config.video[0].camera.settings.shape[0] // 3)\nconfig.video[0].storage.settings.chunking.tile.height = (config.video[0].camera.settings.shape[1] // 3)\n\n# turn on multiscale mode\nconfig.video[0].storage.settings.enable_multiscale = True\n\n# Update the configuration with the chosen parameters \nconfig = runtime.set_configuration(config) \n
      "},{"location":"tutorials/multiscale/#collect-and-inspect-the-data","title":"Collect and Inspect the Data","text":"
      # collect data\nruntime.start()\nruntime.stop()\n

You can inspect the Zarr file directory to check that the data was saved as expected. This Zarr file should have multiple subdirectories, one for each resolution in the multiscale data. Alternatively, you can inspect the data programmatically with:

      # Utilize the zarr python library to read the data\nimport zarr\n\n# Open the data to create a zarr Group\ngroup = zarr.open(\"out.zarr\")\n
      With multiscale mode enabled, an image pyramid will be formed by rescaling the data by a factor of 2 progressively until the rescaled image is smaller than the specified zarr chunk size in both dimensions. In this example, the original image dimensions are (1920, 1080), and we chunked the data using tiles 1/3 of the size of the image, namely (640, 360). To illustrate this point, we'll inspect the sizes of the various levels in the multiscale data and compare it to our specified chunk size.

      group[\"0\"], group[\"1\"], group[\"2\"]\n
      The output will be:
      (<zarr.core.Array '/0' (10, 1, 1080, 1920) uint8>,\n <zarr.core.Array '/1' (5, 1, 540, 960) uint8>,\n <zarr.core.Array '/2' (2, 1, 270, 480) uint8>)\n
      Here, the \"0\" directory contains the full-resolution array of frames of size 1920 x 1080, with a single channel, saving all 10 frames. The \"1\" directory contains the first rescaled array of frames of size 960 x 540, averaging every two frames, taking the frame count from 10 to 5. The \"2\" directory contains a further rescaled array of frames of size 480 x 270, averaging every four frames, taking the frame count from 10 to 2. Notice that both the frame width and frame height are now smaller than the chunk width and chunk height of 640 and 360, respectively, so this should be the last array in the group.
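To confirm the chunk layout, you can inspect each array's chunks attribute (a standard zarr-python property):

# chunk shape of the full-resolution array; the trailing dimensions should match the (360, 640) tile size\nprint(group[\"0\"].chunks)\n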

      "},{"location":"tutorials/props_json/","title":"Saving and Loading Properties from a JSON file","text":"

      This tutorial will provide an example of saving and subsequently loading a Properties object from a JSON file.

      "},{"location":"tutorials/props_json/#initialize-runtime","title":"Initialize Runtime","text":"

      To start, we'll import Acquire and create a Runtime object, which coordinates the streaming process.

      import acquire\nruntime = acquire.Runtime()\n
      "},{"location":"tutorials/props_json/#configure-camera","title":"Configure Camera","text":"

      All camera settings are captured by an instance of the Properties class, which will be associated with a given camera acquisition.

      # Instantiate a Properties object for the Runtime\nprops = runtime.get_configuration()\n
You can update any of the settings in this instance of Properties. To save any updated settings, use the set_configuration function. For this tutorial, we'll simply specify a camera, and then save these new settings. Note that more settings must be provided before this Properties object could be used for an acquisition.

      # set the radial sine simulated camera as the first video stream\nprops.video[0].camera.identifier = runtime.device_manager().select(acquire.DeviceKind.Camera, \"simulated: radial sin\")\n\n# save the updated settings\nprops = runtime.set_configuration(props)\n
      "},{"location":"tutorials/props_json/#save-properties-to-a-json-file","title":"Save Properties to a JSON file","text":"

      We'll utilize the json library to write our properties to a JSON file to save for subsequent acquisition.

      import json\n\n# cast the properties to a dictionary\nprops = props.dict()\n\n# convert the dictionary to json with \"human-readable\" formatting\nprops = json.dumps(props, indent=4, sort_keys=True)\n\n# save the properties to file \"sample_props.json\" in the current directory\nwith open(\"sample_props.json\", \"w\") as outfile:\n    outfile.write(props)\n
      "},{"location":"tutorials/props_json/#example-json-file","title":"Example JSON file","text":"

      The resulting sample_props.json file is below:

      {\n  \"video\": [\n    {\n      \"camera\": {\n        \"identifier\": {\n          \"id\": [\n            0,\n            1\n          ],\n          \"kind\": \"Camera\",\n          \"name\": \"simulated: radial sin\"\n        },\n        \"settings\": {\n          \"binning\": 1,\n          \"exposure_time_us\": 0.0,\n          \"input_triggers\": {\n            \"acquisition_start\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            },\n            \"exposure\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            },\n            \"frame_start\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            }\n          },\n          \"line_interval_us\": 0.0,\n          \"offset\": [\n            0,\n            0\n          ],\n          \"output_triggers\": {\n            \"exposure\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            },\n            \"frame_start\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            },\n            \"trigger_wait\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            }\n          },\n          \"pixel_type\": \"U16\",\n          \"readout_direction\": \"Forward\",\n          \"shape\": [\n            1,\n            1\n          ]\n        }\n      },\n      \"frame_average_count\": 0,\n      \"max_frame_count\": 18446744073709551615,\n      \"storage\": {\n        \"identifier\": {\n          \"id\": [\n            0,\n            0\n          ],\n          \"kind\": \"NONE\",\n          \"name\": \"\"\n        },\n        \"settings\": {\n          \"chunking\": {\n            \"max_bytes_per_chunk\": 16777216,\n            \"tile\": {\n              \"height\": 0,\n              \"planes\": 0,\n              \"width\": 0\n            }\n          },\n          \"enable_multiscale\": false,\n          \"external_metadata_json\": \"\",\n          \"filename\": \"\",\n          \"first_frame_id\": 0,\n          \"pixel_scale_um\": [\n            0.0,\n            0.0\n          ]\n        },\n        \"write_delay_ms\": 0.0\n      }\n    },\n    {\n      \"camera\": {\n        \"identifier\": {\n          \"id\": [\n            0,\n            0\n          ],\n          \"kind\": \"NONE\",\n          \"name\": \"\"\n        },\n        \"settings\": {\n          \"binning\": 1,\n          \"exposure_time_us\": 0.0,\n          \"input_triggers\": {\n            \"acquisition_start\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            },\n            \"exposure\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            },\n            \"frame_start\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            }\n          },\n          \"line_interval_us\": 0.0,\n          \"offset\": [\n            0,\n  
          0\n          ],\n          \"output_triggers\": {\n            \"exposure\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            },\n            \"frame_start\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            },\n            \"trigger_wait\": {\n              \"edge\": \"Rising\",\n              \"enable\": false,\n              \"kind\": \"Input\",\n              \"line\": 0\n            }\n          },\n          \"pixel_type\": \"U16\",\n          \"readout_direction\": \"Forward\",\n          \"shape\": [\n            0,\n            0\n          ]\n        }\n      },\n      \"frame_average_count\": 0,\n      \"max_frame_count\": 18446744073709551615,\n      \"storage\": {\n        \"identifier\": {\n          \"id\": [\n            0,\n            0\n          ],\n          \"kind\": \"NONE\",\n          \"name\": \"\"\n        },\n        \"settings\": {\n          \"chunking\": {\n            \"max_bytes_per_chunk\": 16777216,\n            \"tile\": {\n              \"height\": 0,\n              \"planes\": 0,\n              \"width\": 0\n            }\n          },\n          \"enable_multiscale\": false,\n          \"external_metadata_json\": \"\",\n          \"filename\": \"\",\n          \"first_frame_id\": 0,\n          \"pixel_scale_um\": [\n            0.0,\n            0.0\n          ]\n        },\n        \"write_delay_ms\": 0.0\n      }\n    }\n  ]\n}\n
      "},{"location":"tutorials/props_json/#load-properties-from-a-json-file","title":"Load Properties from a JSON file","text":"

      You can load the settings in the JSON file to a Properties object and set this configuration for your Runtime as shown below:

      import acquire\nimport json\n\n# create a Runtime object\nruntime = acquire.Runtime()\n\n# Instantiate a `Properties` object from the settings in sample_props.json\nprops = acquire.Properties(**json.load(open('sample_props.json')))\n\n# save the properties for this instance of Runtime\nprops = runtime.set_configuration(props)\n
      "},{"location":"tutorials/select/","title":"Device Selection","text":"

This tutorial illustrates the difference between the select and select_one_of methods in the DeviceManager class. select chooses the first discovered device of a specific kind, camera or storage device. You can also, optionally, select a specific device by passing the device name as a string to select. select_one_of, by contrast, requires that you specify both the kind of device to select and a list of possible device names. select_one_of will iterate through the list and select the first device in the list of names that is discovered on your machine.

      To start, instantiate Runtime and DeviceManager objects.

      import acquire \n\n# Instantiate a Runtime object\nruntime = acquire.Runtime()\n\n# Instantiate a DeviceManager object for the Runtime\nmanager = runtime.device_manager()\n\n# List devices discovered by DeviceManager\nfor device in manager.devices():\n    print(device)\n

      The output of the above code is below. All identified devices will be listed, and in the case of this tutorial, no cameras were connected to the machine, so only simulated cameras were found. Note that discovered storage devices will also print.

      <DeviceIdentifier Camera \"simulated: uniform random\">\n<DeviceIdentifier Camera \"simulated: radial sin\">\n<DeviceIdentifier Camera \"simulated: empty\">\n\n# storage devices will also print\n<DeviceIdentifier Storage \"raw\">\n<DeviceIdentifier Storage \"tiff\">\n<DeviceIdentifier Storage \"trash\">\n<DeviceIdentifier Storage \"tiff-json\">\n<DeviceIdentifier Storage \"Zarr\">\n<DeviceIdentifier Storage \"ZarrBlosc1ZstdByteShuffle\">\n<DeviceIdentifier Storage \"ZarrBlosc1Lz4ByteShuffle\">\n
      The order of those printed devices matters. Below are two examples of how the select method works. In the first, without a specific device name provided, select will choose the first device of the specified kind in the list of discovered devices. In the second example, a specific device name is provided, so select will grab that device if it was discovered by Runtime.

      # specify that the device should be a camera and not a storage device\nkind = acquire.DeviceKind.Camera\n\n# 1st example: select the first camera in the list of discovered devices\nselected = manager.select(kind)\n\n# 2nd example: select a specific camera since the name of the device was provided\nspecific = manager.select(kind, \"simulated: empty\")\n\n# print the 2 devices\nprint(selected)\nprint(specific)\n
      The output of the code is below:
      <DeviceIdentifier Camera \"simulated: uniform random\">\n<DeviceIdentifier Camera \"simulated: empty\">\n

      The select_one_of method allows more flexibility since you provide a list of names of acceptable devices for it to iterate through until a discovered device is located.

      # specify that the device should be a camera and not a storage device\nkind = acquire.DeviceKind.Camera\n\nselected = manager.select_one_of(kind, [\"Hamamatsu_DCAMSDK4_v22126552\", \"simulated: radial sin\", \"simulated: empty\"])\n\n# print which camera was selected\nprint(selected)\n
      The output of the code is below. The Hamamatsu camera was not discovered by Runtime, so select_one_of iterates until it finds a device discovered by Runtime. In this case, the next item in the list is a simulated camera that was discovered by Runtime.
      <DeviceIdentifier Camera \"simulated: radial sin\">\n

      "},{"location":"tutorials/setup/","title":"Utilizing the Setup Method","text":"

      This tutorial will provide an example of utilizing the setup method to configure Runtime and specify some basic properties.

      "},{"location":"tutorials/setup/#setup-function-definition","title":"Setup Function Definition","text":"
      def setup(\n    runtime: Runtime,\n    camera: Union[str, List[str]] = \"simulated: radial sin\",\n    storage: Union[str, List[str]] = \"Tiff\",\n    output_filename: Optional[str] = \"out.tif\",\n) -> Properties\n

      The setup function can be used as a shorthand to simplify the Runtime configuration process. setup takes a Runtime object and strings of the camera and storage device names and returns a Properties object. You may also optionally specify the filename for writing the data.

      "},{"location":"tutorials/setup/#example","title":"Example","text":"

      import acquire\n\n# Initialize a Runtime object\nruntime = acquire.Runtime()\n\n# use setup to get configuration and set the camera, storage, and filename\nconfig = acquire.setup(runtime, \"simulated: radial sin\", \"Zarr\", \"out.zarr\")\n
      You can subsequently use config to specify additional settings and set those configurations before beginning acquisition.
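For example, a minimal sketch that refines the configuration returned by setup:

# adjust any remaining settings on the returned Properties object\nconfig.video[0].camera.settings.exposure_time_us = 5e4  # 50 ms\n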

Without using setup, the process would take a few additional lines of code. The below code is equivalent to the example above.

      import acquire\n\n# Initialize a Runtime object\nruntime = acquire.Runtime()\n\n# Grab the current configuration\nconfig = runtime.get_configuration() \n\n# Select the radial sine simulated camera as the video source\nconfig.video[0].camera.identifier = runtime.device_manager().select(acquire.DeviceKind.Camera, \"simulated: radial sin\") \n\n# Set the storage to Zarr to have the option to save multiscale data\nconfig.video[0].storage.identifier = runtime.device_manager().select(acquire.DeviceKind.Storage, \"Zarr\")\n\n# Set the output file to out.zarr\nconfig.video[0].storage.settings.filename = \"out.zarr\"\n

      In either case, we can update the configuration settings using:

      config = runtime.set_configuration(config)\n
      "},{"location":"tutorials/start_stop/","title":"Multiple Acquisitions","text":"

      This tutorial will provide an example of starting, stopping, and restarting acquisition, or streaming from a video source.

      "},{"location":"tutorials/start_stop/#configure-streaming","title":"Configure Streaming","text":"

      To start, we'll create a Runtime object and configure the streaming process. To do this, we'll utilize the setup method. More information on that method is detailed in this tutorial.

import acquire\n\n# Initialize a Runtime object\nruntime = acquire.Runtime()\n\n# Use setup to grab the configuration and set the video source and storage device\nconfig = acquire.setup(runtime, \"simulated: radial sin\", \"Tiff\")\n\nconfig.video[0].storage.settings.filename = \"out.tif\"\nconfig.video[0].camera.settings.shape = (192, 108)\nconfig.video[0].camera.settings.exposure_time_us = 10e4\nconfig.video[0].max_frame_count = 10\n\n# Update the configuration with the chosen parameters \nconfig = runtime.set_configuration(config) \n
      "},{"location":"tutorials/start_stop/#start-stop-and-restart-acquisition","title":"Start, Stop, and Restart Acquisition","text":"

During acquisition, the AvailableData object is the streaming interface. Upon shutdown, Runtime deletes all of the objects created during acquisition to free up resources, and you must stop acquisition by calling runtime.stop() between acquisitions; otherwise, an exception will be raised.

      To understand how acquisition works, we'll start, stop, and repeat acquisition and print the DeviceState, which can be Armed, AwaitingConfiguration, Closed, or Running, and the AvailableData object throughout the process.

      If acquisition has ended, all of the objects are deleted, including AvailableData objects, so those will be None when not acquiring data. In addition, if enough time hasn't elapsed since acquisition started, AvailableData will also be None. We'll utilize the time python package to introduce time delays to account for these facts.

      # package used to introduce time delays\nimport time\n\n# start acquisition\nruntime.start()\n\nprint(runtime.get_state())\nprint(runtime.get_available_data(0))\n\n# wait 0.5 seconds to allow time for data to be acquired\ntime.sleep(0.5)\n\nprint(runtime.get_state())\nprint(runtime.get_available_data(0))\n\n# stop acquisition\nruntime.stop()\n\nprint(runtime.get_state())\nprint(runtime.get_available_data(0))\n\n# start acquisition\nruntime.start()\n\n# time delay of 5 seconds - acquisition only runs for 1 second\ntime.sleep(5)\n\nprint(runtime.get_state())\nprint(runtime.get_available_data(0))\n\n# stop acquisition\nruntime.stop()\n

      The output will be:

      DeviceState.Running\nNone\nDeviceState.Running\n<builtins.AvailableData object at 0x00000218D685E5B0>\nDeviceState.Armed\nNone\nDeviceState.Armed\n<builtins.AvailableData object at 0x00000218D685E3D0>\n
1. The first time we print states is immediately after we started acquisition; enough time hasn't elapsed for data to be collected based on the exposure time, so the camera is running but there is no data yet.
2. The next print happens after waiting 0.5 seconds, so acquisition is still running and now there is acquired data available.
3. The subsequent print follows the call to runtime.stop(), which terminates acquisition after the specified max number of frames are collected, so the device is no longer running, although it is in the Armed state ready for acquisition, and there is no available data.
4. The final print occurs after waiting 5 seconds after starting acquisition, which is longer than the 1 second needed to collect all the frames, so the device is no longer collecting data. However, runtime.stop() hasn't been called, so the AvailableData object has not yet been deleted.

      "},{"location":"tutorials/storage/","title":"Storage Device Selection","text":"

      This tutorial illustrates the storage device options in Acquire.

      "},{"location":"tutorials/storage/#description-of-storage-devices","title":"Description of Storage Devices","text":"

      To start, we'll create a Runtime object and print the storage device options.

      import acquire \n\n# Instantiate a Runtime object\nruntime = acquire.Runtime()\n\n# Instantiate a DeviceManager object for the Runtime\nmanager = runtime.device_manager()\n\n# Print devices in DeviceManager of kind Storage\nfor device in manager.devices():\n    if device.kind == acquire.DeviceKind.Storage:\n        print(device)\n
      The output of that script will be:

      # Storage Devices printed\n\n<DeviceIdentifier Storage \"raw\">\n<DeviceIdentifier Storage \"tiff\">\n<DeviceIdentifier Storage \"trash\">\n<DeviceIdentifier Storage \"tiff-json\">\n<DeviceIdentifier Storage \"Zarr\">\n<DeviceIdentifier Storage \"ZarrBlosc1ZstdByteShuffle\">\n<DeviceIdentifier Storage \"ZarrBlosc1Lz4ByteShuffle\">\n
• raw - Streams to a raw binary file.
• tiff - Streams to a bigtiff file. Metadata is stored in the ImageDescription tag for each frame as a JSON string.
• trash - Writes nothing. Discards incoming data. Useful for live streaming applications.
• tiff-json - Stores the video stream in a bigtiff, and stores metadata in a JSON file. Both are located in a folder identified by the filename property.
• Zarr - Streams data to a Zarr V2 file with associated metadata.
• ZarrBlosc1ZstdByteShuffle - Streams compressed data (zstd codec) to a Zarr V2 file with associated metadata.
• ZarrBlosc1Lz4ByteShuffle - Streams compressed data (lz4 codec) to a Zarr V2 file with associated metadata.

      Acquire supports streaming data to bigtiff and Zarr V2.

      Zarr has additional capabilities relative to the basic storage devices, namely chunking, compression, and multiscale storage. You can learn more about the Zarr capabilities in Acquire here.

      "},{"location":"tutorials/storage/#select-the-storage-device-and-specify-where-to-store-the-data","title":"Select the Storage Device and Specify where to Store the Data","text":"

      We'll use our instance of Runtime and specify that the data from one video source should be streamed to a file out.tif in the example below:

# get the current configuration\nconfig = runtime.get_configuration()\n\n# Select the tiff storage device\nconfig.video[0].storage.identifier = manager.select(acquire.DeviceKind.Storage, \"tiff\")\n\n# Set the data filename to out.tif in your current directory (provide the full path to save to a different directory)\nconfig.video[0].storage.settings.filename = \"out.tif\" \n

      Before proceeding, complete the Camera setup and call set_configuration to save those new configuration settings.
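A minimal sketch of that remaining step, reusing the simulated camera from the earlier tutorials:

# select a video source, then apply the configuration\nconfig.video[0].camera.identifier = manager.select(acquire.DeviceKind.Camera, \"simulated: radial sin\")\nconfig = runtime.set_configuration(config)\n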

      "},{"location":"tutorials/trig_json/","title":"Saving and Loading Trigger Settings from a JSON file","text":"

      This tutorial will provide an example of saving and subsequently loading a Trigger object from a JSON file.

      "},{"location":"tutorials/trig_json/#initialize-runtime","title":"Initialize Runtime","text":"

      To start, we'll import Acquire and create a Runtime object, which coordinates the streaming process.

      import acquire\nruntime = acquire.Runtime()\n
      "},{"location":"tutorials/trig_json/#create-a-trigger-object","title":"Create a Trigger Object","text":"

Trigger objects have 4 attributes: edge, enable, line, and kind. In this example, we will only adjust the edge attribute.

      # Instantiate a Trigger object\ntrig = acquire.Trigger()\n\n# change the edge attribute from the default Rising to Falling\ntrig.edge = acquire.TriggerEdge.Falling\n
      "},{"location":"tutorials/trig_json/#save-properties-to-a-json-file","title":"Save Properties to a JSON file","text":"

      We'll utilize the json library to write our Trigger to a JSON file to save for subsequent acquisition.

import json\n\n# cast the trigger to a dictionary\ntrig = trig.dict()\n\n# convert the dictionary to json with \"human-readable\" formatting\ntrig = json.dumps(trig, indent=4, sort_keys=True)\n\n# save the trigger to file \"sample_trig.json\" in the current directory\nwith open(\"sample_trig.json\", \"w\") as outfile:\n    outfile.write(trig)\n
      "},{"location":"tutorials/trig_json/#example-json-file","title":"Example JSON file","text":"

      The resulting sample_trig.json file is below:

      {\n  \"edge\": \"Falling\",\n  \"enable\": false,\n  \"kind\": \"Input\",\n  \"line\": 0\n}\n
      "},{"location":"tutorials/trig_json/#load-properties-from-a-json-file","title":"Load Properties from a JSON file","text":"

      You can load the trigger attributes in the JSON file to a Trigger object as shown below:

      # Instantiate a `Trigger` object from the settings in sample_trig.json\ntrig = acquire.Trigger(**json.load(open('sample_trig.json')))\n
      "},{"location":"tutorials/trigger/","title":"Finite Triggered Acquisition","text":"

      Acquire (acquire-imaging on PyPI) is a Python package providing a multi-camera video streaming library focused on performant microscopy, with support for up to two simultaneous, independent, video streams.

      This tutorial shows an example of setting up triggered acquisition of a finite number of frames with one of Acquire's supported devices and saving the data to a Zarr file.

      "},{"location":"tutorials/trigger/#initialize-acquisition","title":"Initialize Acquisition","text":"

      To start, we'll import Acquire and create an acquisition Runtime object, which initializes the driver adaptors needed for the supported cameras.

      import acquire\nruntime = acquire.Runtime()\n
      "},{"location":"tutorials/trigger/#configure-camera","title":"Configure Camera","text":"

All camera settings can be captured by an instance of the Properties class, which will be associated with a given camera acquisition. The settings can be stored in a dictionary (e.g. Properties.dict()). These settings can be saved to a JSON file and subsequently loaded (e.g. Properties(**json.load(open('acquire.json')))) using the json library.

      props = runtime.get_configuration()\n\nimport json\nwith open(\"/path/to/acquire.json\", \"w\") as f:\n    json.dump(props.dict(), f)\n

      The current configuration settings can be checked and assigned to an instance of the Properties class with:

      props = runtime.get_configuration() \n

      Since Acquire supports 2 video streams, each camera, or source, must be configured separately. In this example, we will only use 1 source for the acquisition, so we will only need to configure props.video[0]. To set the first video stream to Hamamatsu Orca Fusion BT (C15440-20UP), you can use the following with a regular expression to grab the Hamamatsu camera:

      props.video[0].camera.identifier = runtime.device_manager().select(acquire.DeviceKind.Camera, 'Hamamatsu C15440.*')\n

      Next we'll choose the settings for the Hamamatsu camera. The CameraProperties class describes the available settings, which include exposure time (in microseconds), binning, pixel data type (e.g. u16), and how many frames to acquire.

      Every property can be set using the following, but in this example, we will only change a few of the available settings.

      props.video[0].camera.settings.binning = 1 # no pixels will be combined\nprops.video[0].camera.settings.shape = (1700, 512) # shape of the image to be acquired in pixels\nprops.video[0].camera.settings.offset = (302, 896) # centers the image region of interest on the camera sensor\nprops.video[0].camera.settings.pixel_type = acquire.SampleType.U16 # sets the pixel data type to a 16-bit unsigned integer\nprops.video[0].max_frame_count = 10 # finite acquisition of 10 frames. Use 0 for infinite acquisition.\n

Triggers can also be set in the CameraProperties object. The parameters can be stored in a dictionary (e.g. Trigger.dict()). You can construct a Trigger from a JSON file (e.g. acquire.Trigger(**json.load(open('trigger.json')))) using the json library.

      trig = acquire.Trigger()\n\nimport json\nwith open(\"/path/to/trigger.json\", \"w\") as f:\n    json.dump(trig.dict(), f)\n

      In this example, we'll only utilize output triggers. By default, the camera's internal triggering is used, but you may explicitly disable external input triggers using:

      props.video[0].camera.settings.input_triggers = acquire.InputTriggers() # default: disabled\n

      Output triggers can be set to begin exposure, start a new frame, or wait before acquiring. We can enable an exposure trigger to start on the rising edge with:

      props.video[0].camera.settings.output_triggers.exposure = acquire.Trigger(\n    enable=True, line=1, edge=\"Rising\"\n)\n
      "},{"location":"tutorials/trigger/#select-storage","title":"Select Storage","text":"

      Storage objects have identifiers which specify the file type (e.g. Zarr or tiff) and settings described by an instance of the StorageProperties class. We can set the file type to Zarr and set the file name to \"out\" with:

      props.video[0].storage.identifier = runtime.device_manager().select(acquire.DeviceKind.Storage,'zarr') \nprops.video[0].storage.settings.filename=\"out.zarr\"\n
      "},{"location":"tutorials/trigger/#save-configuration","title":"Save configuration","text":"

None of these settings will be updated in the Properties object until you call the set_configuration method, which sends the requested settings to the device and returns a new Properties object reflecting the configuration that was actually applied.

      We'll set the configuration with:

      props = runtime.set_configuration(props)\n

You can optionally print out these settings for your records using the Rich python library:

      from rich.pretty import pprint\npprint(props.dict())\n
      "},{"location":"tutorials/trigger/#acquire-data","title":"Acquire data","text":"

      To begin acquisition:

      runtime.start()\n

      You can stop acquisition with runtime.stop() to stop after the specified number of frames is collected or runtime.abort() to immediately stop acquisition.
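For example:

# stop after the configured number of frames has been acquired\nruntime.stop()\n\n# or, to end acquisition immediately:\n# runtime.abort()\n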

      "}]} \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz index 76b84f0db42cde9220f4371305e34d1df7bc7186..5ae9ecb149753a11e40ed4e16e2fd6ef8da96637 100644 GIT binary patch delta 12 Tcmb=gXOr*d;K+J0k*yK{8FB<6 delta 12 Tcmb=gXOr*d;F$7gB3mT@8Y~1w