diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
new file mode 100644
index 0000000000..7b70d5bdc9
--- /dev/null
+++ b/.devcontainer/Dockerfile
@@ -0,0 +1,37 @@
+# This file is for use as a devcontainer and a runtime container
+#
+# The devcontainer should use the build target and run as root with podman
+# or docker with user namespaces.
+#
+FROM python:3.8 as build
+
+ARG PIP_OPTIONS
+
+# Add any system dependencies for the developer/build environment here e.g.
+# RUN apt-get update && apt-get upgrade -y && \
+#     apt-get install -y --no-install-recommends \
+#     desired-packages \
+#     && rm -rf /var/lib/apt/lists/*
+
+# set up a virtual environment and put it in PATH
+RUN python -m venv /venv
+ENV PATH=/venv/bin:$PATH
+
+# Copy any required context for the pip install over
+COPY . /context
+WORKDIR /context
+
+# install python package into /venv
+RUN pip install ${PIP_OPTIONS}
+
+FROM python:3.8-slim as runtime
+
+# Add apt-get system dependencies for runtime here if needed
+
+# copy the virtual environment from the build stage and put it in PATH
+COPY --from=build /venv/ /venv/
+ENV PATH=/venv/bin:$PATH
+
+# change this entrypoint if it is not the same as the repo
+ENTRYPOINT ["python", "-m", "ophyd_async"]
+CMD ["--version"]
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 0000000000..314a151af7
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,47 @@
+// For format details, see https://containers.dev/implementors/json_reference/
+{
+    "name": "Python 3 Developer Container",
+    "build": {
+        "dockerfile": "Dockerfile",
+        "target": "build",
+        // Only upgrade pip, we will install the project below
+        "args": {
+            "PIP_OPTIONS": "--upgrade pip"
+        },
+    },
+    "remoteEnv": {
+        "DISPLAY": "${localEnv:DISPLAY}"
+    },
+    // Set *default* container specific settings.json values on container create.
+    "settings": {
+        "python.defaultInterpreterPath": "/venv/bin/python"
+    },
+    "customizations": {
+        "vscode": {
+            // Add the IDs of extensions you want installed when the container is created.
+            "extensions": [
+                "ms-python.python",
+                "tamasfe.even-better-toml",
+                "redhat.vscode-yaml",
+                "ryanluker.vscode-coverage-gutters"
+            ]
+        }
+    },
+    // Make sure the files we are mapping into the container exist on the host
+    "initializeCommand": "bash -c 'for i in $HOME/.inputrc; do [ -f $i ] || touch $i; done'",
+    "runArgs": [
+        "--net=host",
+        "--security-opt=label=type:container_runtime_t"
+    ],
+    "mounts": [
+        "source=${localEnv:HOME}/.ssh,target=/root/.ssh,type=bind",
+        "source=${localEnv:HOME}/.inputrc,target=/root/.inputrc,type=bind",
+        // map in home directory - not strictly necessary but useful
+        "source=${localEnv:HOME},target=${localEnv:HOME},type=bind,consistency=cached"
+    ],
+    // make the workspace folder the same inside and outside of the container
+    "workspaceMount": "source=${localWorkspaceFolder},target=${localWorkspaceFolder},type=bind",
+    "workspaceFolder": "${localWorkspaceFolder}",
+    // After the container is created, install the python project in editable form
+    "postCreateCommand": "pip install -e .[dev] --config-settings editable_mode=compat"
+}
diff --git a/.github/CONTRIBUTING.rst b/.github/CONTRIBUTING.rst
index 361a0a0d0f..fe19bae11b 100644
--- a/.github/CONTRIBUTING.rst
+++ b/.github/CONTRIBUTING.rst
@@ -7,7 +7,7 @@
 filing a new one. If you have a great idea but it involves big changes, please
 file a ticket before making a pull request!
We want to make sure you don't spend your time coding something that might not fit the scope of the project. -.. _GitHub: https://github.com/bluesky/ophyd_async/issues +.. _GitHub: https://github.com/bluesky/ophyd-async/issues Issue or Discussion? -------------------- @@ -16,7 +16,7 @@ Github also offers discussions_ as a place to ask questions and share ideas. If your issue is open ended and it is not obvious when it can be "closed", please raise it as a discussion instead. -.. _discussions: https://github.com/bluesky/ophyd_async/discussions +.. _discussions: https://github.com/bluesky/ophyd-async/discussions Code coverage ------------- @@ -31,4 +31,4 @@ The `Developer Guide`_ contains information on setting up a development environment, running the tests and what standards the code and documentation should follow. -.. _Developer Guide: https://blueskyproject.io/ophyd_async/main/developer/how-to/contribute.html +.. _Developer Guide: https://blueskyproject.io/ophyd-async/main/developer/how-to/contribute.html diff --git a/.github/workflows/code.yml b/.github/workflows/code.yml index 38df6d7077..33dd22da8c 100644 --- a/.github/workflows/code.yml +++ b/.github/workflows/code.yml @@ -103,3 +103,37 @@ jobs: - name: Test module --version works using the installed wheel # If more than one module in src/ replace with module name to test run: python -m $(ls src | head -1) --version + + release: + # upload to PyPI and make a release on every tag + needs: [lint, dist, test] + if: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags') }} + runs-on: ubuntu-latest + env: + HAS_PYPI_TOKEN: ${{ secrets.PYPI_TOKEN != '' }} + + steps: + - uses: actions/download-artifact@v3 + + - name: Fixup blank lockfiles + # Github release artifacts can't be blank + run: for f in lockfiles/*; do [ -s $f ] || echo '# No requirements' >> $f; done + + - name: Github Release + # We pin to the SHA, not the tag, for security reasons. + # https://docs.github.com/en/actions/learn-github-actions/security-hardening-for-github-actions#using-third-party-actions + uses: softprops/action-gh-release@1e07f4398721186383de40550babbdf2b84acfc5 # v0.1.14 + with: + prerelease: ${{ contains(github.ref_name, 'a') || contains(github.ref_name, 'b') || contains(github.ref_name, 'rc') }} + files: | + dist/* + lockfiles/* + generate_release_notes: true + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Publish to PyPI + if: ${{ env.HAS_PYPI_TOKEN }} + uses: pypa/gh-action-pypi-publish@release/v1 + with: + password: ${{ secrets.PYPI_TOKEN }} diff --git a/docs/conf.py b/docs/conf.py index f749043599..57f5366ead 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -4,9 +4,12 @@ # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html +import sys from pathlib import Path from subprocess import check_output +import requests + import ophyd_async # -- General configuration ------------------------------------------------ @@ -147,7 +150,17 @@ html_theme = "pydata_sphinx_theme" github_repo = project github_user = "bluesky" - +switcher_json = f"https://{github_user}.github.io/{github_repo}/switcher.json" +# Don't check switcher if it doesn't exist, but warn in a non-failing way +check_switcher = requests.get(switcher_json).ok +if not check_switcher: + print( + "*** Can't read version switcher, is GitHub pages enabled? 
\n" + " Once Docs CI job has successfully run once, set the " + "Github pages source branch to be 'gh-pages' at:\n" + f" https://github.com/{github_user}/{github_repo}/settings/pages", + file=sys.stderr, + ) # Theme options for pydata_sphinx_theme html_theme_options = dict( diff --git a/docs/developer/explanations/decisions/0004-repository-structure.rst b/docs/developer/explanations/decisions/0004-repository-structure.rst index c7ead92102..31d5c572d0 100644 --- a/docs/developer/explanations/decisions/0004-repository-structure.rst +++ b/docs/developer/explanations/decisions/0004-repository-structure.rst @@ -72,16 +72,11 @@ During this process, the folder structure should incrementally be changed to │ └── devices └── ... -The :python:`__init__.py` files of each submodule (core, devices.epics and devices.tango) will +The `__init__.py` files of each submodule (core, devices.epics and devices.tango) will be modified such that end users experience little disruption to how they use Ophyd Async. -For such users, lines like +For such users, `from ophyd.v2.core import ...` can be replaced with +`from ophyd_async.core import ...`. -.. code:: python - from ophyd.v2.core import ... - -will be replaced with -.. code:: python - from ophyd_async.core import ... Consequences ------------ @@ -90,4 +85,4 @@ The git history of all three repositories being merged will be preserved, and th code bases neatly subdivided. Merge conflicts dealt with for ophyd-epics-devices and ophyd-tango-devices will be -clearly stated in the commit messages regarding their resolutions. \ No newline at end of file +clearly stated in the commit messages regarding their resolutions. diff --git a/docs/developer/how-to/make-release.rst b/docs/developer/how-to/make-release.rst index 8c6fcb4c40..29953f9074 100644 --- a/docs/developer/how-to/make-release.rst +++ b/docs/developer/how-to/make-release.rst @@ -13,4 +13,4 @@ To make a new release, please follow this checklist: Note that tagging and pushing to the main branch has the same effect except that you will not get the option to edit the release notes. -.. _release: https://github.com/bluesky/ophyd_async/releases \ No newline at end of file +.. _release: https://github.com/bluesky/ophyd-async/releases diff --git a/docs/developer/tutorials/dev-install.rst b/docs/developer/tutorials/dev-install.rst index 8a574c4b1a..13e029d5f4 100644 --- a/docs/developer/tutorials/dev-install.rst +++ b/docs/developer/tutorials/dev-install.rst @@ -10,7 +10,7 @@ Clone the repository First clone the repository locally using `Git `_:: - $ git clone git://github.com/bluesky/ophyd_async.git + $ git clone git://github.com/bluesky/ophyd-async.git Install dependencies -------------------- @@ -25,7 +25,7 @@ requires python 3.8 or later) or to run in a container under `VSCode .. code:: - $ cd ophyd_async + $ cd ophyd-async $ python3 -m venv venv $ source venv/bin/activate $ pip install -e '.[dev]' @@ -34,7 +34,7 @@ requires python 3.8 or later) or to run in a container under `VSCode .. 
 
 .. code::
 
-    $ vscode ophyd_async
+    $ vscode ophyd-async
     # Click on 'Reopen in Container' when prompted
     # Open a new terminal
diff --git a/docs/examples/ad_demo.py b/docs/examples/ad_demo.py
new file mode 100644
index 0000000000..f11790c495
--- /dev/null
+++ b/docs/examples/ad_demo.py
@@ -0,0 +1,45 @@
+import bluesky.plan_stubs as bps
+import bluesky.plans as bp  # noqa
+import bluesky.preprocessors as bpp
+
+# Import bluesky and ophyd
+import matplotlib.pyplot as plt
+from bluesky import RunEngine
+from bluesky.callbacks.best_effort import BestEffortCallback
+from bluesky.utils import ProgressBarManager, register_transform
+from ophyd.v2.core import DeviceCollector
+
+from ophyd_async.devices import areadetector
+
+# Create a run engine, with plotting, progressbar and transform
+RE = RunEngine({}, call_returns_result=True)
+bec = BestEffortCallback()
+RE.subscribe(bec)
+RE.waiting_hook = ProgressBarManager()
+plt.ion()
+register_transform("RE", prefix="<")
+
+# Start IOC with demo pvs in subprocess
+pv_prefix = "pc0105-AD-SIM-01:"
+
+
+# Create v2 devices
+with DeviceCollector():
+    det1 = areadetector.MySingleTriggerSim(pv_prefix)
+    det2 = areadetector.MyHDFWritingSim(pv_prefix)
+    det3 = areadetector.MyHDFFlyerSim(pv_prefix)
+
+
+# And a plan
+@bpp.run_decorator()
+@bpp.stage_decorator([det3])
+def fly_det3(num: int):
+    yield from bps.mov(det3.drv.num_images, num)
+    yield from bps.kickoff(det3, wait=True)
+    status = yield from bps.complete(det3, wait=False, group="complete")
+    while status and not status.done:
+        yield from bps.collect(det3, stream=True, return_payload=False)
+        yield from bps.sleep(0.1)
+    yield from bps.wait(group="complete")
+    # One last one
+    yield from bps.collect(det3, stream=True, return_payload=False)
diff --git a/docs/images/bluesky_ophyd_epics_devices_logo.svg b/docs/images/bluesky_ophyd_epics_devices_logo.svg
new file mode 100644
index 0000000000..5eefa60947
--- /dev/null
+++ b/docs/images/bluesky_ophyd_epics_devices_logo.svg
@@ -0,0 +1,389 @@
+(389 lines of SVG vector markup for the combined bluesky / ophyd EPICS devices
+logo, not reproduced here)
diff --git a/docs/images/dls-favicon.ico b/docs/images/dls-favicon.ico
deleted file mode 100644
index 9a11f508ef..0000000000
Binary files a/docs/images/dls-favicon.ico and /dev/null differ
diff --git a/docs/images/dls-logo.svg b/docs/images/dls-logo.svg
deleted file mode 100644
index 0af1a17707..0000000000
--- a/docs/images/dls-logo.svg
+++ /dev/null
@@ -1,11 +0,0 @@
-(11 lines of SVG markup for the Diamond Light Source logo, removed)
\ No newline at end of file
diff --git a/docs/user/examples/epics_demo.py b/docs/user/examples/epics_demo.py
index f3f47e9896..ebc1b4b4ee 100644
--- a/docs/user/examples/epics_demo.py
+++ b/docs/user/examples/epics_demo.py
@@ -8,7 +8,7 @@ from ophyd import Component, Device, EpicsSignal, EpicsSignalRO
 
 from ophyd_async.core import epicsdemo
-from ophyd_async.core.core import DeviceCollector
+from ophyd_async.core.device_collector import DeviceCollector
 
 # Create a run engine, with plotting, progressbar and transform
 RE = RunEngine({}, call_returns_result=True)
diff --git a/docs/user/how-to/run-container.rst b/docs/user/how-to/run-container.rst
new file mode 100644
index 0000000000..2639fb919e
--- /dev/null
+++ b/docs/user/how-to/run-container.rst
@@ -0,0 +1,15 @@
+Run in a container
+==================
+
+Pre-built containers with ophyd-async and its dependencies already
+installed are available on `Github Container Registry
+<https://ghcr.io/bluesky/ophyd-async>`_.
+ +Starting the container +---------------------- + +To pull the container from github container registry and run:: + + $ docker run ghcr.io/bluesky/ophyd-async:main --version + +To get a released version, use a numbered release instead of ``main``. diff --git a/docs/user/index.rst b/docs/user/index.rst index 214ba3784f..6e612e82bc 100644 --- a/docs/user/index.rst +++ b/docs/user/index.rst @@ -32,6 +32,7 @@ side-bar. :maxdepth: 1 how-to/make-a-simple-device + how-to/run-container +++ diff --git a/docs/user/reference/api.rst b/docs/user/reference/api.rst index 354e77496b..60a3dfaf4a 100644 --- a/docs/user/reference/api.rst +++ b/docs/user/reference/api.rst @@ -24,6 +24,10 @@ This is the internal API reference for ophyd_async :template: custom-module-template.rst :recursive: - ophyd_async.core.core - ophyd_async.core.epics + ophyd_async.core.backends + ophyd_async.core.devices ophyd_async.core.epicsdemo + ophyd_async.core.signals + ophyd_async.core.async_status + ophyd_async.core.device_collector + ophyd_async.core.utils diff --git a/docs/user/tutorials/installation.rst b/docs/user/tutorials/installation.rst index ac589e6fcd..beb9aa2677 100644 --- a/docs/user/tutorials/installation.rst +++ b/docs/user/tutorials/installation.rst @@ -30,14 +30,14 @@ Installing the library You can now use ``pip`` to install the library and its dependencies:: - $ python3 -m pip install ophyd + $ python3 -m pip install ophyd_async If you require a feature that is not currently released you can also install from github:: - $ python3 -m pip install git+https://github.com/bluesky/ophyd.git + $ python3 -m pip install git+https://github.com/bluesky/ophyd-async.git The library should now be installed and the commandline interface on your path. You can check the version that has been installed by typing:: - $ python3 -m ophyd --version + $ ophyd_async --version diff --git a/pyproject.toml b/pyproject.toml index 48804ca713..d921d02291 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,20 +18,20 @@ dependencies = [ "networkx>=2.0", "numpy", "packaging", - "pint" + "pint", + "bluesky", + "event-model", + "p4p", ] # Add project dependencies here, e.g. 
["click", "numpy"] + dynamic = ["version"] license.file = "LICENSE" readme = "README.rst" requires-python = ">=3.8" [project.optional-dependencies] -ca = [ - "aioca>=1.6" -] -pva = [ - "p4p" -] +ca = ["aioca>=1.6"] +pva = ["p4p"] dev = [ "ophyd_async[pva]", "ophyd_async[ca]", @@ -77,18 +77,18 @@ dev = [ ophyd_async = "ophyd_async.__main__:main" [project.urls] -GitHub = "https://github.com/bluesky/ophyd_async" +GitHub = "https://github.com/bluesky/ophyd-async" [[project.authors]] # Further authors may be added by duplicating this section -email = "rose.yemelyanova@diamond.ac.uk" -name = "Rose Yemelyanova" +email = "tom.cobb@diamond.ac.uk" +name = "Tom Cobb" [tool.setuptools_scm] write_to = "src/ophyd_async/_version.py" [tool.mypy] -ignore_missing_imports = true # Ignore missing stubs in imported modules +ignore_missing_imports = true # Ignore missing stubs in imported modules plugins = ["numpy.typing.mypy_plugin"] [tool.isort] @@ -106,12 +106,11 @@ extend-ignore = [ max-line-length = 88 # Respect black's line length (default 88), exclude = [".tox", "venv"] - [tool.pytest.ini_options] # Run pytest with all our checkers, and don't spam us with massive tracebacks on error addopts = """ --tb=native -vv --strict-markers --doctest-modules --doctest-glob="*.rst" - --ignore=docs/user/examples + --ignore=docs/user/examples --ignore=docs/examples --cov=src/ophyd_async --cov-report term --cov-report xml:cov.xml """ # https://iscinumpy.gitlab.io/post/bound-version-constraints/#watch-for-warnings @@ -127,6 +126,8 @@ markers = [ ] asyncio_mode = "auto" +[tool.coverage.run] +data_file = "/tmp/ophyd_async.coverage" [tool.coverage.paths] # Tests are run from installed location, map back to the src directory @@ -154,4 +155,4 @@ commands = mypy: mypy src tests {posargs} pre-commit: pre-commit run --all-files {posargs} docs: sphinx-{posargs:build -EW --keep-going} -T docs build/html -""" \ No newline at end of file +""" diff --git a/src/ophyd_async/core/__init__.py b/src/ophyd_async/core/__init__.py index 71f22ddb20..a6b6c95615 100644 --- a/src/ophyd_async/core/__init__.py +++ b/src/ophyd_async/core/__init__.py @@ -28,6 +28,7 @@ wait_for_value, ) from .utils import ( + DEFAULT_TIMEOUT, Callback, NotConnected, ReadingValueCallback, @@ -65,6 +66,7 @@ "set_sim_put_proceeds", "set_sim_value", "wait_for_value", + "DEFAULT_TIMEOUT", "Callback", "NotConnected", "ReadingValueCallback", diff --git a/src/ophyd_async/devices/__init__.py b/src/ophyd_async/devices/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/ophyd_async/devices/areadetector.py b/src/ophyd_async/devices/areadetector.py new file mode 100644 index 0000000000..84a2348a93 --- /dev/null +++ b/src/ophyd_async/devices/areadetector.py @@ -0,0 +1,279 @@ +import asyncio +import collections +import tempfile +import time +from abc import abstractmethod +from enum import Enum +from pathlib import Path +from typing import Callable, Dict, Iterator, Optional, Protocol, Sequence, Sized, Type + +from bluesky.protocols import ( + Asset, + Descriptor, + Flyable, + PartialEvent, + Triggerable, + WritesExternalAssets, +) +from bluesky.utils import new_uid +from event_model import compose_stream_resource + +from ophyd_async.core.async_status import AsyncStatus +from ophyd_async.core.devices import Device, StandardReadable +from ophyd_async.core.signals import ( + SignalR, + SignalRW, + epics_signal_r, + epics_signal_rw, + set_and_wait_for_value, +) +from ophyd_async.core.utils import DEFAULT_TIMEOUT, T + + +def ad_rw(datatype: Type[T], prefix: 
str) -> SignalRW[T]: + return epics_signal_rw(datatype, prefix + "_RBV", prefix) + + +def ad_r(datatype: Type[T], prefix: str) -> SignalR[T]: + return epics_signal_r(datatype, prefix + "_RBV") + + +class ImageMode(Enum): + single = "Single" + multiple = "Multiple" + continuous = "Continuous" + + +class ADDriver(Device): + def __init__(self, prefix: str) -> None: + # Define some signals + self.acquire = ad_rw(bool, prefix + "Acquire") + self.acquire_time = ad_rw(float, prefix + "AcquireTime") + self.num_images = ad_rw(int, prefix + "NumImages") + self.image_mode = ad_rw(ImageMode, prefix + "ImageMode") + self.array_counter = ad_rw(int, prefix + "ArrayCounter") + self.array_size_x = ad_r(int, prefix + "ArraySizeX") + self.array_size_y = ad_r(int, prefix + "ArraySizeY") + # There is no _RBV for this one + self.wait_for_plugins = epics_signal_rw(bool, prefix + "WaitForPlugins") + + +class NDPlugin(Device): + pass + + +class NDPluginStats(NDPlugin): + def __init__(self, prefix: str) -> None: + # Define some signals + self.unique_id = ad_r(int, prefix + "UniqueId") + + +class SingleTriggerDet(StandardReadable, Triggerable): + def __init__( + self, + drv: ADDriver, + read_uncached: Sequence[SignalR] = (), + name="", + **plugins: NDPlugin, + ) -> None: + self.drv = drv + self.__dict__.update(plugins) + self.set_readable_signals( + # Can't subscribe to read signals as race between monitor coming back and + # caput callback on acquire + read_uncached=[self.drv.array_counter] + list(read_uncached), + config=[self.drv.acquire_time], + ) + super().__init__(name=name) + + @AsyncStatus.wrap + async def stage(self) -> None: + await asyncio.gather( + self.drv.image_mode.set(ImageMode.single), + self.drv.wait_for_plugins.set(True), + ) + await super().stage() + + @AsyncStatus.wrap + async def trigger(self) -> None: + await self.drv.acquire.set(1) + + +class FileWriteMode(str, Enum): + single = "Single" + capture = "Capture" + stream = "Stream" + + +class NDFileHDF(Device): + def __init__(self, prefix: str) -> None: + # Define some signals + self.file_path = ad_rw(str, prefix + "FilePath") + self.file_name = ad_rw(str, prefix + "FileName") + self.file_template = ad_rw(str, prefix + "FileTemplate") + self.full_file_name = ad_r(str, prefix + "FullFileName") + self.file_write_mode = ad_rw(FileWriteMode, prefix + "FileWriteMode") + self.num_capture = ad_rw(int, prefix + "NumCapture") + self.num_captured = ad_r(int, prefix + "NumCaptured") + self.swmr_mode = ad_rw(bool, prefix + "SWMRMode") + self.lazy_open = ad_rw(bool, prefix + "LazyOpen") + self.capture = ad_rw(bool, prefix + "Capture") + self.flush_now = epics_signal_rw(bool, prefix + "FlushNow") + self.array_size0 = ad_r(int, prefix + "ArraySize0") + self.array_size1 = ad_r(int, prefix + "ArraySize1") + + +class _HDFResource: + def __init__(self) -> None: + # TODO: set to Deque[Asset] after protocols updated for stream* + # https://github.com/bluesky/bluesky/issues/1558 + self.asset_docs = collections.deque() # type: ignore + self._last_emitted = 0 + self._last_flush = time.monotonic() + self._compose_datum: Optional[Callable] = None + + def _append_resource(self, full_file_name: str): + resource_doc, (self._compose_datum,) = compose_stream_resource( + spec="AD_HDF5_SWMR_SLICE", + root="/", + resource_path=full_file_name, + resource_kwargs={}, + stream_names=["primary"], + ) + self.asset_docs.append(("stream_resource", resource_doc)) + + def _append_datum(self, event_count: int): + assert self._compose_datum, "Resource not emitted yet" + datum_doc = 
self._compose_datum( + datum_kwargs={}, + event_offset=self._last_emitted, + event_count=event_count, + ) + self._last_emitted += event_count + self.asset_docs.append(("stream_datum", datum_doc)) + + async def flush_and_publish(self, hdf: NDFileHDF): + num_captured = await hdf.num_captured.get_value() + if num_captured: + if self._compose_datum is None: + self._append_resource(await hdf.full_file_name.get_value()) + event_count = num_captured - self._last_emitted + if event_count: + self._append_datum(event_count) + await hdf.flush_now.set(1) + self._last_flush = time.monotonic() + if time.monotonic() - self._last_flush > FRAME_TIMEOUT: + raise TimeoutError(f"{hdf.name}: writing stalled on frame {num_captured}") + + +class DirectoryProvider(Protocol): + @abstractmethod + async def get_directory(self) -> Path: + ... + + +class TmpDirectoryProvider(DirectoryProvider): + def __init__(self) -> None: + self._directory = Path(tempfile.mkdtemp()) + + async def get_directory(self) -> Path: + return self._directory + + +# How long in seconds to wait between flushes of HDF datasets +FLUSH_PERIOD = 0.5 + +# How long to wait for new frames before timing out +FRAME_TIMEOUT = 120 + + +class HDFStreamerDet(StandardReadable, Flyable, WritesExternalAssets): + def __init__( + self, drv: ADDriver, hdf: NDFileHDF, dp: DirectoryProvider, name="" + ) -> None: + self.drv = drv + self.hdf = hdf + self._dp = dp + self._resource = _HDFResource() + self._capture_status: Optional[AsyncStatus] = None + self._start_status: Optional[AsyncStatus] = None + self.set_readable_signals(config=[self.drv.acquire_time]) + super().__init__(name) + + @AsyncStatus.wrap + async def stage(self) -> None: + # Make a new resource for the new HDF file we're going to open + self._resource = _HDFResource() + await asyncio.gather( + self.drv.wait_for_plugins.set(True), + self.hdf.lazy_open.set(True), + self.hdf.swmr_mode.set(True), + self.hdf.file_path.set(str(await self._dp.get_directory())), + self.hdf.file_name.set(f"{self.name}-{new_uid()}"), + self.hdf.file_template.set("%s/%s.h5"), + # Go forever + self.hdf.num_capture.set(0), + self.hdf.file_write_mode.set(FileWriteMode.stream), + ) + # Wait for it to start, stashing the status that tells us when it finishes + self._capture_status = await set_and_wait_for_value(self.hdf.capture, True) + await super().stage() + + async def describe(self) -> Dict[str, Descriptor]: + datakeys = await super().describe() + # Insert a descriptor for the HDF resource, this will not appear + # in read() as it describes StreamResource outputs only + datakeys[self.name] = Descriptor( + source=self.hdf.full_file_name.source, + shape=await asyncio.gather( + self.drv.array_size_y.get_value(), + self.drv.array_size_x.get_value(), + ), + dtype="array", + external="STREAM:", + ) + return datakeys + + # For step scan, take a single frame + @AsyncStatus.wrap + async def trigger(self): + await self.drv.image_mode.set(ImageMode.single) + frame_timeout = DEFAULT_TIMEOUT + await self.drv.acquire_time.get_value() + await self.drv.acquire.set(1, timeout=frame_timeout) + await self._resource.flush_and_publish(self.hdf) + + def collect_asset_docs(self) -> Iterator[Asset]: + while self._resource.asset_docs: + yield self._resource.asset_docs.popleft() + + # For flyscan, take the number of frames we wanted + @AsyncStatus.wrap + async def kickoff(self) -> None: + await self.drv.image_mode.set(ImageMode.multiple) + # Wait for it to start, stashing the status that tells us when it finishes + self._start_status = await 
set_and_wait_for_value(self.drv.acquire, True) + + # Do the same thing for flyscans and step scans + async def describe_collect(self) -> Dict[str, Dict[str, Descriptor]]: + return {self.name: await self.describe()} + + def collect(self) -> Iterator[PartialEvent]: + yield from iter([]) + + @AsyncStatus.wrap + async def complete(self) -> None: + done: Sized = () + while not done: + assert self._start_status, "Kickoff not run" + done, _ = await asyncio.wait( + (self._start_status.task,), timeout=FLUSH_PERIOD + ) + await self._resource.flush_and_publish(self.hdf) + + @AsyncStatus.wrap + async def unstage(self) -> None: + # Already done a caput callback in _capture_status, so can't do one here + await self.hdf.capture.set(0, wait=False) + assert self._capture_status, "Stage not run" + await self._capture_status + await super().unstage() diff --git a/src/ophyd_async/devices/motor.py b/src/ophyd_async/devices/motor.py new file mode 100644 index 0000000000..846d24320c --- /dev/null +++ b/src/ophyd_async/devices/motor.py @@ -0,0 +1,88 @@ +import asyncio +import time +from typing import Callable, List, Optional + +from bluesky.protocols import Movable, Stoppable + +from ophyd_async.core.async_status import AsyncStatus +from ophyd_async.core.devices import StandardReadable +from ophyd_async.core.signals.epics import ( + epics_signal_r, + epics_signal_rw, + epics_signal_x, +) + + +class Motor(StandardReadable, Movable, Stoppable): + """Device that moves a motor record""" + + def __init__(self, prefix: str, name="") -> None: + # Define some signals + self.setpoint = epics_signal_rw(float, prefix + ".VAL") + self.readback = epics_signal_r(float, prefix + ".RBV") + self.velocity = epics_signal_rw(float, prefix + ".VELO") + self.units = epics_signal_r(str, prefix + ".EGU") + self.precision = epics_signal_r(int, prefix + ".PREC") + # Signals that collide with standard methods should have a trailing underscore + self.stop_ = epics_signal_x(prefix + ".STOP") + # Whether set() should complete successfully or not + self._set_success = True + # Set name and signals for read() and read_configuration() + self.set_readable_signals( + read=[self.readback], + config=[self.velocity, self.units], + ) + super().__init__(name=name) + + def set_name(self, name: str): + super().set_name(name) + # Readback should be named the same as its parent in read() + self.readback.set_name(name) + + async def _move(self, new_position: float, watchers: List[Callable] = []): + self._set_success = True + start = time.monotonic() + old_position, units, precision = await asyncio.gather( + self.setpoint.get_value(), + self.units.get_value(), + self.precision.get_value(), + ) + + def update_watchers(current_position: float): + for watcher in watchers: + watcher( + name=self.name, + current=current_position, + initial=old_position, + target=new_position, + unit=units, + precision=precision, + time_elapsed=time.monotonic() - start, + ) + + self.readback.subscribe_value(update_watchers) + try: + await self.setpoint.set(new_position) + finally: + self.readback.clear_sub(update_watchers) + if not self._set_success: + raise RuntimeError("Motor was stopped") + + def move(self, new_position: float, timeout: Optional[float] = None): + """Commandline only synchronous move of a Motor""" + from bluesky.run_engine import call_in_bluesky_event_loop, in_bluesky_event_loop + + if in_bluesky_event_loop(): + raise RuntimeError("Will deadlock run engine if run in a plan") + call_in_bluesky_event_loop(self._move(new_position), timeout) # type: ignore + + def 
set(self, new_position: float, timeout: Optional[float] = None) -> AsyncStatus: + watchers: List[Callable] = [] + coro = asyncio.wait_for(self._move(new_position, watchers), timeout=timeout) + return AsyncStatus(coro, watchers) + + async def stop(self, success=False): + self._set_success = success + # Put with completion will never complete as we are waiting for completion on + # the move above, so need to pass wait=False + await self.stop_.execute(wait=False) diff --git a/src/ophyd_async/devices/panda.py b/src/ophyd_async/devices/panda.py new file mode 100644 index 0000000000..01bb010a20 --- /dev/null +++ b/src/ophyd_async/devices/panda.py @@ -0,0 +1,327 @@ +from __future__ import annotations + +import atexit +import re +from enum import Enum +from typing import ( + Callable, + Dict, + FrozenSet, + Optional, + Sequence, + Tuple, + Type, + TypedDict, + get_args, + get_origin, + get_type_hints, +) + +import numpy as np +import numpy.typing as npt +from p4p.client.thread import Context + +from ophyd_async.core.backends import SimSignalBackend +from ophyd_async.core.devices import Device, DeviceVector +from ophyd_async.core.signals import ( + Signal, + SignalR, + SignalRW, + SignalX, + epics_signal_r, + epics_signal_rw, + epics_signal_w, + epics_signal_x, +) + + +class PulseBlock(Device): + delay: SignalRW[float] + width: SignalRW[float] + + +class SeqTrigger(Enum): + IMMEDIATE = "Immediate" + BITA_0 = "BITA=0" + BITA_1 = "BITA=1" + BITB_0 = "BITB=0" + BITB_1 = "BITB=1" + BITC_0 = "BITC=0" + BITC_1 = "BITC=1" + POSA_GT = "POSA>=POSITION" + POSA_LT = "POSA<=POSITION" + POSB_GT = "POSB>=POSITION" + POSB_LT = "POSB<=POSITION" + POSC_GT = "POSC>=POSITION" + POSC_LT = "POSC<=POSITION" + + +class SeqTable(TypedDict): + repeats: npt.NDArray[np.uint16] + trigger: Sequence[SeqTrigger] + position: npt.NDArray[np.int32] + time1: npt.NDArray[np.uint32] + outa1: npt.NDArray[np.bool_] + outb1: npt.NDArray[np.bool_] + outc1: npt.NDArray[np.bool_] + outd1: npt.NDArray[np.bool_] + oute1: npt.NDArray[np.bool_] + outf1: npt.NDArray[np.bool_] + time2: npt.NDArray[np.uint32] + outa2: npt.NDArray[np.bool_] + outb2: npt.NDArray[np.bool_] + outc2: npt.NDArray[np.bool_] + outd2: npt.NDArray[np.bool_] + oute2: npt.NDArray[np.bool_] + outf2: npt.NDArray[np.bool_] + + +class SeqBlock(Device): + table: SignalRW[SeqTable] + + +class PcapBlock(Device): + active: SignalR[bool] + + +class PVIEntry(TypedDict, total=False): + d: str + r: str + rw: str + w: str + x: str + + +def block_name_number(block_name: str) -> Tuple[str, Optional[int]]: + """Maps a panda block name to a block and number. + + There are exceptions to this rule; some blocks like pcap do not contain numbers. + Other blocks may contain numbers and letters, but no numbers at the end. + + Such block names will only return the block name, and not a number. + + If this function returns both a block name and number, it should be instantiated + into a device vector.""" + m = re.match("^([0-9a-z_-]*)([0-9]+)$", block_name) + if m is not None: + name, num = m.groups() + return name, int(num or 1) # just to pass type checks. + + return block_name, None + + +def _remove_inconsistent_blocks(pvi: Dict[str, PVIEntry]) -> None: + """Remove blocks from pvi information. + + This is needed because some pandas have 'pcap' and 'pcap1' blocks, which are + inconsistent with the assumption that pandas should only have a 'pcap' block, + for example. 
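+    In that case the numbered duplicate (e.g. 'pcap1') is removed, keeping
+    only the unnumbered block (e.g. 'pcap').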
+ + """ + pvi_keys = set(pvi.keys()) + for k in pvi_keys: + kn = re.sub(r"\d*$", "", k) + if kn and k != kn and kn in pvi_keys: + del pvi[k] + + +async def pvi_get(pv: str, ctxt: Context, timeout: float = 5.0) -> Dict[str, PVIEntry]: + pv_info = ctxt.get(pv, timeout=timeout).get("pvi").todict() + + result = {} + + for attr_name, attr_info in pv_info.items(): + result[attr_name] = PVIEntry(**attr_info) # type: ignore + _remove_inconsistent_blocks(result) + return result + + +class PandA(Device): + _ctxt: Optional[Context] = None + + pulse: DeviceVector[PulseBlock] + seq: DeviceVector[SeqBlock] + pcap: PcapBlock + + def __init__(self, pv: str) -> None: + self._init_prefix = pv + self.pvi_mapping: Dict[FrozenSet[str], Callable[..., Signal]] = { + frozenset({"r", "w"}): lambda dtype, rpv, wpv: epics_signal_rw( + dtype, rpv, wpv + ), + frozenset({"rw"}): lambda dtype, rpv, wpv: epics_signal_rw(dtype, rpv, wpv), + frozenset({"r"}): lambda dtype, rpv, wpv: epics_signal_r(dtype, rpv), + frozenset({"w"}): lambda dtype, rpv, wpv: epics_signal_w(dtype, wpv), + frozenset({"x"}): lambda dtype, rpv, wpv: epics_signal_x(wpv), + } + + @property + def ctxt(self) -> Context: + if PandA._ctxt is None: + PandA._ctxt = Context("pva", nt=False) + + @atexit.register + def _del_ctxt(): + # If we don't do this we get messages like this on close: + # Error in sys.excepthook: + # Original exception was: + PandA._ctxt = None + + return PandA._ctxt + + def verify_block(self, name: str, num: Optional[int]): + """Given a block name and number, return information about a block.""" + anno = get_type_hints(self, globalns=globals()).get(name) + + block: Device = Device() + + if anno: + type_args = get_args(anno) + block = type_args[0]() if type_args else anno() + + if not type_args: + assert num is None, f"Only expected one {name} block, got {num}" + + return block + + async def _make_block( + self, name: str, num: Optional[int], block_pv: str, sim: bool = False + ): + """Makes a block given a block name containing relevant signals. + + Loops through the signals in the block (found using type hints), if not in + sim mode then does a pvi call, and identifies this signal from the pvi call. + """ + block = self.verify_block(name, num) + + field_annos = get_type_hints(block, globalns=globals()) + block_pvi = await pvi_get(block_pv, self.ctxt) if not sim else None + + # finds which fields this class actually has, e.g. delay, width... + for sig_name, sig_type in field_annos.items(): + origin = get_origin(sig_type) + args = get_args(sig_type) + + # if not in sim mode, + if block_pvi: + # try to get this block in the pvi. + entry: Optional[PVIEntry] = block_pvi.get(sig_name) + if entry is None: + raise Exception( + f"{self.__class__.__name__} has a {name} block containing a/" + + f"an {sig_name} signal which has not been retrieved by PVI." + ) + + signal = self._make_signal(entry, args[0] if len(args) > 0 else None) + + else: + backend = SimSignalBackend(args[0] if len(args) > 0 else None, block_pv) + signal = SignalX(backend) if not origin else origin(backend) + + setattr(block, sig_name, signal) + + # checks for any extra pvi information not contained in this class + if block_pvi: + for attr, attr_pvi in block_pvi.items(): + if not hasattr(block, attr): + # makes any extra signals + signal = self._make_signal(attr_pvi) + setattr(block, attr, signal) + + return block + + async def _make_untyped_block(self, block_pv: str): + """Populates a block using PVI information. 
+ + This block is not typed as part of the PandA interface but needs to be + included dynamically anyway. + """ + block = Device() + block_pvi = await pvi_get(block_pv, self.ctxt) + + for signal_name, signal_pvi in block_pvi.items(): + signal = self._make_signal(signal_pvi) + setattr(block, signal_name, signal) + + return block + + def _make_signal(self, signal_pvi: PVIEntry, dtype: Optional[Type] = None): + """Make a signal. + + This assumes datatype is None so it can be used to create dynamic signals. + """ + operations = frozenset(signal_pvi.keys()) + pvs = [signal_pvi[i] for i in operations] # type: ignore + signal_factory = self.pvi_mapping[operations] + + write_pv = pvs[0] + read_pv = write_pv if len(pvs) == 1 else pvs[1] + + return signal_factory(dtype, "pva://" + read_pv, "pva://" + write_pv) + + # TODO redo to set_panda_block? confusing name + def set_attribute(self, name: str, num: Optional[int], block: Device): + """Set a block on the panda. + + Need to be able to set device vectors on the panda as well, e.g. if num is not + None, need to be able to make a new device vector and start populating it... + """ + anno = get_type_hints(self, globalns=globals()).get(name) + + # if it's an annotated device vector, or it isn't but we've got a number then + # make a DeviceVector on the class + if get_origin(anno) == DeviceVector or (not anno and num is not None): + self.__dict__.setdefault(name, DeviceVector())[num] = block + else: + setattr(self, name, block) + + async def connect(self, sim=False) -> None: + """Initialises all blocks and connects them. + + First, checks for pvi information. If it exists, make all blocks from this. + Then, checks that all required blocks in the PandA have been made. + + If there's no pvi information, that's because we're in sim mode. In that case, + makes all required blocks. + """ + pvi = await pvi_get(self._init_prefix + ":PVI", self.ctxt) if not sim else None + hints = { + attr_name: attr_type + for attr_name, attr_type in get_type_hints(self, globalns=globals()).items() + if not attr_name.startswith("_") + } + + # create all the blocks pvi says it should have, + if pvi: + for block_name, block_pvi in pvi.items(): + name, num = block_name_number(block_name) + + if name in hints: + block = await self._make_block(name, num, block_pvi["d"]) + else: + block = await self._make_untyped_block(block_pvi["d"]) + + self.set_attribute(name, num, block) + + # then check if the ones defined in this class are in the pvi info + # make them if there is no pvi info, i.e. sim mode. + for block_name in hints.keys(): + if pvi is not None: + pvi_name = block_name + + if get_origin(hints[block_name]) == DeviceVector: + pvi_name += "1" + + entry: Optional[PVIEntry] = pvi.get(pvi_name) + + assert entry, f"Expected PandA to contain {block_name} block." 
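+                # Top-level PVI entries for blocks should hold only a device
+                # ("d") reference, never direct signal references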
+ assert list(entry) == [ + "d" + ], f"Expected PandA to only contain blocks, got {entry}" + else: + num = 1 if get_origin(hints[block_name]) == DeviceVector else None + block = await self._make_block(block_name, num, "sim://", sim=sim) + self.set_attribute(block_name, num, block) + + self.set_name(self.name) + await super().connect(sim) diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000000..94e2da660b --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,86 @@ +import asyncio +import subprocess +import sys +import time +from pathlib import Path +from typing import Any, Callable + +import pytest +from bluesky.run_engine import RunEngine, TransitionError + +RECORD = str(Path(__file__).parent / "devices" / "db" / "panda.db") +INCOMPLETE_BLOCK_RECORD = str( + Path(__file__).parent / "devices" / "db" / "incomplete_block_panda.db" +) +INCOMPLETE_RECORD = str(Path(__file__).parent / "devices" / "db" / "incomplete_panda.db") +EXTRA_BLOCKS_RECORD = str(Path(__file__).parent / "devices" / "db" / "extra_blocks_panda.db") + + +@pytest.fixture(scope="function") +def RE(request): + loop = asyncio.new_event_loop() + loop.set_debug(True) + RE = RunEngine({}, call_returns_result=True, loop=loop) + + def clean_event_loop(): + if RE.state not in ("idle", "panicked"): + try: + RE.halt() + except TransitionError: + pass + loop.call_soon_threadsafe(loop.stop) + RE._th.join() + loop.close() + + request.addfinalizer(clean_event_loop) + return RE + + +@pytest.fixture(scope="module", params=["pva"]) +def pva(): + processes = [ + subprocess.Popen( + [ + sys.executable, + "-m", + "epicscorelibs.ioc", + "-m", + macros, + "-d", + RECORD, + ], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) + for macros in [ + "INCLUDE_EXTRA_BLOCK=,INCLUDE_EXTRA_SIGNAL=", + "EXCLUDE_WIDTH=#,IOC_NAME=PANDAQSRVIB", + "EXCLUDE_PCAP=#,IOC_NAME=PANDAQSRVI", + ] + ] + time.sleep(2) + + for p in processes: + assert not p.poll(), p.stdout.read() + + yield processes + [p.terminate() for p in processes] + + +@pytest.fixture +async def normal_coroutine() -> Callable[[], Any]: + async def inner_coroutine(): + await asyncio.sleep(0.01) + + return inner_coroutine + + +@pytest.fixture +async def failing_coroutine() -> Callable[[], Any]: + async def inner_coroutine(): + await asyncio.sleep(0.01) + raise ValueError() + + return inner_coroutine diff --git a/tests/core/conftest.py b/tests/core/conftest.py deleted file mode 100644 index 41e97c1963..0000000000 --- a/tests/core/conftest.py +++ /dev/null @@ -1,42 +0,0 @@ -import asyncio -from typing import Any, Callable - -import pytest -from bluesky.run_engine import RunEngine, TransitionError - - -@pytest.fixture(scope="function") -def RE(request): - loop = asyncio.new_event_loop() - loop.set_debug(True) - RE = RunEngine({}, call_returns_result=True, loop=loop) - - def clean_event_loop(): - if RE.state not in ("idle", "panicked"): - try: - RE.halt() - except TransitionError: - pass - loop.call_soon_threadsafe(loop.stop) - RE._th.join() - loop.close() - - request.addfinalizer(clean_event_loop) - return RE - - -@pytest.fixture -async def normal_coroutine() -> Callable[[], Any]: - async def inner_coroutine(): - await asyncio.sleep(0.01) - - return inner_coroutine - - -@pytest.fixture -async def failing_coroutine() -> Callable[[], Any]: - async def inner_coroutine(): - await asyncio.sleep(0.01) - raise ValueError() - - return inner_coroutine diff --git a/tests/devices/db/panda.db b/tests/devices/db/panda.db new file 
mode 100644 index 0000000000..faf444452c --- /dev/null +++ b/tests/devices/db/panda.db @@ -0,0 +1,533 @@ +record(ao, "$(IOC_NAME=PANDAQSRV):PULSE1:DELAY") +{ + field(EGU, "us") + # Add to PULSE1:PVI PVA structure + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):PULSE1:PVI": { + "pvi.delay.rw": { + "+channel": "NAME", + "+type": "plain" + } + } + }) +} + + +#if EXCLUDE_WIDTH is set to "#", this bit is commented out. +$(EXCLUDE_WIDTH=)record(ao, "$(IOC_NAME=PANDAQSRV):PULSE1:WIDTH") +$(EXCLUDE_WIDTH=){ +$(EXCLUDE_WIDTH=) field(EGU, "us") +$(EXCLUDE_WIDTH=) info(Q:group, { +$(EXCLUDE_WIDTH=) # Add to PULSE1:PVI PVA structure +$(EXCLUDE_WIDTH=) "$(IOC_NAME=PANDAQSRV):PULSE1:PVI": { +$(EXCLUDE_WIDTH=) "pvi.width.rw": { +$(EXCLUDE_WIDTH=) "+channel": "NAME", +$(EXCLUDE_WIDTH=) "+type": "plain" +$(EXCLUDE_WIDTH=) } +$(EXCLUDE_WIDTH=) } +$(EXCLUDE_WIDTH=) }) +$(EXCLUDE_WIDTH=)} + +record(bi, "$(IOC_NAME=PANDAQSRV):PCAP:ACTIVE") +{ + field(ZNAM, "0") + field(ONAM, "1") + field(PINI, "YES") + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):PCAP:PVI": { + "pvi.active.r": { + "+channel": "NAME", + "+type": "plain" + } + } + }) +} + +record(waveform, "BOOL:PLEASE") +{ + field(NELM, 10) + field(FTVL, "ENUM") + field(INP, [0, 0, 1, 1, 0, 1, 1, 0]) + info(Q:form, "Binary") +} + +# We want to add $(IOC_NAME=PANDAQSRV):PULSE1 to $(IOC_NAME=PANDAQSRV):PVI +# structure, but we can't create a record called +# $(IOC_NAME=PANDAQSRV):PVI as it will shadow the QSRV created structure. +# We make a different PV and use its input link to hack around this +record(stringin, "$(IOC_NAME=PANDAQSRV):PULSE1:_PVI") +{ + field(VAL, "$(IOC_NAME=PANDAQSRV):PULSE1:PVI") + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):PVI": { + "pvi.pulse1.d": { + "+channel": "VAL", + "+type": "plain" + } + } + }) +} + +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE:LABELS") { + field(FTVL, "STRING") + field(NELM, "64") + field(INP , {const:["Repeats", "Trigger", "Position", "Time1", "OutA1", "OutB1", "OutC1", "OutD1", "OutE1", "OutF1", "Time2", "OutA2", "OutB2", "OutC2", "OutD2", "OutE2", "OutF2"]}) + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE": { + "+id": "epics:nt/NTTable:1.0", + "labels": { + "+type": "plain", + "+channel": "VAL" + } + } + }) +} + +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE:REPEATS") +{ + field(FTVL, "USHORT") + field(NELM, "4096") + field(INP, {const:[1, 1, 1, 32]}) + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE": { + "value.repeats": { + "+type": "plain", + "+channel": "VAL", + "+putorder":0 + } + } + }) +} + +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE:TRIGGER") +{ + field(FTVL, "STRING") + field(NELM, "4096") + field(INP, {const:["POSA>=POSITION", "POSA<=POSITION", "Immediate", "Immediate"]}) + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE": { + "value.trigger": { + "+type": "plain", + "+channel": "VAL", + "+putorder":1 + } + } + }) +} + +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE:POSITION") +{ + field(FTVL, "LONG") + field(NELM, "4096") + field(INP, {const:["3222", "-565", "0", "0"]}) + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE": { + "value.position": { + "+type": "plain", + "+channel": "VAL", + "+putorder":2 + } + } + }) +} + +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE:TIME1") +{ + field(FTVL, "ULONG") + field(NELM, "4096") + field(INP, {const:["5", "0", "10", "10"]}) + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE": { + "value.time1": { + "+type": "plain", + "+channel": "VAL", + "+putorder":3 + } + } + }) +} + +record(waveform, 
"$(IOC_NAME=PANDAQSRV):SEQ1:TABLE:OUTA1") +{ + field(FTVL, "UCHAR") + field(NELM, "4096") + field(INP, {const:[true, false, false, true]}) + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE": { + "value.outa1": { + "+type": "plain", + "+channel": "VAL", + "+putorder":4 + } + } + }) +} + +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE:OUTB1") +{ + field(FTVL, "UCHAR") + field(NELM, "4096") + field(INP, {const:[false, false, true, true]}) + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE": { + "value.outb1": { + "+type": "plain", + "+channel": "VAL", + "+putorder":5 + } + } + }) +} + +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE:OUTC1") +{ + field(FTVL, "UCHAR") + field(NELM, "4096") + field(INP, {const:[false, true, true, false]}) + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE": { + "value.outc1": { + "+type": "plain", + "+channel": "VAL", + "+putorder":6 + } + } + }) +} + +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE:OUTD1") +{ + field(FTVL, "UCHAR") + field(NELM, "4096") + field(INP, {const:[true, true, false, true]}) + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE": { + "value.outd1": { + "+type": "plain", + "+channel": "VAL", + "+putorder":7 + } + } + }) +} + +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE:OUTE1") +{ + field(FTVL, "UCHAR") + field(NELM, "4096") + field(INP, {const:[true, false, true, false]}) + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE": { + "value.oute1": { + "+type": "plain", + "+channel": "VAL", + "+putorder":8 + } + } + }) +} + +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE:OUTF1") +{ + field(FTVL, "UCHAR") + field(NELM, "4096") + field(INP, {const:[true, false, false, false]}) + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE": { + "value.outf1": { + "+type": "plain", + "+channel": "VAL", + "+putorder":9 + } + } + }) +} + +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE:TIME2") +{ + field(FTVL, "ULONG") + field(NELM, "4096") + field(INP, {const:["0", "10", "10", "11"]}) + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE": { + "value.time2": { + "+type": "plain", + "+channel": "VAL", + "+putorder":10 + } + } + }) +} + +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE:OUTA2") +{ + field(FTVL, "UCHAR") + field(NELM, "4096") + field(INP, {const:[true, false, false, true]}) + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE": { + "value.outa2": { + "+type": "plain", + "+channel": "VAL", + "+putorder":11 + } + } + }) +} + +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE:OUTB2") +{ + field(FTVL, "UCHAR") + field(NELM, "4096") + field(INP, {const:[false, false, true, true]}) + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE": { + "value.outb2": { + "+type": "plain", + "+channel": "VAL", + "+putorder":12 + } + } + }) +} + +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE:OUTC2") +{ + field(FTVL, "UCHAR") + field(NELM, "4096") + field(INP, {const:[false, true, true, false]}) + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE": { + "value.outc2": { + "+type": "plain", + "+channel": "VAL", + "+putorder":13 + } + } + }) +} + +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE:OUTD2") +{ + field(FTVL, "UCHAR") + field(NELM, "4096") + field(INP, {const:[true, true, false, true]}) + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE": { + "value.outd2": { + "+type": "plain", + "+channel": "VAL", + "+putorder":14 + } + } + }) +} + +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE:OUTE2") +{ + field(FTVL, "UCHAR") + field(NELM, "4096") + field(INP, {const:[true, false, true, false]}) + info(Q:group, 
{ + "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE": { + "value.oute2": { + "+type": "plain", + "+channel": "VAL", + "+putorder":15 + } + } + }) +} + +# Last column has metadata +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE:OUTF2") +{ + field(FTVL, "UCHAR") + field(NELM, "4096") + field(INP, {const:[true, false, false, false]}) + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE": { + "value.outf2": { + "+type": "plain", + "+channel": "VAL", + "+putorder":16 + }, + "": {"+type": "meta", "+channel": "VAL"}, + } + }) +} + +# Again, SEQ1:TABLE is QSRV only, so need to make a fake PV to put it in +# SEQ1:PVI +record(stringin, "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE:_PVI") +{ + field(VAL, "$(IOC_NAME=PANDAQSRV):SEQ1:TABLE") + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:PVI": { + "pvi.table.rw": { + "+channel": "VAL", + "+type": "plain", + "+putorder":17 + } + } + }) +} + + +record(stringin, "$(IOC_NAME=PANDAQSRV):SEQ1:_PVI") +{ + field(VAL, "$(IOC_NAME=PANDAQSRV):SEQ1:PVI") + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):PVI": { + "pvi.seq1.d": { + "+channel": "VAL", + "+type": "plain", + "+putorder":18 + } + } + }) +} + + + +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:STRTABLE:LABELS") { + field(FTVL, "STRING") + field(NELM, "64") + field(INP , {const:["Col1", "Col2"]}) + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:STRTABLE": { + "+id": "epics:nt/NTTable:1.0", + "labels": { + "+type": "plain", + "+channel": "VAL" + } + } + }) +} + +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:STRTABLE:COL1") +{ + field(FTVL, "STRING") + field(NELM, "64") + field(INP, {const:["Foo", "Bar"]}) + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:STRTABLE": { + "value.col1": { + "+type": "plain", + "+channel": "VAL", + "+putorder":1 + }, + } + }) +} + +record(waveform, "$(IOC_NAME=PANDAQSRV):SEQ1:STRTABLE:COL2") +{ + field(FTVL, "STRING") + field(NELM, "64") + field(INP, {const:["Bat", "Baz"]}) + field(PINI, "YES") + info(Q:group, { + "$(IOC_NAME=PANDAQSRV):SEQ1:STRTABLE": { + "value.col2": { + "+type": "plain", + "+channel": "VAL", + "+putorder":2 + }, + "": {"+type": "meta", "+channel": "VAL"}, + } + }) +} + + +$(EXCLUDE_PCAP=)record(ao, "$(IOC_NAME=PANDAQSRV):PCAP:ARM") +$(EXCLUDE_PCAP=){ +$(EXCLUDE_PCAP=) info(Q:group, { +$(EXCLUDE_PCAP=) "$(IOC_NAME=PANDAQSRV):PCAP:PVI": { +$(EXCLUDE_PCAP=) "pvi.arm.x": { +$(EXCLUDE_PCAP=) "+channel": "NAME", +$(EXCLUDE_PCAP=) "+type": "plain" +$(EXCLUDE_PCAP=) } +$(EXCLUDE_PCAP=) } +$(EXCLUDE_PCAP=) }) +$(EXCLUDE_PCAP=)} + +$(INCLUDE_EXTRA_SIGNAL=#)record(ao, "$(IOC_NAME=PANDAQSRV):PCAP:NEWSIGNAL") +$(INCLUDE_EXTRA_SIGNAL=#){ +$(INCLUDE_EXTRA_SIGNAL=#) info(Q:group, { +$(INCLUDE_EXTRA_SIGNAL=#) "$(IOC_NAME=PANDAQSRV):PCAP:PVI": { +$(INCLUDE_EXTRA_SIGNAL=#) "pvi.newsignal.x": { +$(INCLUDE_EXTRA_SIGNAL=#) "+channel": "NAME", +$(INCLUDE_EXTRA_SIGNAL=#) "+type": "plain" +$(INCLUDE_EXTRA_SIGNAL=#) } +$(INCLUDE_EXTRA_SIGNAL=#) } +$(INCLUDE_EXTRA_SIGNAL=#) }) +$(INCLUDE_EXTRA_SIGNAL=#)} + +$(EXCLUDE_PCAP=) +$(EXCLUDE_PCAP=) +$(EXCLUDE_PCAP=)record(stringin, "$(IOC_NAME=PANDAQSRV):PCAP:_PVI") +$(EXCLUDE_PCAP=){ +$(EXCLUDE_PCAP=) field(VAL, "$(IOC_NAME=PANDAQSRV):PCAP:PVI") +$(EXCLUDE_PCAP=) info(Q:group, { +$(EXCLUDE_PCAP=) "$(IOC_NAME=PANDAQSRV):PVI": { +$(EXCLUDE_PCAP=) "pvi.pcap.d": { +$(EXCLUDE_PCAP=) "+channel": "VAL", +$(EXCLUDE_PCAP=) "+type": "plain" +$(EXCLUDE_PCAP=) } +$(EXCLUDE_PCAP=) } +$(EXCLUDE_PCAP=) }) +$(EXCLUDE_PCAP=)} + + +$(INCLUDE_EXTRA_BLOCK=#)record(ao, "$(IOC_NAME=PANDAQSRV):EXTRA1:ARM") +$(INCLUDE_EXTRA_BLOCK=#){ +$(INCLUDE_EXTRA_BLOCK=#) info(Q:group, { 
+$(INCLUDE_EXTRA_BLOCK=#) "$(IOC_NAME=PANDAQSRV):EXTRA1:PVI": { +$(INCLUDE_EXTRA_BLOCK=#) "pvi.arm.x": { +$(INCLUDE_EXTRA_BLOCK=#) "+channel": "NAME", +$(INCLUDE_EXTRA_BLOCK=#) "+type": "plain" +$(INCLUDE_EXTRA_BLOCK=#) } +$(INCLUDE_EXTRA_BLOCK=#) } +$(INCLUDE_EXTRA_BLOCK=#) }) +$(INCLUDE_EXTRA_BLOCK=#)} +$(INCLUDE_EXTRA_BLOCK=#) +$(INCLUDE_EXTRA_BLOCK=#) +$(INCLUDE_EXTRA_BLOCK=#)record(stringin, "$(IOC_NAME=PANDAQSRV):EXTRA1:_PVI") +$(INCLUDE_EXTRA_BLOCK=#){ +$(INCLUDE_EXTRA_BLOCK=#) field(VAL, "$(IOC_NAME=PANDAQSRV):EXTRA1:PVI") +$(INCLUDE_EXTRA_BLOCK=#) info(Q:group, { +$(INCLUDE_EXTRA_BLOCK=#) "$(IOC_NAME=PANDAQSRV):PVI": { +$(INCLUDE_EXTRA_BLOCK=#) "pvi.extra1.d": { +$(INCLUDE_EXTRA_BLOCK=#) "+channel": "VAL", +$(INCLUDE_EXTRA_BLOCK=#) "+type": "plain" +$(INCLUDE_EXTRA_BLOCK=#) } +$(INCLUDE_EXTRA_BLOCK=#) } +$(INCLUDE_EXTRA_BLOCK=#) }) +$(INCLUDE_EXTRA_BLOCK=#)} + +$(INCLUDE_EXTRA_BLOCK=#)record(ao, "$(IOC_NAME=PANDAQSRV):EXTRA2:ARM") +$(INCLUDE_EXTRA_BLOCK=#){ +$(INCLUDE_EXTRA_BLOCK=#) info(Q:group, { +$(INCLUDE_EXTRA_BLOCK=#) "$(IOC_NAME=PANDAQSRV):EXTRA2:PVI": { +$(INCLUDE_EXTRA_BLOCK=#) "pvi.arm.x": { +$(INCLUDE_EXTRA_BLOCK=#) "+channel": "NAME", +$(INCLUDE_EXTRA_BLOCK=#) "+type": "plain" +$(INCLUDE_EXTRA_BLOCK=#) } +$(INCLUDE_EXTRA_BLOCK=#) } +$(INCLUDE_EXTRA_BLOCK=#) }) +$(INCLUDE_EXTRA_BLOCK=#)} +$(INCLUDE_EXTRA_BLOCK=#) +$(INCLUDE_EXTRA_BLOCK=#) +$(INCLUDE_EXTRA_BLOCK=#)record(stringin, "$(IOC_NAME=PANDAQSRV):EXTRA2:_PVI") +$(INCLUDE_EXTRA_BLOCK=#){ +$(INCLUDE_EXTRA_BLOCK=#) field(VAL, "$(IOC_NAME=PANDAQSRV):EXTRA2:PVI") +$(INCLUDE_EXTRA_BLOCK=#) info(Q:group, { +$(INCLUDE_EXTRA_BLOCK=#) "$(IOC_NAME=PANDAQSRV):PVI": { +$(INCLUDE_EXTRA_BLOCK=#) "pvi.extra2.d": { +$(INCLUDE_EXTRA_BLOCK=#) "+channel": "VAL", +$(INCLUDE_EXTRA_BLOCK=#) "+type": "plain" +$(INCLUDE_EXTRA_BLOCK=#) } +$(INCLUDE_EXTRA_BLOCK=#) } +$(INCLUDE_EXTRA_BLOCK=#) }) +$(INCLUDE_EXTRA_BLOCK=#)} diff --git a/tests/devices/test_area_detector.py b/tests/devices/test_area_detector.py new file mode 100644 index 0000000000..891e17483b --- /dev/null +++ b/tests/devices/test_area_detector.py @@ -0,0 +1,226 @@ +import time +from typing import List +from unittest.mock import patch + +import bluesky.plan_stubs as bps +import bluesky.plans as bp +import bluesky.preprocessors as bpp +import pytest +from ophyd_async.core.device_collector import DeviceCollector +from ophyd_async.core.signals import set_sim_put_proceeds, set_sim_value + +from ophyd_async.devices.areadetector import ( + ADDriver, + FileWriteMode, + HDFStreamerDet, + ImageMode, + NDFileHDF, + NDPluginStats, + SingleTriggerDet, + TmpDirectoryProvider, +) + + +@pytest.fixture +async def single_trigger_det(): + async with DeviceCollector(sim=True): + stats = NDPluginStats("PREFIX:STATS") + det = SingleTriggerDet( + drv=ADDriver("PREFIX:DRV"), stats=stats, read_uncached=[stats.unique_id] + ) + + assert det.name == "det" + assert stats.name == "det-stats" + # Set non-default values to check they are set back + # These are using set_sim_value to simulate the backend IOC being setup + # in a particular way, rather than values being set by the Ophyd signals + set_sim_value(det.drv.acquire_time, 0.5) + set_sim_value(det.drv.array_counter, 1) + set_sim_value(det.drv.image_mode, ImageMode.continuous) + set_sim_value(stats.unique_id, 3) + yield det + + +class DocHolder: + def __init__(self): + self.names = [] + self.docs = [] + + def append(self, name, doc): + self.names.append(name) + self.docs.append(doc) + + +async def test_single_trigger_det(single_trigger_det: 
+    d = DocHolder()
+    RE(bp.count([single_trigger_det]), d.append)
+
+    drv = single_trigger_det.drv
+    assert 1 == await drv.acquire.get_value()
+    assert ImageMode.single == await drv.image_mode.get_value()
+    assert True is await drv.wait_for_plugins.get_value()
+
+    assert d.names == ["start", "descriptor", "event", "stop"]
+    _, descriptor, event, _ = d.docs
+    assert descriptor["configuration"]["det"]["data"]["det-drv-acquire_time"] == 0.5
+    assert (
+        descriptor["data_keys"]["det-stats-unique_id"]["source"]
+        == "sim://PREFIX:STATSUniqueId_RBV"
+    )
+    assert event["data"]["det-drv-array_counter"] == 1
+    assert event["data"]["det-stats-unique_id"] == 3
+
+
+@pytest.fixture
+async def hdf_streamer_dets():
+    dp = TmpDirectoryProvider()
+    async with DeviceCollector(sim=True):
+        deta = HDFStreamerDet(
+            drv=ADDriver(prefix="PREFIX1:DET"),
+            hdf=NDFileHDF("PREFIX1:HDF"),
+            dp=dp,
+        )
+        detb = HDFStreamerDet(
+            drv=ADDriver(prefix="PREFIX2:DET"),
+            hdf=NDFileHDF("PREFIX2:HDF"),
+            dp=dp,
+        )
+
+    assert deta.name == "deta"
+    assert detb.name == "detb"
+    assert deta.drv.name == "deta-drv"
+    assert deta.hdf.name == "deta-hdf"
+
+    # Simulate backend IOCs being in slightly different states
+    for i, det in enumerate((deta, detb)):
+        set_sim_value(det.drv.acquire_time, 0.8 + i)
+        set_sim_value(det.drv.image_mode, ImageMode.continuous)
+        set_sim_value(det.hdf.num_capture, 1000)
+        set_sim_value(det.hdf.num_captured, 1)
+        set_sim_value(det.hdf.full_file_name, f"/tmp/123456/{det.name}.h5")
+        set_sim_value(det.drv.array_size_x, 1024 + i)
+        set_sim_value(det.drv.array_size_y, 768 + i)
+    yield deta, detb
+
+
+async def test_hdf_streamer_dets_step(hdf_streamer_dets: List[HDFStreamerDet], RE):
+    d = DocHolder()
+    RE(bp.count(hdf_streamer_dets), d.append)
+
+    drv = hdf_streamer_dets[0].drv
+    assert 1 == await drv.acquire.get_value()
+    assert ImageMode.single == await drv.image_mode.get_value()
+    assert True is await drv.wait_for_plugins.get_value()
+
+    hdf = hdf_streamer_dets[1].hdf
+    assert True is await hdf.lazy_open.get_value()
+    assert True is await hdf.swmr_mode.get_value()
+    assert 0 == await hdf.num_capture.get_value()
+    assert FileWriteMode.stream == await hdf.file_write_mode.get_value()
+
+    assert d.names == [
+        "start",
+        "descriptor",
+        "stream_resource",
+        "stream_datum",
+        "stream_resource",
+        "stream_datum",
+        "event",
+        "stop",
+    ]
+    _, descriptor, sra, sda, srb, sdb, event, _ = d.docs
+    assert descriptor["configuration"]["deta"]["data"]["deta-drv-acquire_time"] == 0.8
+    assert descriptor["configuration"]["detb"]["data"]["detb-drv-acquire_time"] == 1.8
+    assert descriptor["data_keys"]["deta"]["shape"] == [768, 1024]
+    assert descriptor["data_keys"]["detb"]["shape"] == [769, 1025]
+    assert sra["resource_path"] == "/tmp/123456/deta.h5"
+    assert srb["resource_path"] == "/tmp/123456/detb.h5"
+    assert sda["stream_resource"] == sra["uid"]
+    assert sdb["stream_resource"] == srb["uid"]
+    for sd in (sda, sdb):
+        assert sd["event_offset"] == 0
+        assert sd["event_count"] == 1
+    assert event["data"] == {}
+
+
+# TODO: write test where they are in the same stream after
+# https://github.com/bluesky/bluesky/issues/1558
+async def test_hdf_streamer_dets_fly_different_streams(
+    hdf_streamer_dets: List[HDFStreamerDet], RE
+):
+    d = DocHolder()
+    deta, detb = hdf_streamer_dets
+
+    for det in hdf_streamer_dets:
+        set_sim_value(det.hdf.num_captured, 5)
+
+    @bpp.stage_decorator(hdf_streamer_dets)
+    @bpp.run_decorator()
+    def fly_det(num: int):
+        # Set the number of images
+        yield from bps.mov(deta.drv.num_images, num, detb.drv.num_images, num)
+        # Kick them off in parallel and wait to be done
+        for det in hdf_streamer_dets:
+            yield from bps.kickoff(det, wait=False, group="kickoff")
+        yield from bps.wait(group="kickoff")
+        # Complete them and repeatedly collect until done
+        statuses = []
+        for det in hdf_streamer_dets:
+            status = yield from bps.complete(det, wait=False, group="complete")
+            statuses.append(status)
+        while any(status and not status.done for status in statuses):
+            yield from bps.sleep(0.1)
+            for det in hdf_streamer_dets:
+                yield from bps.collect(det, stream=True, return_payload=False)
+        yield from bps.wait(group="complete")
+
+    RE(fly_det(5), d.append)
+
+    # TODO: stream_* will come after descriptor soon
+    assert d.names == [
+        "start",
+        "stream_resource",
+        "stream_datum",
+        "descriptor",
+        "stream_resource",
+        "stream_datum",
+        "descriptor",
+        "stop",
+    ]
+
+    drv = hdf_streamer_dets[0].drv
+    assert 1 == await drv.acquire.get_value()
+    assert ImageMode.multiple == await drv.image_mode.get_value()
+    assert True is await drv.wait_for_plugins.get_value()
+
+    hdf = hdf_streamer_dets[1].hdf
+    assert True is await hdf.lazy_open.get_value()
+    assert True is await hdf.swmr_mode.get_value()
+    assert 0 == await hdf.num_capture.get_value()
+    assert FileWriteMode.stream == await hdf.file_write_mode.get_value()
+
+    _, sra, sda, descriptora, srb, sdb, descriptorb, _ = d.docs
+
+    assert descriptora["configuration"]["deta"]["data"]["deta-drv-acquire_time"] == 0.8
+    assert descriptorb["configuration"]["detb"]["data"]["detb-drv-acquire_time"] == 1.8
+    assert descriptora["data_keys"]["deta"]["shape"] == [768, 1024]
+    assert descriptorb["data_keys"]["detb"]["shape"] == [769, 1025]
+    assert sra["resource_path"] == "/tmp/123456/deta.h5"
+    assert srb["resource_path"] == "/tmp/123456/detb.h5"
+    assert sda["stream_resource"] == sra["uid"]
+    assert sdb["stream_resource"] == srb["uid"]
+    for sd in (sda, sdb):
+        assert sd["event_offset"] == 0
+        assert sd["event_count"] == 5
+
+
+async def test_hdf_streamer_dets_timeout(hdf_streamer_dets: List[HDFStreamerDet]):
+    det, _ = hdf_streamer_dets
+    await det.stage()
+    set_sim_put_proceeds(det.drv.acquire, False)
+    await det.kickoff()
+    t = time.monotonic()
+    with patch("ophyd_async.devices.areadetector.FRAME_TIMEOUT", 0.1):
+        with pytest.raises(TimeoutError, match="deta-hdf: writing stalled on frame 1"):
+            await det.complete()
+    assert 1.0 < time.monotonic() - t < 1.1
diff --git a/tests/devices/test_motor.py b/tests/devices/test_motor.py
new file mode 100644
index 0000000000..fcfeebcc80
--- /dev/null
+++ b/tests/devices/test_motor.py
@@ -0,0 +1,124 @@
+import asyncio
+from typing import Dict
+from unittest.mock import Mock, call
+
+import pytest
+from bluesky.protocols import Reading
+from ophyd_async.core.device_collector import DeviceCollector
+from ophyd_async.core.signals import set_sim_put_proceeds, set_sim_value
+
+from ophyd_async.devices import motor
+
+# Long enough for multiple asyncio event loop cycles to run so
+# all the tasks have a chance to run
+A_BIT = 0.001
+
+
+@pytest.fixture
+async def sim_motor():
+    async with DeviceCollector(sim=True):
+        sim_motor = motor.Motor("BLxxI-MO-TABLE-01:X")
+        # Signals connected here
+
+    assert sim_motor.name == "sim_motor"
+    set_sim_value(sim_motor.units, "mm")
+    set_sim_value(sim_motor.precision, 3)
+    set_sim_value(sim_motor.velocity, 1)
+    yield sim_motor
+
+
+async def test_motor_moving_well(sim_motor: motor.Motor) -> None:
+    set_sim_put_proceeds(sim_motor.setpoint, False)
+    s = sim_motor.set(0.55)
+    watcher = Mock()
+    s.watch(watcher)
+    done = Mock()
+    s.add_callback(done)
+    await asyncio.sleep(A_BIT)
+    assert watcher.call_count == 1
+    assert watcher.call_args == call(
+        name="sim_motor",
+        current=0.0,
+        initial=0.0,
+        target=0.55,
+        unit="mm",
+        precision=3,
+        time_elapsed=pytest.approx(0.0, abs=0.05),
+    )
+    watcher.reset_mock()
+    assert 0.55 == await sim_motor.setpoint.get_value()
+    assert not s.done
+    await asyncio.sleep(0.1)
+    set_sim_value(sim_motor.readback, 0.1)
+    assert watcher.call_count == 1
+    assert watcher.call_args == call(
+        name="sim_motor",
+        current=0.1,
+        initial=0.0,
+        target=0.55,
+        unit="mm",
+        precision=3,
+        time_elapsed=pytest.approx(0.1, abs=0.05),
+    )
+    set_sim_put_proceeds(sim_motor.setpoint, True)
+    await asyncio.sleep(A_BIT)
+    assert s.done
+    done.assert_called_once_with(s)
+
+
+async def test_motor_moving_stopped(sim_motor: motor.Motor):
+    set_sim_put_proceeds(sim_motor.setpoint, False)
+    s = sim_motor.set(1.5)
+    s.add_callback(Mock())
+    await asyncio.sleep(0.2)
+    assert not s.done
+    await sim_motor.stop()
+    set_sim_put_proceeds(sim_motor.setpoint, True)
+    await asyncio.sleep(A_BIT)
+    assert s.done
+    assert s.success is False
+
+
+async def test_read_motor(sim_motor: motor.Motor):
+    sim_motor.stage()
+    assert (await sim_motor.read())["sim_motor"]["value"] == 0.0
+    assert (await sim_motor.describe())["sim_motor"][
+        "source"
+    ] == "sim://BLxxI-MO-TABLE-01:X.RBV"
+    assert (await sim_motor.read_configuration())["sim_motor-velocity"]["value"] == 1
+    assert (await sim_motor.describe_configuration())["sim_motor-units"]["shape"] == []
+    set_sim_value(sim_motor.readback, 0.5)
+    assert (await sim_motor.read())["sim_motor"]["value"] == 0.5
+    sim_motor.unstage()
+    # Check we can still read and describe when not staged
+    set_sim_value(sim_motor.readback, 0.1)
+    assert (await sim_motor.read())["sim_motor"]["value"] == 0.1
+    assert await sim_motor.describe()
+
+
+async def test_set_velocity(sim_motor: motor.Motor) -> None:
+    v = sim_motor.velocity
+    assert (await v.describe())["sim_motor-velocity"][
+        "source"
+    ] == "sim://BLxxI-MO-TABLE-01:X.VELO"
+    q: asyncio.Queue[Dict[str, Reading]] = asyncio.Queue()
+    v.subscribe(q.put_nowait)
+    assert (await q.get())["sim_motor-velocity"]["value"] == 1.0
+    await v.set(2.0)
+    assert (await q.get())["sim_motor-velocity"]["value"] == 2.0
+    v.clear_sub(q.put_nowait)
+    await v.set(3.0)
+    assert (await v.read())["sim_motor-velocity"]["value"] == 3.0
+    assert q.empty()
+
+
+def test_motor_in_re(sim_motor: motor.Motor, RE) -> None:
+    sim_motor.move(0)
+
+    def my_plan():
+        sim_motor.move(0)
+        return
+        yield
+
+    with pytest.raises(RuntimeError, match="Will deadlock run engine if run in a plan"):
+        RE(my_plan())
diff --git a/tests/devices/test_panda.py b/tests/devices/test_panda.py
new file mode 100644
index 0000000000..71d4eeb650
--- /dev/null
+++ b/tests/devices/test_panda.py
@@ -0,0 +1,126 @@
+"""Test file specifying how we want to eventually interact with the panda..."""
+import copy
+from typing import Dict
+
+import numpy as np
+import pytest
+from ophyd_async.core.device_collector import DeviceCollector
+
+from ophyd_async.devices.panda import PandA, PVIEntry, SeqTable, SeqTrigger, pvi_get
+
+
+class DummyDict:
+    def __init__(self, dict) -> None:
+        self.dict = dict
+
+    def todict(self):
+        return self.dict
+
+
+class MockPvi:
+    def __init__(self, pvi: Dict[str, PVIEntry]) -> None:
+        self.pvi = pvi
+
+    def get(self, item: str):
+        return DummyDict(self.pvi)
+
+
+class MockCtxt:
+    def __init__(self, pvi: Dict[str, PVIEntry]) -> None:
+        self.pvi = copy.copy(pvi)
+
+    def get(self, pv: str, timeout: float = 0.0):
+        return MockPvi(self.pvi)
+
+
+@pytest.fixture
+async def sim_panda():
+    async with DeviceCollector(sim=True):
+        sim_panda = PandA("PANDAQSRV")
+
+    assert sim_panda.name == "sim_panda"
+    yield sim_panda
+
+
+def test_panda_names_correct(sim_panda: PandA):
+    assert sim_panda.seq[1].name == "sim_panda-seq-1"
+    assert sim_panda.pulse[1].name == "sim_panda-pulse-1"
+
+
+async def test_pvi_get_for_inconsistent_blocks():
+    dummy_pvi = {
+        "pcap": {},
+        "pcap1": {},
+        "pulse1": {},
+        "pulse2": {},
+        "sfp3_sync_out1": {},
+        "sfp3_sync_out": {},
+    }
+
+    resulting_pvi = await pvi_get("", MockCtxt(dummy_pvi))
+    assert "sfp3_sync_out1" not in resulting_pvi
+    assert "pcap1" not in resulting_pvi
+
+
+async def test_panda_children_connected(sim_panda: PandA):
+    # try to set and retrieve from simulated values...
+    table = SeqTable(
+        repeats=np.array([1, 1, 1, 32]).astype(np.uint16),
+        trigger=(
+            SeqTrigger.POSA_GT,
+            SeqTrigger.POSA_LT,
+            SeqTrigger.IMMEDIATE,
+            SeqTrigger.IMMEDIATE,
+        ),
+        position=np.array([3222, -565, 0, 0], dtype=np.int32),
+        time1=np.array([5, 0, 10, 10]).astype(np.uint32),  # TODO: change below syntax.
+        outa1=np.array([1, 0, 0, 1]).astype(np.bool_),
+        outb1=np.array([0, 0, 1, 1]).astype(np.bool_),
+        outc1=np.array([0, 1, 1, 0]).astype(np.bool_),
+        outd1=np.array([1, 1, 0, 1]).astype(np.bool_),
+        oute1=np.array([1, 0, 1, 0]).astype(np.bool_),
+        outf1=np.array([1, 0, 0, 0]).astype(np.bool_),
+        time2=np.array([0, 10, 10, 11]).astype(np.uint32),
+        outa2=np.array([1, 0, 0, 1]).astype(np.bool_),
+        outb2=np.array([0, 0, 1, 1]).astype(np.bool_),
+        outc2=np.array([0, 1, 1, 0]).astype(np.bool_),
+        outd2=np.array([1, 1, 0, 1]).astype(np.bool_),
+        oute2=np.array([1, 0, 1, 0]).astype(np.bool_),
+        outf2=np.array([1, 0, 0, 0]).astype(np.bool_),
+    )
+    await sim_panda.pulse[1].delay.set(20.0)
+    await sim_panda.seq[1].table.set(table)
+
+    readback_pulse = await sim_panda.pulse[1].delay.get_value()
+    readback_seq = await sim_panda.seq[1].table.get_value()
+
+    assert readback_pulse == 20.0
+    assert readback_seq == table
+
+
+async def test_panda_with_missing_blocks(pva):
+    panda = PandA("PANDAQSRVI")
+    with pytest.raises(AssertionError):
+        await panda.connect()
+
+
+async def test_panda_with_extra_blocks_and_signals(pva):
+    panda = PandA("PANDAQSRV")
+    await panda.connect()
+
+    assert panda.extra  # type: ignore
+    assert panda.extra[1]  # type: ignore
+    assert panda.extra[2]  # type: ignore
+    assert panda.pcap.newsignal  # type: ignore
+
+
+async def test_panda_block_missing_signals(pva):
+    panda = PandA("PANDAQSRVIB")
+
+    with pytest.raises(Exception) as exc:
+        await panda.connect()
+    assert (
+        str(exc.value)
+        == "PandA has a pulse block containing a width signal which has not been "
+        + "retrieved by PVI."
+    )
diff --git a/tests/test_cli.py b/tests/test_cli.py
new file mode 100644
index 0000000000..c09138de77
--- /dev/null
+++ b/tests/test_cli.py
@@ -0,0 +1,9 @@
+import subprocess
+import sys
+
+from ophyd_async import __version__
+
+
+def test_cli_version():
+    cmd = [sys.executable, "-m", "ophyd_async", "--version"]
+    assert subprocess.check_output(cmd).decode().strip() == __version__
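
A note on the exception-message assertion in test_panda_block_missing_signals above: pytest.raises yields an ExceptionInfo object, and the raised exception instance itself lives on its .value attribute, so message checks go through str(exc.value) (or the match= keyword, which applies re.search to that string); exc.__str__ alone is just a bound method and never equals a string. A minimal self-contained sketch of the pattern, where raise_width_error is an invented stand-in for the failing panda.connect() call:

    import pytest

    def raise_width_error():
        # Hypothetical helper: stands in for PandA.connect() failing with a
        # missing-signal error like the one asserted in the test above
        raise Exception(
            "PandA has a pulse block containing a width signal which has not been "
            "retrieved by PVI."
        )

    def test_message_via_exc_value():
        with pytest.raises(Exception) as exc:
            raise_width_error()
        # ExceptionInfo.value is the exception instance; str() gives its message
        assert str(exc.value).endswith("retrieved by PVI.")

    def test_message_via_match():
        # match= is shorthand: re.search against str(exception)
        with pytest.raises(Exception, match="retrieved by PVI"):
            raise_width_error()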