added a test for the dataset name cache and made changes including:
changed `DATASET` pvi table to `r` instead of `rw`
changed capture to combobox in positions capture table
evalott100 committed Jun 18, 2024
1 parent 56d3188 commit 339ae3a
Showing 6 changed files with 68 additions and 13 deletions.
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -13,7 +13,7 @@ classifiers = [
description = "Create an IOC from a PandA"
dependencies = [
"setuptools>=64",
"numpy<2",
"numpy<2", # until https://github.com/mdavidsaver/p4p/issues/145 is fixed
"click",
"h5py",
"softioc>=4.4.0",
23 changes: 15 additions & 8 deletions src/pandablocks_ioc/_hdf_ioc.py
@@ -24,14 +24,14 @@

from ._pvi import PviGroup, add_automatic_pvi_info, add_data_capture_pvi_info
from ._tables import ReadOnlyPvaTable
-from ._types import ONAM_STR, ZNAM_STR, EpicsName
+from ._types import ONAM_STR, ZNAM_STR, EpicsName, epics_to_panda_name

HDFReceived = Union[ReadyData, StartData, FrameData, EndData]


class CaptureMode(Enum):
"""
-    The mode which the circular buffer will use to flush
+    The mode which the circular buffer will use to flush.
"""

#: Wait till N frames are received then write them
@@ -313,13 +313,20 @@ def handle_data(self, data: HDFReceived):

@dataclass
class Dataset:
"""A dataset name and capture mode"""

name: str
capture: str


class DatasetNameCache:
-    def __init__(self, datasets: Dict[str, Dataset], datasets_record_name: EpicsName):
-        self.datasets = datasets
+    """Used for outputting formatted dataset names in the HDF5 writer, and creating
+    and updating the HDF5 `DATASETS` table record."""
+
+    def __init__(
+        self, datasets: Dict[EpicsName, Dataset], datasets_record_name: EpicsName
+    ):
+        self._datasets = datasets

self._datasets_table_record = ReadOnlyPvaTable(
datasets_record_name, ["Name", "Type"]
@@ -332,11 +339,11 @@ def hdf_writer_names(self):
"""Formats the current dataset names for use in the HDFWriter"""

hdf_names: Dict[str, Dict[str, str]] = {}
-        for record_name, dataset in self.datasets.items():
+        for record_name, dataset in self._datasets.items():
if not dataset.name or dataset.capture == "No":
continue

-            field_name = record_name.replace(":", ".")
+            field_name = epics_to_panda_name(record_name)

hdf_names[field_name] = hdf_name = {}

@@ -350,7 +357,7 @@ def update_datasets_record(self):
def update_datasets_record(self):
dataset_name_list = [
dataset.name
-            for dataset in self.datasets.values()
+            for dataset in self._datasets.values()
if dataset.name and dataset.capture != "No"
]
self._datasets_table_record.update_row("Name", dataset_name_list)
@@ -384,7 +391,7 @@ class HDF5RecordController:
def __init__(
self,
client: AsyncioClient,
-        dataset_cache: Dict[str, Dataset],
+        dataset_cache: Dict[EpicsName, Dataset],
record_prefix: str,
):
if find_spec("h5py") is None:
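
As a quick illustration of how the renamed cache behaves, here is a minimal sketch (not part of the commit) that mirrors the new test at the end of this diff; `ReadOnlyPvaTable` is patched out because no IOC is running, and the dataset names are made up:

```python
from unittest.mock import patch

from pandablocks_ioc._hdf_ioc import Dataset, DatasetNameCache
from pandablocks_ioc._types import EpicsName

# Patch out the PVA table record, as the new test below does, so the
# cache can be constructed without a running IOC.
with patch("pandablocks_ioc._hdf_ioc.ReadOnlyPvaTable", autospec=True):
    cache = DatasetNameCache(
        {EpicsName("COUNTER1:OUT"): Dataset("counts", "Min Max Mean")},
        EpicsName("PREFIX:DATASETS"),
    )

# EPICS ":" separators become PandA "." separators, and the
# "Min Max Mean" capture mode expands to three writer datasets.
assert cache.hdf_writer_names() == {
    "COUNTER1.OUT": {"Mean": "counts", "Min": "counts-min", "Max": "counts-max"}
}
```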
2 changes: 1 addition & 1 deletion src/pandablocks_ioc/_pvi.py
@@ -216,7 +216,7 @@ def add_positions_table_row(
name=epics_to_pvi_name(capture_record_name),
label=capture_record_name,
pv=capture_record_name,
-            widget=TextWrite(),
+            widget=ComboBox(),
),
]

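
Capture modes form a fixed set of choices, so a `ComboBox` fits better than free text. A rough sketch of the row this code now builds, assuming the enclosing component is pvi's `SignalRW` (the diff only shows its keyword arguments); the record name is hypothetical:

```python
from pvi.device import ComboBox, SignalRW

# Hypothetical capture record, for illustration only
capture_record_name = "POSITIONS1:CAPTURE"

row = SignalRW(
    name="Positions1Capture",  # stand-in for epics_to_pvi_name(...)
    label=capture_record_name,
    pv=capture_record_name,
    widget=ComboBox(),  # was TextWrite(): free text allowed invalid modes
)
```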
2 changes: 1 addition & 1 deletion src/pandablocks_ioc/_tables.py
@@ -106,7 +106,7 @@ def __init__(
"Q:group",
{
RecordName(f"{block}:PVI"): {
f"pvi.{field.lower().replace(':', '_')}.rw": {
f"pvi.{field.lower().replace(':', '_')}.r": {
"+channel": "VAL",
"+type": "plain",
}
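
For reference, a sketch of the Q:group info entry this code now produces, with hypothetical block and field values; the `.r` suffix publishes the PVI entry read-only, matching the read-only PVA table, where `.rw` previously advertised it as writable:

```python
block = "PCAP"      # hypothetical block name
field = "DATASETS"  # hypothetical field name

q_group_info = {
    f"{block}:PVI": {  # RecordName(...) would also apply the IOC prefix
        f"pvi.{field.lower().replace(':', '_')}.r": {  # -> "pvi.datasets.r"
            "+channel": "VAL",
            "+type": "plain",
        }
    }
}
```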
10 changes: 9 additions & 1 deletion src/pandablocks_ioc/ioc.py
@@ -543,7 +543,9 @@ def __init__(
# All records should be blocking
builder.SetBlocking(True)

-        self._dataset_cache: Dict[str, Dataset] = {}
+        # A dataset cache for storing dataset names and capture modes for different
+        # capture records
+        self._dataset_cache: Dict[EpicsName, Dataset] = {}

def _process_labels(
self, labels: List[str], record_value: ScalarRecordValue
@@ -890,6 +892,9 @@ def capture_record_on_update(new_capture_mode):
on_update=capture_record_on_update,
)

+        # For now we have to make a `_RecordUpdater` here and
+        # combine it with `on_update`.
+        # https://github.com/PandABlocks/PandABlocks-ioc/issues/121
capture_record_updater = _RecordUpdater(
record_dict[capture_record_name],
self._record_prefix,
@@ -1071,6 +1076,9 @@ def capture_record_on_update(new_capture_mode):
initial_value=capture_index,
on_update=capture_record_on_update,
)
+        # For now we have to make a `_RecordUpdater` here and
+        # combine it with `on_update`.
+        # https://github.com/PandABlocks/PandABlocks-ioc/issues/121
capture_record_updater = _RecordUpdater(
record_dict[capture_record_name],
self._record_prefix,
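
The comment added in both places refers to issue #121: the IOC currently has to build a `_RecordUpdater` by hand and chain it with the local `on_update` handler. A minimal sketch of that chaining pattern, assuming `_RecordUpdater.update` is an async method (the helper name here is hypothetical):

```python
from typing import Awaitable, Callable

def combine_updates(
    local_handler: Callable[[str], None],
    forward_update: Callable[[str], Awaitable[None]],
) -> Callable[[str], Awaitable[None]]:
    """Chain a local side effect with the PandA-facing record updater."""

    async def _on_update(new_value: str) -> None:
        local_handler(new_value)  # e.g. update the dataset cache
        await forward_update(new_value)  # e.g. capture_record_updater.update
    return _on_update
```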
42 changes: 41 additions & 1 deletion tests/test_hdf_ioc.py
@@ -39,10 +39,12 @@
from pandablocks_ioc._hdf_ioc import (
CaptureMode,
Dataset,
+    DatasetNameCache,
HDF5Buffer,
HDF5RecordController,
NumCapturedSetter,
)
+from pandablocks_ioc._types import EpicsName

NAMESPACE_PREFIX = "HDF-RECORD-PREFIX"

@@ -230,7 +232,7 @@ async def hdf5_controller(
test_prefix, hdf5_test_prefix = new_random_hdf5_prefix

dataset_name_cache = {
"COUNTER1:OUT": Dataset("some_other_dataset_name", "Value"),
EpicsName("COUNTER1:OUT"): Dataset("some_other_dataset_name", "Value"),
}

hdf5_controller = HDF5RecordController(
@@ -1308,3 +1310,41 @@ def test_hdf_capture_validate_exception(
)

assert hdf5_controller._capture_validate(None, 1) is False


+def test_dataset_name_cache():
+    with patch(
+        "pandablocks_ioc._hdf_ioc.ReadOnlyPvaTable", autospec=True
+    ) as mock_table:
+        mock_table_instance = MagicMock()
+        mock_table.return_value = mock_table_instance
+
+        # Initialize DatasetNameCache
+        datasets = {
+            "TEST1:OUT": Dataset("", "Value"),
+            "TEST2:OUT": Dataset("test2", "No"),
+            "TEST3:OUT": Dataset("test3", "Value"),
+            "TEST4:OUT": Dataset("test4", "Min Max Mean"),
+            "TEST5:OUT": Dataset("test5", "Min Max"),
+        }
+        cache = DatasetNameCache(datasets, "record_name")
+
+        # Check that set_rows was called once with the correct arguments
+        mock_table_instance.set_rows.assert_called_once_with(
+            ["Name", "Type"], [[], []], length=300, default_data_type=str
+        )
+        cache.update_datasets_record()
+
+        # Check that update_row was called with the correct arguments
+        mock_table_instance.update_row.assert_any_call(
+            "Name", ["test3", "test4", "test5"]
+        )
+        mock_table_instance.update_row.assert_any_call(
+            "Type", ["float64", "float64", "float64"]
+        )
+
+        assert cache.hdf_writer_names() == {
+            "TEST3.OUT": {"Value": "test3"},
+            "TEST4.OUT": {"Mean": "test4", "Min": "test4-min", "Max": "test4-max"},
+            "TEST5.OUT": {"Min": "test5-min", "Max": "test5-max"},
+        }
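
Since `ReadOnlyPvaTable` is patched out, the new test needs no running IOC; it should run in isolation with the usual pytest selector, e.g. `pytest tests/test_hdf_ioc.py::test_dataset_name_cache`.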
