From 8c12b24dea8238e1b3e776448137f593b59a278a Mon Sep 17 00:00:00 2001 From: Jan Lienemann Date: Thu, 18 Jan 2024 10:31:43 +0000 Subject: [PATCH] Release 2.23.0 --- ...weight_calibration_shfsg_shfqa_shfqc.ipynb | 14 +- ..._experiments_shfsg_shfqa_shfqc_hdawg.ipynb | 3 +- .../01_randomized_benchmarking.ipynb | 3 +- laboneq/VERSION.txt | 2 +- .../compiler/code_generator/code_generator.py | 60 +- .../experiment_access/experiment_dao.py | 20 +- laboneq/compiler/workflow/compiler.py | 34 +- .../example_helpers/feedback_helper.py | 3 + .../example_helpers/plotting/plot_helpers.py | 535 +++++++----- laboneq/controller/communication.py | 79 +- laboneq/controller/controller.py | 388 +++------ laboneq/controller/devices/async_support.py | 21 +- laboneq/controller/devices/awg_pipeliner.py | 84 +- .../controller/devices/device_collection.py | 4 +- laboneq/controller/devices/device_hdawg.py | 344 ++++---- laboneq/controller/devices/device_nonqc.py | 4 +- laboneq/controller/devices/device_pqsc.py | 171 +--- .../devices/device_pretty_printer.py | 35 +- laboneq/controller/devices/device_shfppc.py | 29 +- laboneq/controller/devices/device_shfqa.py | 795 ++++++------------ laboneq/controller/devices/device_shfsg.py | 485 ++++------- laboneq/controller/devices/device_uhfqa.py | 493 ++++------- laboneq/controller/devices/device_utils.py | 20 + laboneq/controller/devices/device_zi.py | 465 ++++++---- laboneq/controller/devices/zi_emulator.py | 7 + laboneq/controller/near_time_runner.py | 8 +- .../controller/pipeliner_reload_tracker.py | 23 +- laboneq/controller/recipe_processor.py | 9 - laboneq/core/utilities/seqc_compile.py | 87 ++ laboneq/data/recipe.py | 6 +- laboneq/dsl/device/device_setup.py | 16 + laboneq/dsl/experiment/call.py | 5 +- laboneq/dsl/session.py | 12 + .../pulse_sheet_viewer/pulse_sheet_viewer.py | 78 +- requirements-dev.txt | 3 + 35 files changed, 1919 insertions(+), 2426 deletions(-) create mode 100644 laboneq/controller/devices/device_utils.py create mode 100644 laboneq/core/utilities/seqc_compile.py diff --git a/examples/01_qubit_characterization/12_readoutweight_calibration_shfsg_shfqa_shfqc.ipynb b/examples/01_qubit_characterization/12_readoutweight_calibration_shfsg_shfqa_shfqc.ipynb index a2980cd..60c5664 100644 --- a/examples/01_qubit_characterization/12_readoutweight_calibration_shfsg_shfqa_shfqc.ipynb +++ b/examples/01_qubit_characterization/12_readoutweight_calibration_shfsg_shfqa_shfqc.ipynb @@ -54,7 +54,7 @@ "from laboneq.contrib.example_helpers.feedback_helper import (\n", " state_emulation_pulse,\n", " create_calibration_experiment,\n", - " create_discrimination_experiment,\n", + " create_integration_verification_experiment,\n", ")\n", "\n", "from laboneq.contrib.example_helpers.generate_example_datastore import (\n", @@ -242,7 +242,9 @@ "id": "23004293", "metadata": {}, "source": [ - "## 2.3 Verify state discrimination" + "## 2.3 Plot acquired results after readout optimization\n", + "\n", + "When using the optimized kernels calculated in the last step, the integration results for ground and excited states are rotated so that a projection onto the real axis allows for later discrimination with a real-valued threshold." 
] }, { @@ -252,14 +254,14 @@ "metadata": {}, "outputs": [], "source": [ - "my_exp = create_discrimination_experiment(\n", + "my_exp = create_integration_verification_experiment(\n", " measure_lines=[q0[\"measure_line\"], q1[\"measure_line\"]],\n", " acquire_line=q0[\"acquire_line\"],\n", " kernels=kernels,\n", " state_emulation_pulse=state_emulation_pulse,\n", ")\n", "\n", - "discrimination_results = my_session.run(my_exp)" + "integration_results = my_session.run(my_exp)" ] }, { @@ -269,8 +271,8 @@ "metadata": {}, "outputs": [], "source": [ - "s0 = discrimination_results.get_data(\"data_0\").real\n", - "s1 = discrimination_results.get_data(\"data_1\").real\n", + "s0 = integration_results.get_data(\"data_0\").real\n", + "s1 = integration_results.get_data(\"data_1\").real\n", "\n", "plt.plot(s0, \".b\")\n", "plt.plot(s1, \".r\")" diff --git a/examples/01_qubit_characterization/99_basic_experiments_shfsg_shfqa_shfqc_hdawg.ipynb b/examples/01_qubit_characterization/99_basic_experiments_shfsg_shfqa_shfqc_hdawg.ipynb index 34c536b..2ae0c7a 100644 --- a/examples/01_qubit_characterization/99_basic_experiments_shfsg_shfqa_shfqc_hdawg.ipynb +++ b/examples/01_qubit_characterization/99_basic_experiments_shfsg_shfqa_shfqc_hdawg.ipynb @@ -43,8 +43,7 @@ "id": "1423f3d9", "metadata": {}, "source": [ - "
\n", - "Remark: The spectroscopy experiments in this notebook rely on the real-time frequency sweep functionality of the Zurich Instruments SHF-line devices. [Another example notebook](https://github.com/zhinst/laboneq/blob/main/examples/spectroscopy_uhfqa_hdawg.ipynb) explains how to run spectroscopy experiments also with the UHFQA (resonator spectroscopy) and HDAWG (qubit spectroscopy).
" + "**Remark:** The spectroscopy experiments in this notebook rely on the real-time frequency sweep functionality of the Zurich Instruments SHF-line devices. [Another example notebook](https://github.com/zhinst/laboneq/blob/main/examples/spectroscopy_uhfqa_hdawg.ipynb) explains how to run spectroscopy experiments also with the UHFQA (resonator spectroscopy) and HDAWG (qubit spectroscopy)." ] }, { diff --git a/examples/02_advanced_qubit_experiments/01_randomized_benchmarking.ipynb b/examples/02_advanced_qubit_experiments/01_randomized_benchmarking.ipynb index b5f22af..6a96b3b 100644 --- a/examples/02_advanced_qubit_experiments/01_randomized_benchmarking.ipynb +++ b/examples/02_advanced_qubit_experiments/01_randomized_benchmarking.ipynb @@ -481,7 +481,8 @@ " )\n", " # relax time after readout - for qubit relaxation to groundstate and signal processing\n", " with exp_rb.section(length=1e-6):\n", - " exp_rb.reserve(signal=\"measure\")" + " exp_rb.reserve(signal=\"measure\")\n", + " exp_rb.reserve(signal=\"drive\")" ] }, { diff --git a/laboneq/VERSION.txt b/laboneq/VERSION.txt index f1270b4..da26612 100644 --- a/laboneq/VERSION.txt +++ b/laboneq/VERSION.txt @@ -1 +1 @@ -2.22.0 \ No newline at end of file +2.23.0 \ No newline at end of file diff --git a/laboneq/compiler/code_generator/code_generator.py b/laboneq/compiler/code_generator/code_generator.py index 27a500c..efad278 100644 --- a/laboneq/compiler/code_generator/code_generator.py +++ b/laboneq/compiler/code_generator/code_generator.py @@ -1651,39 +1651,45 @@ def _sample_pulses( ) has_q = True - if "samples_marker1" in sampled_pulse: - if ( - pulse_part.channel == 1 - and not multi_iq_signal - and not device_type == DeviceType.SHFQA - ): - raise LabOneQException( - f"Marker 1 not supported on channel 2 of multiplexed RF signal {signal_id}" + # RF case + if pulse_part.channel is not None: + if "samples_marker1" in sampled_pulse and pulse_part.channel == 0: + self.stencil_samples( + pulse_part.start, + sampled_pulse["samples_marker1"], + samples_marker1, ) + has_marker1 = True - self.stencil_samples( - pulse_part.start, - sampled_pulse["samples_marker1"], - samples_marker1, - ) - has_marker1 = True + # map user facing marker1 to "internal" marker 2 + if "samples_marker1" in sampled_pulse and pulse_part.channel == 1: + self.stencil_samples( + pulse_part.start, + sampled_pulse["samples_marker1"], + samples_marker2, + ) + has_marker2 = True - if "samples_marker2" in sampled_pulse: - if ( - pulse_part.channel == 0 - and not multi_iq_signal - and not device_type == DeviceType.SHFQA - ): + if "samples_marker2" in sampled_pulse and pulse_part.channel == 1: raise LabOneQException( - f"Marker 2 not supported on channel 1 of multiplexed RF signal {signal_id}" + f"Marker 2 not supported on channel 1 of multiplexed RF signal {signal_id}. 
Please use marker 1" + ) + else: + if "samples_marker1" in sampled_pulse: + self.stencil_samples( + pulse_part.start, + sampled_pulse["samples_marker1"], + samples_marker1, ) + has_marker1 = True - self.stencil_samples( - pulse_part.start, - sampled_pulse["samples_marker2"], - samples_marker2, - ) - has_marker2 = True + if "samples_marker2" in sampled_pulse: + self.stencil_samples( + pulse_part.start, + sampled_pulse["samples_marker2"], + samples_marker2, + ) + has_marker2 = True pm = signature_pulse_map.get(pulse_def.uid) if pm is None: diff --git a/laboneq/compiler/experiment_access/experiment_dao.py b/laboneq/compiler/experiment_access/experiment_dao.py index e602761..4f8b416 100644 --- a/laboneq/compiler/experiment_access/experiment_dao.py +++ b/laboneq/compiler/experiment_access/experiment_dao.py @@ -344,7 +344,6 @@ def validate_experiment(self): ) == DeviceType.HDAWG and len(section_pulse.signal.channels) == 1 - and section_pulse.signal.channels[0] % 2 == 0 and any( "marker2" == m.marker_selector for m in section_pulse.markers @@ -352,24 +351,7 @@ def validate_experiment(self): ): raise LabOneQException( f"Single channel RF Pulse {pulse_id} referenced in section {section_id}" - f" has marker 2 enabled but is to be played on channel {section_pulse.signal.channels[0]+1}. Please use marker 1" - ) - - if ( - DeviceType.from_device_info_type( - section_pulse.signal.device.device_type - ) - == DeviceType.HDAWG - and len(section_pulse.signal.channels) == 1 - and section_pulse.signal.channels[0] % 2 == 1 - and any( - "marker1" == m.marker_selector - for m in section_pulse.markers - ) - ): - raise LabOneQException( - f"Single channel RF Pulse {pulse_id} referenced in section {section_id}" - f" has marker 1 enabled but is to be played on channel {section_pulse.signal.channels[0]+1}. Please use marker 2" + f" has marker 2 enabled. Please only use marker 1 on RF channels." 
) if ( diff --git a/laboneq/compiler/workflow/compiler.py b/laboneq/compiler/workflow/compiler.py index 73e3ca3..2e19d02 100644 --- a/laboneq/compiler/workflow/compiler.py +++ b/laboneq/compiler/workflow/compiler.py @@ -201,7 +201,8 @@ def _analyze_setup(self): _logger.debug("Using desktop setup configuration with leader %s", leader) - if has_hdawg or has_shfsg and not has_shfqa: + # TODO: Check if this is needed for standalone QC, where only SG part is used + if has_hdawg or (standalone_qc is True and has_shfsg and not has_shfqa): has_signal_on_awg_0_of_leader = False for signal_id in self._experiment_dao.signals(): signal_info = self._experiment_dao.signal_info(signal_id) @@ -319,7 +320,7 @@ def _get_total_rounded_delay(delay, signal_id, device_type, sampling_rate): def _calc_integration_unit_allocation(dao: ExperimentDAO): integration_unit_allocation: dict[str, dict] = {} - integration_signals = [ + integration_signals: list[SignalInfo] = [ signal_info for signal in dao.signals() if (signal_info := dao.signal_info(signal)).type @@ -357,7 +358,6 @@ def _calc_integration_unit_allocation(dao: ExperimentDAO): or is_spectroscopy(dao.acquisition_type) else 1 ) - integration_unit_allocation[signal_info.uid] = { "device_id": signal_info.device.uid, "awg_nr": awg_nr, @@ -799,8 +799,7 @@ def calc_outputs(self, signal_delays: SignalDelays): raise RuntimeError("Only marker1 supported on SHFSG") if signal_info.type == SignalInfoType.RF: if device_type == DeviceType.HDAWG: - marker_key = channel % 2 + 1 - if f"marker{marker_key}" in markers: + if "marker1" in markers: output["marker_mode"] = "MARKER" if triggers is not None: if signal_info.type == SignalInfoType.IQ: @@ -842,16 +841,33 @@ def calc_outputs(self, signal_delays: SignalDelays): def calc_inputs(self, signal_delays: SignalDelays): all_channels = {} + ports_delays_raw_shfqa = set() for signal_id in self._experiment_dao.signals(): - signal_info = self._experiment_dao.signal_info(signal_id) + signal_info: SignalInfo = self._experiment_dao.signal_info(signal_id) if signal_info.type != SignalInfoType.INTEGRATION: continue - lo_frequency = self._experiment_dao.lo_frequency(signal_id) - signal_range = self._experiment_dao.signal_range(signal_id) - port_delay = self._experiment_dao.port_delay(signal_id) + # SHFQA scope delay cannot be set for individual channels + if ( + self._experiment_dao.acquisition_type == AcquisitionType.RAW + and port_delay is not None + ): + device_type = DeviceType.from_device_info_type( + signal_info.device.device_type + ) + if device_type == device_type.SHFQA: + ports_delays_raw_shfqa.add( + port_delay.uid + if isinstance(port_delay, ParameterInfo) + else port_delay + ) + if len(ports_delays_raw_shfqa) > 1: + msg = f"{signal_info.device.uid}: Multiple different `port_delay`s defined for SHFQA acquisition signals in `AcquisitionType.RAW` mode. Only 1 supported." 
+ raise LabOneQException(msg) + lo_frequency = self._experiment_dao.lo_frequency(signal_id) + signal_range = self._experiment_dao.signal_range(signal_id) port_mode = self._experiment_dao.port_mode(signal_id) scheduler_port_delay: float = 0.0 diff --git a/laboneq/contrib/example_helpers/feedback_helper.py b/laboneq/contrib/example_helpers/feedback_helper.py index 59ef2dc..28bf488 100755 --- a/laboneq/contrib/example_helpers/feedback_helper.py +++ b/laboneq/contrib/example_helpers/feedback_helper.py @@ -117,6 +117,9 @@ def exp(): return exp() +create_integration_verification_experiment = create_discrimination_experiment + + def gaussian_envelope( centre=50e-9, sigma=20e-9, start_time=0, stop_time=100e-9, sampling_rate=2e9 ): diff --git a/laboneq/contrib/example_helpers/plotting/plot_helpers.py b/laboneq/contrib/example_helpers/plotting/plot_helpers.py index 7650101..7127fa9 100644 --- a/laboneq/contrib/example_helpers/plotting/plot_helpers.py +++ b/laboneq/contrib/example_helpers/plotting/plot_helpers.py @@ -10,28 +10,77 @@ import re from typing import List +import matplotlib + # additional imports for plotting import matplotlib.pyplot as plt # numpy for mathematics import numpy as np -from matplotlib import cycler, style from laboneq.core.types.compiled_experiment import CompiledExperiment from laboneq.simulator.output_simulator import OutputSimulator -# Zurich Instruments style plotting -style.use("default") -plt.rcParams.update( - { - "font.weight": "light", - "axes.labelweight": "light", - "axes.titleweight": "normal", - "axes.prop_cycle": cycler(color=["#006699", "#FF0000", "#66CC33", "#CC3399"]), - "svg.fonttype": "none", # Make text editable in SVG - "text.usetex": False, - } -) + +def zi_mpl_theme(): + # Zurich Instruments style plotting + return matplotlib.rc_context( + { + "font.weight": "light", + "axes.labelweight": "light", + "axes.titleweight": "normal", + "axes.prop_cycle": matplotlib.cycler( + color=["#006699", "#FF0000", "#66CC33", "#CC3399"] + ), + "svg.fonttype": "none", # Make text editable in SVG + "text.usetex": False, + } + ) + + +def _integration_weights_by_signal( + compiled_experiment: CompiledExperiment +) -> dict[str, np.ndarray]: + rt_step_by_awg = {} + assert compiled_experiment.scheduled_experiment is not None + for ( + rt_init + ) in compiled_experiment.scheduled_experiment.recipe.realtime_execution_init: + key = (rt_init.device_id, rt_init.awg_id) + if key not in rt_step_by_awg: + rt_step_by_awg[key] = rt_init.kernel_indices_ref + kernel_indices_ref = set(rt_step_by_awg.values()) + kernel_name_by_signal = {} + assert hasattr( + compiled_experiment.scheduled_experiment.artifacts, "integration_weights" + ) + for iw in compiled_experiment.scheduled_experiment.artifacts.integration_weights: + if iw["filename"] not in kernel_indices_ref: + continue + + # discard all but the first kernel in case of MSD + kernel_name_by_signal.update({k: v[0] for k, v in iw["signals"].items()}) + + kernel_samples_by_signal: dict[str, np.ndarray] = {} + for signal, kernel in kernel_name_by_signal.items(): + waveform: None | np.ndarray = None + for scale, suffix in [(1, ".wave"), (1, "_i.wave"), (1j, "_q.wave")]: + new_wf = next( + iter( + ( + w["samples"] + for w in compiled_experiment.waves + if w["filename"] == kernel + suffix + ), + ), + None, + ) + if new_wf is not None: + waveform = scale * new_wf + (waveform if waveform is not None else 0) + assert waveform is not None, "kernel not found" + kernel_samples_by_signal[signal] = waveform + + return kernel_samples_by_signal def 
plot_simulation( @@ -77,12 +126,17 @@ def plot_simulation( else: mapped_signals = signals + kernel_samples = _integration_weights_by_signal(compiled_experiment) + xs = [] y1s = [] labels1 = [] y2s = [] labels2 = [] titles = [] + + physical_channels = {} + # extract physical channel info for each logical signal for signal in mapped_signals: mapped_path = compiled_experiment.experiment.signals[ signal @@ -98,46 +152,82 @@ def plot_simulation( .physical_channel ) + if physical_channel_path not in physical_channels.keys(): + physical_channels[physical_channel_path] = {"signals": [signal]} + else: + physical_channels[physical_channel_path]["signals"].append(signal) + + # simulate output only once for each physical channel + for channel, channel_values in physical_channels.items(): my_snippet = simulation.get_snippet( - compiled_experiment.device_setup.logical_signal_groups[signal_group_name] - .logical_signals[signal_line_name] - .physical_channel, + channel, start=start_time, output_length=length, - channel_type=physical_channel_path.type, + channel_type=channel.type, get_trigger=True, get_marker=True, get_frequency=True, ) - physcial_channel = ( - compiled_experiment.device_setup.logical_signal_groups[signal_group_name] - .logical_signals[signal_line_name] - .physical_channel.uid.replace("_", " ") - .replace("/", ": ") - ) + physical_channel_name = channel.uid.replace("_", " ").replace("/", ": ") + + signal_names = "-".join(channel_values["signals"]) - if "iq_channel" in str( - physical_channel_path.type - ).lower() and "input" not in str(physical_channel_path.name): + if ( + "iq_channel" in str(channel.type).lower() + and "input" not in channel.name + and "qas_0_1" not in channel.name + ): try: if my_snippet.time is not None: xs.append(my_snippet.time) y1s.append(my_snippet.wave.real) - labels1.append(f"{signal} I") + labels1.append(f"{physical_channel_name} - I") y2s.append(my_snippet.wave.imag) - labels2.append(f"{signal} Q") + labels2.append(f"{physical_channel_name} - Q") - titles.append(f"{physcial_channel} - {signal}".upper()) + titles.append(f"{physical_channel_name} - {signal_names}".upper()) except Exception: pass - if ( - "iq_channel" not in str(physical_channel_path.type).lower() - or "input" in physical_channel_path.name - ): + elif "input" in channel.name or "qas_0_1" in channel.name: + try: + if my_snippet.time is not None: + this_kernel_samples = kernel_samples[signal] + trigger_indices = np.argwhere(my_snippet.wave).flatten() + # known issue: the simulator does not extend the QA trigger + # waveform past the last trigger, so we make the new waveform longer + if len(trigger_indices) and trigger_indices[-1] + len( + this_kernel_samples + ) > len(my_snippet.wave): + dt: float = my_snippet.time[1] - my_snippet.time[0] # type: ignore + waveform = np.zeros( + trigger_indices[-1] + len(this_kernel_samples), + dtype=np.complex128, + ) + time = dt * np.arange(len(waveform)) + my_snippet.time[0] # type: ignore + else: + waveform = np.zeros_like(my_snippet.wave, dtype=np.complex128) + time = my_snippet.time + + for i in trigger_indices: + waveform[i : i + len(this_kernel_samples)] = this_kernel_samples + + xs.append(time) + + y1s.append(waveform.real) + labels1.append(f"{physical_channel_name} - I") + + y2s.append(waveform.imag) + labels2.append(f"{physical_channel_name} - Q") + + titles.append(f"{physical_channel_name} - {signal_names}".upper()) + + except Exception: + pass + elif "iq_channel" not in str(channel.type).lower(): try: if my_snippet.time is not None: time_length = 
len(my_snippet.time) @@ -145,22 +235,19 @@ def plot_simulation( xs.append(my_snippet.time) y1s.append(my_snippet.wave.real) - labels1.append(f"{signal}") + labels1.append(f"{physical_channel_name}") - titles.append(f"{physcial_channel} - {signal}".upper()) + titles.append(f"{physical_channel_name} - {signal_names}".upper()) - empty_array = np.empty((1, time_length)) - empty_array.fill(np.nan) - y2s.append(empty_array[0]) + empty_array = np.full(time_length, np.nan) + y2s.append(empty_array) labels2.append(None) - except Exception: pass - - if ( - "qa" not in str(physical_channel_path.name) + elif ( + "qa" not in str(channel.name) and np.sum(my_snippet.trigger) != 0 - and f"{physcial_channel} - Trigger".upper() not in titles + and f"{physical_channel_name} - Trigger".upper() not in titles ): try: if my_snippet.time is not None: @@ -169,13 +256,12 @@ def plot_simulation( xs.append(my_snippet.time) y1s.append(my_snippet.trigger) - labels1.append(f"{signal} - Trigger") + labels1.append(f"{physical_channel_name} - Trigger") - titles.append(f"{physcial_channel} - Trigger".upper()) + titles.append(f"{physical_channel_name} - Trigger".upper()) - empty_array = np.empty((1, time_length)) - empty_array.fill(np.nan) - y2s.append(empty_array[0]) + empty_array = np.full(time_length, np.nan) + y2s.append(empty_array) labels2.append(None) except Exception: @@ -189,73 +275,76 @@ def plot_simulation( xs.append(my_snippet.time) y1s.append(my_snippet.marker.real) - labels1.append(f"{signal} - Marker 1") + labels1.append(f"{physical_channel_name} - Marker 1") if np.any(my_snippet.marker.imag): y2s.append(my_snippet.marker.imag) - labels2.append(f"{signal} - Marker 2") + labels2.append(f"{physical_channel_name} - Marker 2") else: - empty_array = np.empty((1, time_length)) - empty_array.fill(np.nan) - y2s.append(empty_array[0]) + empty_array = np.full(time_length, np.nan) labels2.append(None) + y2s.append(empty_array) - titles.append(f"{physcial_channel} - {signal} - Marker".upper()) + titles.append( + f"{physical_channel_name} - {signal_names} - Marker".upper() + ) except Exception: pass + raise - fig, axes = plt.subplots( - nrows=len(y1s), - sharex=False, - figsize=(plot_width, len(y1s) * plot_height), - ) - - colors = plt.rcParams["axes.prop_cycle"]() + with zi_mpl_theme(): + fig, axes = plt.subplots( + nrows=len(y1s), + sharex=False, + figsize=(plot_width, len(y1s) * plot_height), + ) - if len(xs) > 1: - for axs, x, y1, y2, label1, label2, title in zip( - axes.flat, xs, y1s, y2s, labels1, labels2, titles - ): - # Get the next color from the cycler - c = next(colors)["color"] - axs.plot(x, y1, label=label1, color=c) - c = next(colors)["color"] - axs.plot(x, y2, label=label2, color=c) - axs.set_ylabel(yaxis_label) - axs.set_xlabel(xaxis_label) - axs.set_title(title) - axs.legend(bbox_to_anchor=(1.0, 1.0)) - axs.ticklabel_format(axis="both", style="sci", scilimits=(0, 0)) - axs.grid(True) - - elif len(xs) == 1: - for x, y1, y2, label1, label2, title in zip( - xs, y1s, y2s, labels1, labels2, titles - ): - # Get the next color from the cycler - c = next(colors)["color"] - axes.plot(x, y1, label=label1, color=c) - c = next(colors)["color"] - axes.plot(x, y2, label=label2, color=c) - axes.set_ylabel(yaxis_label) - axes.set_xlabel(xaxis_label) - axes.set_title(title) - axes.legend(bbox_to_anchor=(1.0, 1.0)) - axes.ticklabel_format(axis="both", style="sci", scilimits=(0, 0)) - axes.grid(True) - - # enforce same x-axis scale for all plots - if hasattr(axes, "__iter__"): - for ax in axes: - ax.set_xlim(start_time, 
length) - - fig.tight_layout() - - if save is True: - fig.savefig(f"{filename}.{filetype}", format=f"{filetype}") - # fig.legend(loc="upper left") - plt.show() + colors = plt.rcParams["axes.prop_cycle"]() + + if len(xs) > 1: + for axs, x, y1, y2, label1, label2, title in zip( + axes.flat, xs, y1s, y2s, labels1, labels2, titles + ): + # Get the next color from the cycler + c = next(colors)["color"] + axs.plot(x, y1, label=label1, color=c) + c = next(colors)["color"] + axs.plot(x, y2, label=label2, color=c) + axs.set_ylabel(yaxis_label) + axs.set_xlabel(xaxis_label) + axs.set_title(title) + axs.legend(bbox_to_anchor=(1.0, 1.0)) + axs.ticklabel_format(axis="both", style="sci", scilimits=(0, 0)) + axs.grid(True) + + elif len(xs) == 1: + for x, y1, y2, label1, label2, title in zip( + xs, y1s, y2s, labels1, labels2, titles + ): + # Get the next color from the cycler + c = next(colors)["color"] + axes.plot(x, y1, label=label1, color=c) + c = next(colors)["color"] + axes.plot(x, y2, label=label2, color=c) + axes.set_ylabel(yaxis_label) + axes.set_xlabel(xaxis_label) + axes.set_title(title) + axes.legend(bbox_to_anchor=(1.0, 1.0)) + axes.ticklabel_format(axis="both", style="sci", scilimits=(0, 0)) + axes.grid(True) + + # enforce same x-axis scale for all plots + if hasattr(axes, "__iter__"): + for ax in axes: + ax.set_xlim(start_time, start_time + length) + + fig.tight_layout() + + if save is True: + fig.savefig(f"{filename}.{filetype}", format=f"{filetype}") + # fig.legend(loc="upper left") + plt.show() # general result plotting @@ -265,139 +354,143 @@ def plot_results( plot_width=6, plot_height=2, ): - handles = results.acquired_results.keys() - - for handle in handles: - axis_name_list = [k for k in results.get_axis_name(handle)] - acquired_data = results.get_data(handle) - if len(axis_name_list) == 1 and phase is False: - axis_grid = results.get_axis(handle)[0] - axis_name = results.get_axis_name(handle)[0] - plt.figure(figsize=(plot_width, plot_height)) - plt.plot(axis_grid, np.absolute(acquired_data)) - plt.xlabel(axis_name) - plt.ylabel("Amplitude (a.u.)") - plt.title(f"Handle: {handle}") - plt.show() - - elif len(axis_name_list) == 1 and phase is True: - axis_grid = results.get_axis(handle)[0] - axis_name = results.get_axis_name(handle)[0] - - fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(plot_width, plot_height)) - - ax1.set_title(f"Handle: {handle}") - ax1.plot(axis_grid, abs(acquired_data), ".k") - ax2.plot(axis_grid, np.unwrap(np.angle(acquired_data))) - ax1.set_ylabel("Amplitude (a.u)") - ax2.set_ylabel("$\\phi$ (rad)") - ax2.set_xlabel(axis_name) - fig.tight_layout() - plt.show() - - elif len(axis_name_list) == 2 and phase is False: - axis_1 = results.get_axis(handle)[1] - axis_1_name = results.get_axis_name(handle)[1] - axis_0 = results.get_axis(handle)[0] - axis_0_name = results.get_axis_name(handle)[0] - data = results.get_data(handle) - - X, Y = np.meshgrid(axis_1, axis_0) - fig, ax = plt.subplots(nrows=1, ncols=1, constrained_layout=True) - - CS = ax.contourf(X, Y, np.abs(data), levels=100, cmap="magma") - ax.set_title(f"{handle}") - ax.set_xlabel(axis_1_name) - ax.set_ylabel(axis_0_name) - cbar = fig.colorbar(CS) - cbar.set_label("Amplitude (a.u.)") - - elif len(axis_name_list) == 2 and phase is True: - axis_1 = results.get_axis(handle)[1] - axis_1_name = results.get_axis_name(handle)[1] - axis_0 = results.get_axis(handle)[0] - axis_0_name = results.get_axis_name(handle)[0] - data = results.get_data(handle) - - X, Y = np.meshgrid(axis_1, axis_0) - fig, ax = plt.subplots(nrows=1, 
ncols=2, constrained_layout=True) - - CS = ax[0].contourf(X, Y, np.abs(data), levels=100, cmap="magma") - plt.suptitle(f"Handle: {handle}") - ax[0].set_xlabel(axis_1_name) - ax[0].set_ylabel(axis_0_name) - cbar = fig.colorbar(CS) - cbar.set_label("Amplitude (a.u.)") - - cs2_max_value = ( - max( - int(np.abs(np.min(np.unwrap(np.angle(data, deg=True))))), - int(np.abs(np.max(np.unwrap(np.angle(data, deg=True))))), + with zi_mpl_theme(): + handles = results.acquired_results.keys() + + for handle in handles: + axis_name_list = [k for k in results.get_axis_name(handle)] + acquired_data = results.get_data(handle) + if len(axis_name_list) == 1 and phase is False: + axis_grid = results.get_axis(handle)[0] + axis_name = results.get_axis_name(handle)[0] + plt.figure(figsize=(plot_width, plot_height)) + plt.plot(axis_grid, np.absolute(acquired_data)) + plt.xlabel(axis_name) + plt.ylabel("Amplitude (a.u.)") + plt.title(f"Handle: {handle}") + plt.show() + + elif len(axis_name_list) == 1 and phase is True: + axis_grid = results.get_axis(handle)[0] + axis_name = results.get_axis_name(handle)[0] + + fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(plot_width, plot_height)) + + ax1.set_title(f"Handle: {handle}") + ax1.plot(axis_grid, abs(acquired_data), ".k") + ax2.plot(axis_grid, np.unwrap(np.angle(acquired_data))) + ax1.set_ylabel("Amplitude (a.u)") + ax2.set_ylabel("$\\phi$ (rad)") + ax2.set_xlabel(axis_name) + fig.tight_layout() + plt.show() + + elif len(axis_name_list) == 2 and phase is False: + axis_1 = results.get_axis(handle)[1] + axis_1_name = results.get_axis_name(handle)[1] + axis_0 = results.get_axis(handle)[0] + axis_0_name = results.get_axis_name(handle)[0] + data = results.get_data(handle) + + X, Y = np.meshgrid(axis_1, axis_0) + fig, ax = plt.subplots(nrows=1, ncols=1, constrained_layout=True) + + CS = ax.contourf(X, Y, np.abs(data), levels=100, cmap="magma") + ax.set_title(f"{handle}") + ax.set_xlabel(axis_1_name) + ax.set_ylabel(axis_0_name) + cbar = fig.colorbar(CS) + cbar.set_label("Amplitude (a.u.)") + + elif len(axis_name_list) == 2 and phase is True: + axis_1 = results.get_axis(handle)[1] + axis_1_name = results.get_axis_name(handle)[1] + axis_0 = results.get_axis(handle)[0] + axis_0_name = results.get_axis_name(handle)[0] + data = results.get_data(handle) + + X, Y = np.meshgrid(axis_1, axis_0) + fig, ax = plt.subplots(nrows=1, ncols=2, constrained_layout=True) + + CS = ax[0].contourf(X, Y, np.abs(data), levels=100, cmap="magma") + plt.suptitle(f"Handle: {handle}") + ax[0].set_xlabel(axis_1_name) + ax[0].set_ylabel(axis_0_name) + cbar = fig.colorbar(CS) + cbar.set_label("Amplitude (a.u.)") + + cs2_max_value = ( + max( + int(np.abs(np.min(np.unwrap(np.angle(data, deg=True))))), + int(np.abs(np.max(np.unwrap(np.angle(data, deg=True))))), + ) + + 1 ) - + 1 - ) - cs2_levels = np.linspace( - -cs2_max_value, cs2_max_value, 2 * (cs2_max_value) + 1 - ) + cs2_levels = np.linspace( + -cs2_max_value, cs2_max_value, 2 * (cs2_max_value) + 1 + ) - CS2 = ax[1].contourf( - X, - Y, - np.unwrap(np.angle(data, deg=True)), - levels=cs2_levels, - cmap="twilight_shifted", - ) - # ax[1].set_title("Phase") - ax[1].set_xlabel(axis_1_name) - ax[1].set_ylabel(axis_0_name) - cbar2 = fig.colorbar(CS2) - cbar2.set_label("$\\phi$ (deg)") + CS2 = ax[1].contourf( + X, + Y, + np.unwrap(np.angle(data, deg=True)), + levels=cs2_levels, + cmap="twilight_shifted", + ) + # ax[1].set_title("Phase") + ax[1].set_xlabel(axis_1_name) + ax[1].set_ylabel(axis_0_name) + cbar2 = fig.colorbar(CS2) + cbar2.set_label("$\\phi$ (deg)") - 
elif len(axis_name_list) > 2: - print("Too many dimensions. I don't know how to plot your data!") + elif len(axis_name_list) > 2: + print("Too many dimensions. I don't know how to plot your data!") # 2D plot def plot_result_2d(results, handle, mult_axis=None): - plt.figure() - acquired_data = results.get_data(handle) - if mult_axis is None: - axis_grid = results.get_axis(handle)[0] - axis_name = results.get_axis_name(handle)[0] - else: - axis_grid = results.get_axis(handle)[0][mult_axis] - axis_name = results.get_axis_name(handle)[0][mult_axis] + with zi_mpl_theme(): + plt.figure() + acquired_data = results.get_data(handle) + if mult_axis is None: + axis_grid = results.get_axis(handle)[0] + axis_name = results.get_axis_name(handle)[0] + else: + axis_grid = results.get_axis(handle)[0][mult_axis] + axis_name = results.get_axis_name(handle)[0][mult_axis] - plt.plot(axis_grid, np.absolute(acquired_data)) - plt.xlabel(axis_name) - plt.ylabel(handle) + plt.plot(axis_grid, np.absolute(acquired_data)) + plt.xlabel(axis_name) + plt.ylabel(handle) # 3D plot def plot_result_3d(results, handle): - plt.figure() - acquired_data = results.get_data(handle) - y_axis_grid = results.get_axis(handle)[0] - y_axis_name = results.get_axis_name(handle)[0] - x_axis_grid = results.get_axis(handle)[1] - x_axis_name = results.get_axis_name(handle)[1] + with zi_mpl_theme(): + plt.figure() + acquired_data = results.get_data(handle) + y_axis_grid = results.get_axis(handle)[0] + y_axis_name = results.get_axis_name(handle)[0] + x_axis_grid = results.get_axis(handle)[1] + x_axis_name = results.get_axis_name(handle)[1] - X, Y = np.meshgrid(x_axis_grid, y_axis_grid) + X, Y = np.meshgrid(x_axis_grid, y_axis_grid) - ax = plt.axes(projection="3d") - ax.plot_wireframe(X, Y, np.absolute(acquired_data)) - ax.set_xlabel(x_axis_name) - ax.set_ylabel(y_axis_name) - ax.set_zlabel(handle) + ax = plt.axes(projection="3d") + ax.plot_wireframe(X, Y, np.absolute(acquired_data)) + ax.set_xlabel(x_axis_name) + ax.set_ylabel(y_axis_name) + ax.set_zlabel(handle) - plt.figure() # Create new dummy figure to ensure no side effects of the current 3D figure + plt.figure() # Create new dummy figure to ensure no side effects of the current 3D figure def plot2d_abs(results, handle): - data = results.get_data(handle) - axis = results.get_axis(handle)[0] - xlabel = results.get_axis_name(handle)[0] - plt.plot(axis, np.abs(data)) - plt.xlabel(xlabel) - plt.ylabel("level") + with zi_mpl_theme(): + data = results.get_data(handle) + axis = results.get_axis(handle)[0] + xlabel = results.get_axis_name(handle)[0] + plt.plot(axis, np.abs(data)) + plt.xlabel(xlabel) + plt.ylabel("level") diff --git a/laboneq/controller/communication.py b/laboneq/controller/communication.py index 9ab0d25..2b07328 100644 --- a/laboneq/controller/communication.py +++ b/laboneq/controller/communication.py @@ -14,6 +14,7 @@ import numpy as np import zhinst.core from zhinst.toolkit import Session as TKSession +from laboneq.controller.devices.device_utils import calc_dev_type from laboneq.controller.devices.zi_emulator import ziDAQServerEmulator from laboneq.controller.devices.zi_node_monitor import NodeMonitor @@ -78,6 +79,8 @@ def __str__(self): class ZiApiWrapperBase(ABC): + use_filenames_for_blobs: bool = True + def __init__(self, name): self._name = name self._node_logger = logging.getLogger("node.log") @@ -97,10 +100,27 @@ def _log_node(self, msg): def _log_set(self, method_name: str, daq_action): path = daq_action.path - value = daq_action.value if daq_action.filename is None 
else daq_action.filename + if self.use_filenames_for_blobs or isinstance(daq_action.value, bytes): + value = ( + daq_action.value if daq_action.filename is None else daq_action.filename + ) + else: + value = daq_action.value if isinstance(value, np.ndarray): - value = str(np.array2string(value, threshold=30)) + array_repr = np.array2string( + value, + threshold=30, + max_line_width=1000, + floatmode="maxprec", + precision=3, + edgeitems=16, + ) + if "..." in array_repr: + value = f"array({array_repr}, shape={value.shape})" + else: + value = array_repr + elif isinstance(value, (list, tuple)): value = str(value) if isinstance(value, str): @@ -129,7 +149,7 @@ def _actions_to_set_api_input( ) -> list[list[Any]]: return [[action.path, action.value] for action in daq_actions] - async def batch_set(self, daq_actions: list[DaqNodeAction]): + async def batch_set(self, daq_actions: list[DaqNodeSetAction]): """Set the list of nodes in one call to API Parameters: @@ -139,9 +159,6 @@ async def batch_set(self, daq_actions: list[DaqNodeAction]): when all nodes are set """ - if not isinstance(daq_actions, list): - raise LabOneQControllerException("List expected") - node_list = [daq_action.path for daq_action in daq_actions] _logger.debug("Batch set node list: %s", node_list) @@ -386,50 +403,22 @@ def __init__(self, name, server_qualifier: ServerQualifier = None): assert server_qualifier.dry_run is True super().__init__(name, server_qualifier) - def map_device_type(self, device_qualifier: DeviceQualifier): - assert isinstance(self._zi_api_object, ziDAQServerEmulator) - - def calc_dev_type(device_qualifier: DeviceQualifier) -> str: - if device_qualifier.options.is_qc is True: - return "SHFQC" - else: - return device_qualifier.driver - self._zi_api_object.map_device_type( - device_qualifier.options.serial, calc_dev_type(device_qualifier) - ) - self._zi_api_object.set_option( - device_qualifier.options.serial, - "dev_type", - device_qualifier.options.dev_type, - ) - if device_qualifier.options.expected_installed_options is not None: - exp_opts = ( - device_qualifier.options.expected_installed_options.upper().split("/") - ) - if len(exp_opts) > 0 and exp_opts[0] == "": - exp_opts.pop(0) - if len(exp_opts) > 0: - self._zi_api_object.set_option( - device_qualifier.options.serial, - "features/devtype", - exp_opts.pop(0), - ) - self._zi_api_object.set_option( - device_qualifier.options.serial, - "features/options", - "\n".join(exp_opts), - ) - - def set_emulation_option(self, serial: str, option: str, value: Any): - assert isinstance(self._zi_api_object, ziDAQServerEmulator) - self._zi_api_object.set_option(serial, option, value) +def map_device_type(daq: Any, device_qualifier: DeviceQualifier): + assert isinstance(daq, ziDAQServerEmulator) + daq.map_device_type( + device_qualifier.options.serial, calc_dev_type(device_qualifier) + ) -async def batch_set(all_actions: List[DaqNodeAction]): - split_actions: Dict[DaqWrapper, List[DaqNodeAction]] = {} +async def batch_set(all_actions: List[DaqNodeSetAction]): + split_actions: Dict[DaqWrapper, List[DaqNodeSetAction]] = {} for daq_action in all_actions: daq_actions = split_actions.setdefault(daq_action.daq, []) daq_actions.append(daq_action) for daq, daq_actions in split_actions.items(): await daq.batch_set(daq_actions) + + +async def batch_set_multiple(results: list[list[DaqNodeSetAction]]): + await batch_set([value for values in results for value in values]) diff --git a/laboneq/controller/controller.py b/laboneq/controller/controller.py index 5c65a5f..3b63e14 100644 
--- a/laboneq/controller/controller.py +++ b/laboneq/controller/controller.py @@ -3,14 +3,11 @@ from __future__ import annotations -import concurrent.futures import itertools import logging -import os import time from collections import defaultdict from copy import deepcopy -from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Callable import numpy as np @@ -20,20 +17,17 @@ from laboneq import __version__ from laboneq._observability import tracing from laboneq.controller.communication import ( - DaqNodeAction, DaqNodeSetAction, DaqWrapper, batch_set, + batch_set_multiple, ) from laboneq.controller.devices.async_support import gather_and_apply from laboneq.controller.devices.device_collection import DeviceCollection -from laboneq.controller.devices.device_uhfqa import DeviceUHFQA -from laboneq.controller.devices.device_zi import Waveforms, IntegrationWeights +from laboneq.controller.devices.device_zi import DeviceZI from laboneq.controller.devices.zi_node_monitor import ResponseWaiter from laboneq.controller.near_time_runner import NearTimeRunner -from laboneq.controller.pipeliner_reload_tracker import PipelinerReloadTracker from laboneq.controller.recipe_processor import ( - AwgKey, RecipeData, RtExecutionInfo, pre_process_compiled, @@ -51,10 +45,8 @@ from laboneq.data.recipe import NtStepKey from laboneq.executor.execution_from_experiment import ExecutionFactoryFromExperiment from laboneq.executor.executor import Statement -from laboneq.controller.devices.device_pretty_printer import DevicePRETTYPRINTER if TYPE_CHECKING: - from laboneq.controller.devices.device_zi import DeviceZI from laboneq.core.types import CompiledExperiment from laboneq.data.execution_payload import ExecutionPayload from laboneq.dsl.experiment.pulse import Pulse @@ -85,22 +77,6 @@ def _stop_controller(controller: "Controller"): controller.shut_down() -@dataclass -class _SeqCCompileItem: - awg_index: int - seqc_code: str | None = None - seqc_filename: str | None = None - elf: bytes | None = None - - -@dataclass -class _UploadItem: - seqc_item: _SeqCCompileItem | None - waves: Waveforms | None - command_table: dict[Any] | None - integration_weights: IntegrationWeights | None - - class Controller: def __init__( self, @@ -122,11 +98,10 @@ def __init__( # Waves which are uploaded to the devices via pulse replacements self._current_waves = [] self._neartime_callbacks: dict[str, Callable] = neartime_callbacks - self._nodes_from_neartime_callbacks: list[DaqNodeAction] = [] + self._nodes_from_neartime_callbacks: list[DaqNodeSetAction] = [] self._recipe_data: RecipeData = None self._session: Any = None self._results = ExperimentResults() - self._pipeliner_reload_tracker = PipelinerReloadTracker() _logger.debug("Controller created") _logger.debug("Controller debug logging is on") @@ -140,12 +115,12 @@ def _allocate_resources(self): self._devices.find_by_uid(osc_param.device_id).allocate_osc(osc_param) async def _reset_to_idle_state(self): - async with gather_and_apply(batch_set) as awaitables: + async with gather_and_apply(batch_set_multiple) as awaitables: for _, device in self._devices.all: awaitables.append(device.collect_reset_nodes()) async def _apply_recipe_initializations(self): - async with gather_and_apply(batch_set) as awaitables: + async with gather_and_apply(batch_set_multiple) as awaitables: for initialization in self._recipe_data.initializations: device = self._devices.find_by_uid(initialization.device_uid) awaitables.append( @@ -158,193 +133,41 @@ async def _apply_recipe_initializations(self): 
awaitables.append(device.collect_osc_initialization_nodes()) async def _set_nodes_before_awg_program_upload(self): - nodes_to_initialize = [] - for initialization in self._recipe_data.initializations: - device = self._devices.find_by_uid(initialization.device_uid) - nodes_to_initialize.extend( - device.collect_awg_before_upload_nodes( - initialization, self._recipe_data - ) - ) - await batch_set(nodes_to_initialize) - - @tracing.trace("awg-program-handler") - async def _upload_awg_programs(self, nt_step: NtStepKey, rt_section_uid: str): - # Mise en place: - awg_data: dict[DeviceZI, list[_UploadItem]] = defaultdict(list) - awgs_used: dict[DeviceZI, set[int]] = defaultdict(set) - compile_data: dict[DeviceZI, list[_SeqCCompileItem]] = defaultdict(list) - recipe_data = self._recipe_data - rt_execution_info = recipe_data.rt_execution_infos.get(rt_section_uid) - with_pipeliner = rt_execution_info.pipeliner_chunk_count is not None - acquisition_type = RtExecutionInfo.get_acquisition_type_def(rt_execution_info) - for initialization in recipe_data.initializations: - if not initialization.awgs: - continue - - device = self._devices.find_by_uid(initialization.device_uid) - if with_pipeliner and not device.has_pipeliner: - raise LabOneQControllerException( - f"{device.dev_repr}: Pipeliner is not supported by the device." - ) - - for awg_obj in initialization.awgs: - awg_index = awg_obj.awg - awgs_used[device].add(awg_index) - for pipeline_chunk in range( - rt_execution_info.pipeliner_chunk_count or 1 - ): - effective_nt_step = ( - NtStepKey(indices=tuple([*nt_step.indices, pipeline_chunk])) - if with_pipeliner - else nt_step - ) - rt_exec_step = next( - ( - r - for r in recipe_data.recipe.realtime_execution_init - if r.device_id == initialization.device_uid - and r.awg_id == awg_obj.awg - and r.nt_step == effective_nt_step - ), - None, - ) - - if rt_execution_info.pipeliner_chunk_count is None: - seqc_filename = ( - None if rt_exec_step is None else rt_exec_step.seqc_ref - ) - else: - # TODO(2K): repeated compilation of SeqC to be solved by moving it to the compile stage - ( - rt_exec_step, - seqc_filename, - ) = self._pipeliner_reload_tracker.calc_next_step( - awg_key=AwgKey( - device_uid=initialization.device_uid, - awg_index=awg_index, - ), - pipeline_chunk=pipeline_chunk, - rt_exec_step=rt_exec_step, - ) - - if rt_exec_step is None: - continue - - if isinstance(device, DevicePRETTYPRINTER): - # TODO: Temporary API - await device.prepare_artifacts( - artifacts=recipe_data.scheduled_experiment.artifacts, - channel=rt_exec_step.awg_id, - instructions_ref=rt_exec_step.seqc_ref, - waves_ref=rt_exec_step.wave_indices_ref, - ) - continue - seqc_code = device.prepare_seqc( - recipe_data.scheduled_experiment.artifacts, - rt_exec_step.seqc_ref, - ) - waves = device.prepare_waves( - recipe_data.scheduled_experiment.artifacts, - rt_exec_step.wave_indices_ref, - ) - command_table = device.prepare_command_table( - recipe_data.scheduled_experiment.artifacts, - rt_exec_step.wave_indices_ref, - ) - integration_weights = device.prepare_integration_weights( - recipe_data.scheduled_experiment.artifacts, - recipe_data.recipe.integrator_allocations, - rt_exec_step.kernel_indices_ref, - ) - - seqc_item = _SeqCCompileItem( - awg_index=awg_index, - ) - - if seqc_code is not None: - seqc_item.seqc_code = seqc_code - seqc_item.seqc_filename = seqc_filename - compile_data[device].append(seqc_item) - - awg_data[device].append( - _UploadItem( - seqc_item=seqc_item, - waves=waves, - command_table=command_table, - 
integration_weights=integration_weights, - ) + async with gather_and_apply(batch_set_multiple) as awaitables: + for initialization in self._recipe_data.initializations: + device = self._devices.find_by_uid(initialization.device_uid) + awaitables.append( + device.collect_awg_before_upload_nodes( + initialization, self._recipe_data ) + ) - if compile_data: - self._awg_compile(compile_data) - - # Upload AWG programs, waveforms, and command tables: - elf_node_settings: dict[DaqWrapper, list[DaqNodeSetAction]] = defaultdict(list) + async def _perform_awg_upload( + self, + results: list[ + tuple[ + DeviceZI, list[DaqNodeSetAction], list[DaqNodeSetAction], dict[str, Any] + ] + ], + ): elf_upload_conditions: dict[DaqWrapper, dict[str, Any]] = defaultdict(dict) - wf_node_settings: dict[DaqWrapper, list[DaqNodeSetAction]] = defaultdict(list) - for device, items in awg_data.items(): - if with_pipeliner: - for awg_index in awgs_used[device]: - elf_node_settings[device.daq].extend( - device.pipeliner_prepare_for_upload(awg_index) - ) - for item in items: - seqc_item = item.seqc_item - if seqc_item.elf is not None: - set_action = device.prepare_upload_elf( - seqc_item.elf, seqc_item.awg_index, seqc_item.seqc_filename - ) - node_settings = elf_node_settings[device.daq] - node_settings.append(set_action) - - if isinstance(device, DeviceUHFQA): - # UHFQA does not yet support upload of ELF and waveforms in - # a single transaction. - ready_node = device.get_sequencer_paths( - seqc_item.awg_index - ).ready - elf_upload_conditions[device.daq][ready_node] = 1 - - if isinstance(device, DeviceUHFQA): - wf_dev_nodes = wf_node_settings[device.daq] - else: - wf_dev_nodes = elf_node_settings[device.daq] - - if item.waves is not None: - wf_dev_nodes += device.prepare_upload_all_binary_waves( - seqc_item.awg_index, item.waves, acquisition_type - ) - - if item.command_table is not None: - set_action = device.prepare_upload_command_table( - seqc_item.awg_index, item.command_table - ) - wf_dev_nodes.append(set_action) - - if item.integration_weights is not None: - wf_dev_nodes += device.prepare_upload_all_integration_weights( - seqc_item.awg_index, item.integration_weights - ) + elf_node_settings: list[DaqNodeSetAction] = [] + wf_node_settings: list[DaqNodeSetAction] = [] - if with_pipeliner: - # For devices with pipeliner, wf_dev_nodes == elf_node_settings - wf_dev_nodes.extend(device.pipeliner_commit(seqc_item.awg_index)) - - if with_pipeliner: - for awg_index in awgs_used[device]: - elf_upload_conditions[device.daq].update( - device.pipeliner_ready_conditions(awg_index) - ) + for device, elf_nodes, wf_nodes, upload_ready_conditions in results: + elf_node_settings.extend(elf_nodes) + wf_node_settings.extend(wf_nodes) + if len(upload_ready_conditions) > 0: + elf_upload_conditions[device.daq].update(upload_ready_conditions) + # Upload AWG programs, waveforms, and command tables: if len(elf_upload_conditions) > 0: for daq in elf_upload_conditions.keys(): daq.node_monitor.flush() _logger.debug("Started upload of AWG programs...") with tracing.get_tracer().start_span("upload-awg-programs") as _: - for daq, nodes in elf_node_settings.items(): - await daq.batch_set(nodes) + await batch_set(elf_node_settings) if len(elf_upload_conditions) > 0: _logger.debug("Waiting for devices...") @@ -362,50 +185,39 @@ async def _upload_awg_programs(self, nt_step: NtStepKey, rt_section_uid: str): ) if len(wf_node_settings) > 0: _logger.debug("Started upload of waveforms...") - with tracing.get_tracer().start_span("upload-waveforms") as _: - 
for daq, nodes in wf_node_settings.items(): - await daq.batch_set(nodes) + with tracing.get_tracer().start_span("upload-waveforms"): + await batch_set(wf_node_settings) _logger.debug("Finished upload.") - @classmethod - def _awg_compile(cls, awg_data: dict[DeviceZI, list[_SeqCCompileItem]]): - # Compile in parallel: - def worker(device: DeviceZI, item: _SeqCCompileItem, span: tracing.Span): - with tracing.get_tracer().start_span("compile-awg-thread", span) as _: - item.elf = device.compile_seqc( - item.seqc_code, item.awg_index, item.seqc_filename - ) + async def _upload_awg_programs(self, nt_step: NtStepKey, rt_section_uid: str): + # Mise en place: + recipe_data = self._recipe_data - _logger.debug("Started compilation of AWG programs...") - with tracing.get_tracer().start_span("compile-awg-programs") as awg_span: - max_workers = os.environ.get("LABONEQ_AWG_COMPILER_MAX_WORKERS") - max_workers = int(max_workers) if max_workers is not None else None - with concurrent.futures.ThreadPoolExecutor( - max_workers=max_workers - ) as executor: - futures = [ - executor.submit(worker, device, item, awg_span) - for device, items in awg_data.items() - for item in items - ] - concurrent.futures.wait(futures) - exceptions = [ - future.exception() - for future in futures - if future.exception() is not None - ] - if len(exceptions) > 0: - raise LabOneQControllerException( - "Compilation failed. See log output for details." + async with gather_and_apply(self._perform_awg_upload) as awaitables: + for initialization in recipe_data.initializations: + if not initialization.awgs: + continue + + device = self._devices.find_by_uid(initialization.device_uid) + + for awg_obj in initialization.awgs: + awg_index = awg_obj.awg + awaitables.append( + device.prepare_artifacts( + recipe_data=recipe_data, + rt_section_uid=rt_section_uid, + initialization=initialization, + awg_index=awg_index, + nt_step=nt_step, + ) ) - _logger.debug("Finished compilation.") async def _set_nodes_after_awg_program_upload(self): nodes_to_initialize = [] for initialization in self._recipe_data.initializations: device = self._devices.find_by_uid(initialization.device_uid) nodes_to_initialize.extend( - device.collect_awg_after_upload_nodes(initialization) + await device.collect_awg_after_upload_nodes(initialization) ) await batch_set(nodes_to_initialize) @@ -432,20 +244,24 @@ async def _configure_triggers(self): await batch_set(nodes_to_configure_triggers) - def _prepare_nt_step( + async def _prepare_nt_step( self, sweep_params_tracker: SweepParamsTracker - ) -> list[DaqNodeAction]: + ) -> list[DaqNodeSetAction]: for param in sweep_params_tracker.updated_params(): self._recipe_data.attribute_value_tracker.update( param, sweep_params_tracker.get_param(param) ) - nt_sweep_nodes: list[DaqNodeAction] = [] + nt_sweep_nodes: list[DaqNodeSetAction] = [] for device_uid, device in self._devices.all: nt_sweep_nodes.extend( - device.collect_prepare_nt_step_nodes( - self._recipe_data.attribute_value_tracker.device_view(device_uid), - self._recipe_data, + await device.maybe_async( + device.collect_prepare_nt_step_nodes( + self._recipe_data.attribute_value_tracker.device_view( + device_uid + ), + self._recipe_data, + ) ) ) @@ -461,19 +277,17 @@ async def _initialize_devices(self): async def _execute_one_step_followers(self, with_pipeliner: bool): _logger.debug("Settings nodes to start on followers") - nodes_to_execute = [] - for _, device in self._devices.followers: - nodes_to_execute.extend( - await 
device.collect_execution_nodes(with_pipeliner=with_pipeliner) - ) - - await batch_set(nodes_to_execute) + async with gather_and_apply(batch_set_multiple) as awaitables: + for _, device in self._devices.followers: + awaitables.append( + device.collect_execution_nodes(with_pipeliner=with_pipeliner) + ) response_waiter = ResponseWaiter() for _, device in self._devices.followers: response_waiter.add( target=device.daq.node_monitor, - conditions=device.conditions_for_execution_ready( + conditions=await device.conditions_for_execution_ready( with_pipeliner=with_pipeliner ), ) @@ -487,11 +301,9 @@ async def _execute_one_step_followers(self, with_pipeliner: bool): # Standalone workaround: The device is triggering itself, # thus split the execution into AWG trigger arming and triggering - nodes_to_execute = [] - for _, device in self._devices.followers: - nodes_to_execute.extend(device.collect_internal_start_execution_nodes()) - - await batch_set(nodes_to_execute) + async with gather_and_apply(batch_set_multiple) as awaitables: + for _, device in self._devices.followers: + awaitables.append(device.collect_internal_start_execution_nodes()) async def _execute_one_step_leaders(self, with_pipeliner: bool): _logger.debug("Settings nodes to start on leaders") @@ -503,7 +315,7 @@ async def _execute_one_step_leaders(self, with_pipeliner: bool): ) await batch_set(nodes_to_execute) - def _wait_execution_to_stop( + async def _wait_execution_to_stop( self, acquisition_type: AcquisitionType, with_pipeliner: bool ): min_wait_time = self._recipe_data.recipe.max_step_execution_time @@ -517,7 +329,7 @@ def _wait_execution_to_stop( for _, device in self._devices.followers: response_waiter.add( target=device.daq.node_monitor, - conditions=device.conditions_for_execution_done( + conditions=await device.conditions_for_execution_done( acquisition_type, with_pipeliner=with_pipeliner ), ) @@ -541,7 +353,7 @@ async def _setup_one_step_execution(self, with_pipeliner: bool): for init in self._recipe_data.initializations ) nodes_to_execute.extend( - device.collect_execution_setup_nodes( + await device.collect_execution_setup_nodes( with_pipeliner=with_pipeliner, has_awg_in_use=has_awg_in_use ) ) @@ -551,7 +363,9 @@ async def _teardown_one_step_execution(self, with_pipeliner: bool): nodes_to_execute = [] for _, device in self._devices.all: nodes_to_execute.extend( - device.collect_execution_teardown_nodes(with_pipeliner=with_pipeliner) + await device.collect_execution_teardown_nodes( + with_pipeliner=with_pipeliner + ) ) await batch_set(nodes_to_execute) @@ -574,7 +388,9 @@ async def _execute_one_step( _logger.debug("Execution started") - self._wait_execution_to_stop(acquisition_type, with_pipeliner=with_pipeliner) + await self._wait_execution_to_stop( + acquisition_type, with_pipeliner=with_pipeliner + ) await self._teardown_one_step_execution(with_pipeliner=with_pipeliner) _logger.debug("Execution stopped") @@ -781,29 +597,35 @@ def replace_pulse( if repl.replacement_type == ReplacementType.I_Q: clipped = np.clip(repl.samples, -1.0, 1.0) bin_wave = zhinst.utils.convert_awg_waveform(*clipped) - self._nodes_from_neartime_callbacks.append( - device.prepare_upload_binary_wave( - filename=repl.sig_string + " (repl)", - waveform=bin_wave, - awg_index=awg[1], - wave_index=target_wave[0], - acquisition_type=acquisition_type, + self._nodes_from_neartime_callbacks.extend( + device.to_daq_actions( + device.prepare_upload_binary_wave( + filename=repl.sig_string + " (repl)", + waveform=bin_wave, + awg_index=awg[1], + 
wave_index=target_wave[0], + acquisition_type=acquisition_type, + ) ) ) elif repl.replacement_type == ReplacementType.COMPLEX: np.clip(repl.samples.real, -1.0, 1.0, out=repl.samples.real) np.clip(repl.samples.imag, -1.0, 1.0, out=repl.samples.imag) - self._nodes_from_neartime_callbacks.append( - device.prepare_upload_binary_wave( - filename=repl.sig_string + " (repl)", - waveform=repl.samples, - awg_index=awg[1], - wave_index=target_wave[0], - acquisition_type=acquisition_type, + self._nodes_from_neartime_callbacks.extend( + device.to_daq_actions( + device.prepare_upload_binary_wave( + filename=repl.sig_string + " (repl)", + waveform=repl.samples, + awg_index=awg[1], + wave_index=target_wave[0], + acquisition_type=acquisition_type, + ) ) ) - def _prepare_rt_execution(self, rt_section_uid: str) -> list[DaqNodeAction]: + async def _prepare_rt_execution( + self, rt_section_uid: str + ) -> list[DaqNodeSetAction]: if rt_section_uid is None: return [], [] # Old recipe-based execution - skip RT preparation rt_execution_info = self._recipe_data.rt_execution_infos[rt_section_uid] @@ -811,7 +633,9 @@ def _prepare_rt_execution(self, rt_section_uid: str) -> list[DaqNodeAction]: nodes_to_prepare_rt = [*self._nodes_from_neartime_callbacks] self._nodes_from_neartime_callbacks.clear() for _, device in self._devices.leaders: - nodes_to_prepare_rt.extend(device.configure_feedback(self._recipe_data)) + nodes_to_prepare_rt.extend( + await device.configure_feedback(self._recipe_data) + ) for awg_key, awg_config in self._recipe_data.awgs_producing_results(): device = self._devices.find_by_uid(awg_key.device_uid) if rt_execution_info.averaging_mode == AveragingMode.SINGLE_SHOT: @@ -822,7 +646,7 @@ def _prepare_rt_execution(self, rt_section_uid: str) -> list[DaqNodeAction]: effective_averages = rt_execution_info.averages effective_averaging_mode = rt_execution_info.averaging_mode nodes_to_prepare_rt.extend( - device.configure_acquisition( + await device.configure_acquisition( awg_key, awg_config, self._recipe_data.recipe.integrator_allocations, @@ -850,6 +674,8 @@ def _prepare_result_shapes(self): raw_acquire_length = ( 4096 if awg_config is None else awg_config.raw_acquire_length ) + # TODO: This result format does not work when sweeping in near-time, returns only + # results of the last sweep parameter value empty_res = make_acquired_result( data=np.empty(shape=[raw_acquire_length], dtype=np.complex128), axis_name=["samples"], diff --git a/laboneq/controller/devices/async_support.py b/laboneq/controller/devices/async_support.py index 03ba3bf..e3ae3f7 100644 --- a/laboneq/controller/devices/async_support.py +++ b/laboneq/controller/devices/async_support.py @@ -4,22 +4,33 @@ from __future__ import annotations import asyncio from contextlib import asynccontextmanager -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Callable, Coroutine, TypeVar if TYPE_CHECKING: from laboneq.controller.communication import ServerQualifier from laboneq.controller.devices.device_zi import DeviceQualifier +ASYNC_DEBUG_MODE = False + + async def create_device_kernel_session( *, server_qualifier: ServerQualifier, device_qualifier: DeviceQualifier ): return None # TODO(2K): stub, will return the real async api kernel session +U = TypeVar("U") + + +async def _gather(*args: Coroutine[Any, Any, U]) -> list[U]: + if ASYNC_DEBUG_MODE: + return [await arg for arg in args] + return await asyncio.gather(*args) + + @asynccontextmanager -async def gather_and_apply(func): - awaitables = [] +async def gather_and_apply(func: 
diff --git a/laboneq/controller/devices/async_support.py b/laboneq/controller/devices/async_support.py
index 03ba3bf..e3ae3f7 100644
--- a/laboneq/controller/devices/async_support.py
+++ b/laboneq/controller/devices/async_support.py
@@ -4,22 +4,33 @@ from __future__ import annotations
 
 import asyncio
 from contextlib import asynccontextmanager
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Any, Callable, Coroutine, TypeVar
 
 if TYPE_CHECKING:
     from laboneq.controller.communication import ServerQualifier
     from laboneq.controller.devices.device_zi import DeviceQualifier
 
+ASYNC_DEBUG_MODE = False
+
+
 async def create_device_kernel_session(
     *, server_qualifier: ServerQualifier, device_qualifier: DeviceQualifier
 ):
     return None  # TODO(2K): stub, will return the real async api kernel session
 
 
+U = TypeVar("U")
+
+
+async def _gather(*args: Coroutine[Any, Any, U]) -> list[U]:
+    if ASYNC_DEBUG_MODE:
+        return [await arg for arg in args]
+    return await asyncio.gather(*args)
+
+
 @asynccontextmanager
-async def gather_and_apply(func):
-    awaitables = []
+async def gather_and_apply(func: Callable[[list[U]], Coroutine[Any, Any, None]]):
+    awaitables: list[Coroutine[Any, Any, U]] = []
     yield awaitables
-    results = await asyncio.gather(*awaitables)
-    await func([value for values in results for value in values])
+    await func(await _gather(*awaitables))
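The rewritten gather_and_apply yields a list that the caller fills with coroutines; on context exit they are awaited via _gather — concurrently in normal operation, or strictly one after another when ASYNC_DEBUG_MODE is set, which keeps a failure attributable to a single awaitable. Note also that, unlike the removed version, the results are no longer flattened, so func now receives one entry per awaitable. A usage sketch (the apply/work helpers are hypothetical):

import asyncio
from laboneq.controller.devices.async_support import gather_and_apply

async def apply(results: list[list[int]]) -> None:
    # Receives the gathered results in submission order.
    print([x for chunk in results for x in chunk])

async def work(n: int) -> list[int]:
    await asyncio.sleep(0)
    return [n, n * n]

async def main() -> None:
    async with gather_and_apply(apply) as awaitables:
        awaitables.append(work(2))
        awaitables.append(work(3))
    # on exit: apply([[2, 4], [3, 9]]) prints [2, 4, 3, 9]

asyncio.run(main())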
f"{self._node_base}/*/pipeliner/mode", - 0, # off - caching_strategy=CachingStrategy.NO_CACHE, - ), - DaqNodeSetAction( - self._daq, - f"{self._node_base}/*/synchronization/enable", - 0, - caching_strategy=CachingStrategy.NO_CACHE, - ), - ] + def pipeliner_reset_nodes(self) -> NodeCollector: + nc = NodeCollector(base=f"{self._node_base}/") + nc.add("*/pipeliner/mode", 0, cache=False) # off + nc.add("*/synchronization/enable", 0, cache=False) + return nc diff --git a/laboneq/controller/devices/device_collection.py b/laboneq/controller/devices/device_collection.py index 3881cd3..b06b986 100644 --- a/laboneq/controller/devices/device_collection.py +++ b/laboneq/controller/devices/device_collection.py @@ -289,7 +289,7 @@ async def disable_outputs( all_actions: list[DaqNodeSetAction] = [] for device_uid, outputs in outputs_per_device.items(): device = self.find_by_uid(device_uid) - all_actions.extend(device.disable_outputs(outputs, invert)) + all_actions.extend(await device.disable_outputs(outputs, invert)) await batch_set(all_actions) def shut_down(self): @@ -303,7 +303,7 @@ def free_allocations(self): async def on_experiment_end(self): all_actions: list[DaqNodeSetAction] = [] for device in self._devices.values(): - all_actions.extend(device.on_experiment_end()) + all_actions.extend(await device.maybe_async(device.on_experiment_end())) await batch_set(all_actions) def start_monitor(self): diff --git a/laboneq/controller/devices/device_hdawg.py b/laboneq/controller/devices/device_hdawg.py index 652a8f0..4b42bb8 100644 --- a/laboneq/controller/devices/device_hdawg.py +++ b/laboneq/controller/devices/device_hdawg.py @@ -14,12 +14,14 @@ DeviceAttributesView, ) from laboneq.controller.communication import ( - CachingStrategy, - DaqNodeAction, DaqNodeSetAction, ) from laboneq.controller.devices.awg_pipeliner import AwgPipeliner -from laboneq.controller.devices.device_zi import DeviceZI, delay_to_rounded_samples +from laboneq.controller.devices.device_zi import ( + DeviceZI, + NodeCollector, + delay_to_rounded_samples, +) from laboneq.controller.devices.zi_node_monitor import ( Command, Condition, @@ -110,20 +112,14 @@ def _get_next_osc_index( osc_index_base = osc_group * max_per_group return osc_index_base + previously_allocated - def disable_outputs( + async def disable_outputs( self, outputs: set[int], invert: bool ) -> list[DaqNodeSetAction]: - channels_to_disable: list[DaqNodeSetAction] = [ - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sigouts/{ch}/on", - 0, - caching_strategy=CachingStrategy.NO_CACHE, - ) - for ch in range(self._channels) - if (ch in outputs) != invert - ] - return channels_to_disable + nc = NodeCollector(base=f"/{self.serial}/") + for ch in range(self._channels): + if (ch in outputs) != invert: + nc.add(f"sigouts/{ch}/on", 0, cache=False) + return await self.maybe_async(nc) def _nodes_to_monitor_impl(self) -> list[str]: nodes = super()._nodes_to_monitor_impl() @@ -204,8 +200,10 @@ def rf_offset_control_nodes(self) -> list[NodeControlBase]: ) return nodes - def collect_awg_after_upload_nodes(self, initialization: Initialization): - nodes_to_configure_phase = [] + async def collect_awg_after_upload_nodes( + self, initialization: Initialization + ) -> list[DaqNodeSetAction]: + nc = NodeCollector(base=f"/{self.serial}/") for awg in initialization.awgs or []: _logger.debug( @@ -213,63 +211,53 @@ def collect_awg_after_upload_nodes(self, initialization: Initialization): self.dev_repr, awg.awg, ) - nodes_to_configure_phase.append( - DaqNodeSetAction( - self._daq, - 
f"/{self.serial}/sines/{awg.awg * 2}/phaseshift", - 90 if (awg.signal_type == SignalType.IQ) else 0, - ) - ) - - nodes_to_configure_phase.append( - DaqNodeSetAction( - self._daq, f"/{self.serial}/sines/{awg.awg * 2 + 1}/phaseshift", 0 - ) + nc.add( + f"sines/{awg.awg * 2}/phaseshift", + 90 if (awg.signal_type == SignalType.IQ) else 0, ) + nc.add(f"sines/{awg.awg * 2 + 1}/phaseshift", 0) - return nodes_to_configure_phase + return await self.maybe_async(nc) async def collect_execution_nodes( self, with_pipeliner: bool - ) -> list[DaqNodeAction]: + ) -> list[DaqNodeSetAction]: if with_pipeliner: - return self.pipeliner_collect_execution_nodes() + return await self.maybe_async(self.pipeliner_collect_execution_nodes()) return await super().collect_execution_nodes(with_pipeliner=with_pipeliner) - def conditions_for_execution_ready(self, with_pipeliner: bool) -> dict[str, Any]: + async def conditions_for_execution_ready( + self, with_pipeliner: bool + ) -> dict[str, Any]: if with_pipeliner: - return self.pipeliner_conditions_for_execution_ready() - - return { - f"/{self.serial}/awgs/{awg_index}/enable": 1 - for awg_index in self._allocated_awgs - } + conditions = self.pipeliner_conditions_for_execution_ready() + else: + conditions = { + f"/{self.serial}/awgs/{awg_index}/enable": 1 + for awg_index in self._allocated_awgs + } + return await self.maybe_async_wait(conditions) - def conditions_for_execution_done( + async def conditions_for_execution_done( self, acquisition_type: AcquisitionType, with_pipeliner: bool ) -> dict[str, Any]: if with_pipeliner: - return self.pipeliner_conditions_for_execution_done() - - return { - f"/{self.serial}/awgs/{awg_index}/enable": 0 - for awg_index in self._allocated_awgs - } + conditions = self.pipeliner_conditions_for_execution_done() + else: + conditions = { + f"/{self.serial}/awgs/{awg_index}/enable": 0 + for awg_index in self._allocated_awgs + } + return await self.maybe_async_wait(conditions) - def collect_execution_setup_nodes( + async def collect_execution_setup_nodes( self, with_pipeliner: bool, has_awg_in_use: bool - ) -> list[DaqNodeAction]: - nodes = [] + ) -> list[DaqNodeSetAction]: + nc = NodeCollector(base=f"/{self.serial}/") if with_pipeliner and has_awg_in_use: - nodes.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/system/synchronization/source", - 1, # external - ) - ) - return nodes + nc.add("system/synchronization/source", 1) # external + return await self.maybe_async(nc) async def collect_initialization_nodes( self, @@ -279,14 +267,14 @@ async def collect_initialization_nodes( ) -> list[DaqNodeSetAction]: _logger.debug("%s: Initializing device...", self.dev_repr) - nodes: list[tuple[str, Any]] = [] + nc = NodeCollector(base=f"/{self.serial}/") outputs = initialization.outputs or [] for output in outputs: awg_idx = output.channel // 2 self._allocated_awgs.add(awg_idx) - nodes.append((f"sigouts/{output.channel}/on", 1 if output.enable else 0)) + nc.add(f"sigouts/{output.channel}/on", 1 if output.enable else 0) if output.range is not None: if output.range_unit not in (None, "volt"): @@ -301,14 +289,9 @@ async def collect_initialization_nodes( output.range, self.dev_repr, ) - nodes.append( - ( - f"sigouts/{output.channel}/range", - output.range, - ) - ) - nodes.append((f"sigouts/{output.channel}/offset", output.offset)) - nodes.append((f"awgs/{awg_idx}/single", 1)) + nc.add(f"sigouts/{output.channel}/range", output.range) + nc.add(f"sigouts/{output.channel}/offset", output.offset) + nc.add(f"awgs/{awg_idx}/single", 1) awg_ch = 
             iq_idx = output.channel // 2
@@ -324,20 +307,17 @@ async def collect_initialization_nodes(
                     if output.modulation
                     else ModulationMode.OFF
                 )
-                nodes += [
-                    (
-                        f"awgs/{awg_idx}/outputs/{awg_ch}/modulation/mode",
-                        modulation_mode,
-                    ),
-                    (
-                        f"awgs/{awg_idx}/outputs/{awg_ch}/gains/{diagonal_channel_index}",
-                        output.gains.diagonal,
-                    ),
-                    (
-                        f"awgs/{awg_idx}/outputs/{awg_ch}/gains/{off_diagonal_channel_index}",
-                        output.gains.off_diagonal,
-                    ),
-                ]
+                nc.add(
+                    f"awgs/{awg_idx}/outputs/{awg_ch}/modulation/mode", modulation_mode
+                )
+                nc.add(
+                    f"awgs/{awg_idx}/outputs/{awg_ch}/gains/{diagonal_channel_index}",
+                    output.gains.diagonal,
+                )
+                nc.add(
+                    f"awgs/{awg_idx}/outputs/{awg_ch}/gains/{off_diagonal_channel_index}",
+                    output.gains.off_diagonal,
+                )
             else:  # I/Q output
                 modulation_mode = (
@@ -345,20 +325,15 @@ async def collect_initialization_nodes(
                     if output.modulation
                     else ModulationMode.OFF
                 )
-                nodes += [
-                    (
-                        f"awgs/{awg_idx}/outputs/{awg_ch}/modulation/mode",
-                        modulation_mode,
-                    ),
-                    (
-                        f"awgs/{awg_idx}/outputs/{awg_ch}/gains/0",
-                        iq_gains_mx[0][awg_ch],
-                    ),
-                    (
-                        f"awgs/{awg_idx}/outputs/{awg_ch}/gains/1",
-                        iq_gains_mx[1][awg_ch],
-                    ),
-                ]
+                nc.add(
+                    f"awgs/{awg_idx}/outputs/{awg_ch}/modulation/mode", modulation_mode
+                )
+                nc.add(
+                    f"awgs/{awg_idx}/outputs/{awg_ch}/gains/0", iq_gains_mx[0][awg_ch]
+                )
+                nc.add(
+                    f"awgs/{awg_idx}/outputs/{awg_ch}/gains/1", iq_gains_mx[1][awg_ch]
+                )
 
             precomp_p = f"sigouts/{output.channel}/precompensation/"
             has_pc = "PC" in self.dev_opts
@@ -370,44 +345,38 @@ async def collect_initialization_nodes(
                     raise LabOneQControllerException(
                         f"Precompensation is not supported on device {self.dev_repr}."
                     )
-                nodes.append((precomp_p + "enable", 1))
+                nc.add(precomp_p + "enable", 1)
                 # Exponentials
                 for e in range(8):
                     exp_p = precomp_p + f"exponentials/{e}/"
                     try:
                         exp = precomp["exponential"][e]
-                        nodes += [
-                            (exp_p + "enable", 1),
-                            (exp_p + "timeconstant", exp["timeconstant"]),
-                            (exp_p + "amplitude", exp["amplitude"]),
-                        ]
+                        nc.add(exp_p + "enable", 1)
+                        nc.add(exp_p + "timeconstant", exp["timeconstant"])
+                        nc.add(exp_p + "amplitude", exp["amplitude"])
                     except (KeyError, IndexError, TypeError):
-                        nodes.append((exp_p + "enable", 0))
+                        nc.add(exp_p + "enable", 0)
                 # Bounce
                 bounce_p = precomp_p + "bounces/0/"
                 try:
                     bounce = precomp["bounce"]
                     delay = bounce["delay"]
                     amp = bounce["amplitude"]
-                    nodes += [
-                        (bounce_p + "enable", 1),
-                        (bounce_p + "delay", delay),
-                        (bounce_p + "amplitude", amp),
-                    ]
+                    nc.add(bounce_p + "enable", 1)
+                    nc.add(bounce_p + "delay", delay)
+                    nc.add(bounce_p + "amplitude", amp)
                 except (KeyError, TypeError):
-                    nodes.append((bounce_p + "enable", 0))
+                    nc.add(bounce_p + "enable", 0)
                 # Highpass
                 hp_p = precomp_p + "highpass/0/"
                 try:
                     hp = precomp["high_pass"]
                     timeconstant = hp["timeconstant"]
-                    nodes += [
-                        (hp_p + "enable", 1),
-                        (hp_p + "timeconstant", timeconstant),
-                        (hp_p + "clearing/slope", 1),
-                    ]
+                    nc.add(hp_p + "enable", 1)
+                    nc.add(hp_p + "timeconstant", timeconstant)
+                    nc.add(hp_p + "clearing/slope", 1)
                 except (KeyError, TypeError):
-                    nodes.append((hp_p + "enable", 0))
+                    nc.add(hp_p + "enable", 0)
                 # FIR
                 fir_p = precomp_p + "fir/"
                 try:
@@ -417,49 +386,47 @@ async def collect_initialization_nodes(
                             "FIR coefficients must be a list of at most 40 doubles"
                         )
                     fir = np.concatenate((fir, np.zeros((40 - len(fir)))))
-                    nodes += [(fir_p + "enable", 1), (fir_p + "coefficients", fir)]
+                    nc.add(fir_p + "enable", 1)
+                    nc.add(fir_p + "coefficients", fir)
                 except (KeyError, IndexError, TypeError):
-                    nodes.append((fir_p + "enable", 0))
+                    nc.add(fir_p + "enable", 0)
 
             except (KeyError, TypeError, AttributeError):
                 if has_pc:
-                    nodes.append((precomp_p + "enable", 0))
+                    nc.add(precomp_p + "enable", 0)
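The FIR branch above accepts at most 40 coefficients and zero-pads shorter vectors, so the device always receives the full tap count. The same check/pad step in isolation (helper name hypothetical, limit taken from the code above):

import numpy as np

def pad_fir_coefficients(coefficients, max_taps: int = 40) -> np.ndarray:
    fir = np.asarray(coefficients, dtype=float)
    if len(fir) > max_taps:
        raise ValueError(
            f"FIR coefficients must be a list of at most {max_taps} doubles"
        )
    # Zero-pad to the fixed tap count expected by the precompensation node.
    return np.concatenate((fir, np.zeros(max_taps - len(fir))))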
 
         if output.marker_mode is not None:
             if output.marker_mode == "TRIGGER":
-                nodes.append(
-                    (f"triggers/out/{output.channel}/source", output.channel % 2)
-                )
+                nc.add(f"triggers/out/{output.channel}/source", output.channel % 2)
             elif output.marker_mode == "MARKER":
-                nodes.append(
-                    (
-                        f"triggers/out/{output.channel}/source",
-                        4 + 2 * (output.channel % 2),
-                    )
+                nc.add(
+                    f"triggers/out/{output.channel}/source",
+                    4 + 2 * (output.channel % 2),
                 )
             else:
                 raise ValueError(
                     f"Marker mode must be either 'MARKER' or 'TRIGGER', but got {output.marker_mode} for output {output.channel} on HDAWG {self.serial}"
                 )
             # set trigger delay to 0
-            nodes.append((f"triggers/out/{output.channel}/delay", 0.0))
+            nc.add(f"triggers/out/{output.channel}/delay", 0.0)
 
         osc_selects = {
             ch: osc.index for osc in self._allocated_oscs for ch in osc.channels
         }
         for ch, osc_idx in osc_selects.items():
-            nodes.append((f"sines/{ch}/oscselect", osc_idx))
+            nc.add(f"sines/{ch}/oscselect", osc_idx)
 
         # Configure DIO/ZSync at init (previously this was done after AWG loading).
         # This is a prerequisite for passing AWG checks in FW on the pipeliner commit.
         # Without the pipeliner, these checks are only performed when the AWG is enabled,
         # therefore DIO could be configured after the AWG loading.
-        nodes.extend(self._collect_dio_configuration_nodes(initialization, recipe_data))
+        nc.extend(self._collect_dio_configuration_nodes(initialization, recipe_data))
 
-        return [DaqNodeSetAction(self._daq, f"/{self.serial}/{k}", v) for k, v in nodes]
+        return await self.maybe_async(nc)
 
     def collect_prepare_nt_step_nodes(
         self, attributes: DeviceAttributesView, recipe_data: RecipeData
-    ) -> list[DaqNodeAction]:
-        nodes_to_set = super().collect_prepare_nt_step_nodes(attributes, recipe_data)
+    ) -> NodeCollector:
+        nc = NodeCollector(base=f"/{self.serial}/")
+        nc.extend(super().collect_prepare_nt_step_nodes(attributes, recipe_data))
 
         for ch in range(self._channels):
             [scheduler_port_delay, port_delay], updated = attributes.resolve(
@@ -484,26 +451,16 @@ def collect_prepare_nt_step_nodes(
                 / self._sampling_rate
             )
 
-            nodes_to_set.append(
-                DaqNodeSetAction(
-                    daq=self.daq,
-                    path=f"/{self.serial}/sigouts/{ch}/delay",
-                    value=output_delay_rounded,
-                )
-            )
+            nc.add(f"sigouts/{ch}/delay", output_delay_rounded)
 
-        return nodes_to_set
+        return nc
 
-    def collect_awg_before_upload_nodes(
+    async def collect_awg_before_upload_nodes(
         self, initialization: Initialization, recipe_data: RecipeData
-    ):
-        device_specific_initialization_nodes = [
-            DaqNodeSetAction(
-                self._daq, f"/{self.serial}/system/awg/oscillatorcontrol", 1
-            ),
-        ]
-
-        return device_specific_initialization_nodes
+    ) -> list[DaqNodeSetAction]:
+        nc = NodeCollector(base=f"/{self.serial}/")
+        nc.add("system/awg/oscillatorcontrol", 1)
+        return await self.maybe_async(nc)
 
     def add_command_table_header(self, body: dict) -> dict:
         return {
@@ -517,14 +474,14 @@ def command_table_path(self, awg_index: int) -> str:
 
     async def collect_trigger_configuration_nodes(
         self, initialization: Initialization, recipe_data: RecipeData
-    ) -> list[DaqNodeAction]:
+    ) -> list[DaqNodeSetAction]:
         return []
 
     def _collect_dio_configuration_nodes(
         self, initialization: Initialization, recipe_data: RecipeData
-    ) -> list[tuple[str, Any]]:
+    ) -> NodeCollector:
         _logger.debug("%s: Configuring trigger configuration nodes.", self.dev_repr)
-        nodes: list[tuple[str, Any]] = []
+        nc = NodeCollector(base=f"/{self.serial}/")
 
         triggering_mode = initialization.config.triggering_mode
         if triggering_mode == TriggeringMode.ZSYNC_FOLLOWER:
@@ -532,8 +489,8 @@ def _collect_dio_configuration_nodes(
                 "%s: Configuring DIO mode: ZSync pass-through.", self.dev_repr
             )
             _logger.debug("%s: Configuring external clock to ZSync.", self.dev_repr)
-            nodes.append(("dios/0/mode", 3))
-            nodes.append(("dios/0/drive", 0xC))
+            nc.add("dios/0/mode", 3)
+            nc.add("dios/0/drive", 0xC)
 
             # Loop over at least one AWG instance to cover the case that the instrument is only used
            # as a communication proxy. Some of the nodes on the AWG branch are needed to get
@@ -542,8 +499,8 @@ def _collect_dio_configuration_nodes(
                 self._allocated_awgs if len(self._allocated_awgs) > 0 else range(1)
             ):
                 awg_path = f"awgs/{awg_index}"
-                nodes.append((f"{awg_path}/dio/strobe/slope", 0))
-                nodes.append((f"{awg_path}/dio/valid/polarity", 0))
+                nc.add(f"{awg_path}/dio/strobe/slope", 0)
+                nc.add(f"{awg_path}/dio/valid/polarity", 0)
                 awg_config = next(
                     (
                         awg_config
@@ -557,35 +514,29 @@ def _collect_dio_configuration_nodes(
                     None,
                 )
                 if awg_config is not None:
-                    nodes.append(
-                        (
-                            f"{awg_path}/zsync/register/shift",
-                            awg_config.register_selector_shift,
-                        )
+                    nc.add(
+                        f"{awg_path}/zsync/register/shift",
+                        awg_config.register_selector_shift,
                     )
-                    nodes.append(
-                        (
-                            f"{awg_path}/zsync/register/mask",
-                            awg_config.register_selector_bitmask,
-                        )
+                    nc.add(
+                        f"{awg_path}/zsync/register/mask",
+                        awg_config.register_selector_bitmask,
                     )
-                    nodes.append(
-                        (
-                            f"{awg_path}/zsync/register/offset",
-                            awg_config.command_table_match_offset,
-                        )
+                    nc.add(
+                        f"{awg_path}/zsync/register/offset",
+                        awg_config.command_table_match_offset,
                     )
 
         elif triggering_mode == TriggeringMode.DESKTOP_LEADER:
-            nodes.append(("triggers/in/0/level", DIG_TRIGGER_1_LEVEL))
+            nc.add("triggers/in/0/level", DIG_TRIGGER_1_LEVEL)
 
             for awg_index in (
                 self._allocated_awgs if len(self._allocated_awgs) > 0 else range(1)
             ):
-                nodes.append((f"awgs/{awg_index}/auxtriggers/0/slope", 1))
-                nodes.append((f"awgs/{awg_index}/auxtriggers/0/channel", 0))
+                nc.add(f"awgs/{awg_index}/auxtriggers/0/slope", 1)
+                nc.add(f"awgs/{awg_index}/auxtriggers/0/channel", 0)
 
-            nodes.append(("dios/0/mode", 1))
-            nodes.append(("dios/0/drive", 15))
+            nc.add("dios/0/mode", 1)
+            nc.add("dios/0/drive", 15)
 
             # Loop over at least one AWG instance to cover the case that the instrument is only used
            # as a communication proxy. Some of the nodes on the AWG branch are needed to get
             for awg_index in (
                 self._allocated_awgs if len(self._allocated_awgs) > 0 else range(1)
             ):
-                nodes.append((f"awgs/{awg_index}/dio/strobe/slope", 0))
-                nodes.append((f"awgs/{awg_index}/dio/valid/polarity", 2))
-                nodes.append((f"awgs/{awg_index}/dio/valid/index", 0))
-                nodes.append((f"awgs/{awg_index}/dio/mask/value", 0x3FF))
-                nodes.append((f"awgs/{awg_index}/dio/mask/shift", 1))
-
-        return nodes
-
-    async def collect_reset_nodes(self) -> list[DaqNodeAction]:
-        reset_nodes = []
-        reset_nodes.extend(
-            [
-                # Reset pipeliner first, attempt to set AWG enable leads to FW error if pipeliner was enabled.
-                *await self.pipeliner_reset_nodes(),
-                DaqNodeSetAction(
-                    self._daq,
-                    f"/{self.serial}/awgs/*/enable",
-                    0,
-                    caching_strategy=CachingStrategy.NO_CACHE,
-                ),
-                DaqNodeSetAction(
-                    self._daq,
-                    f"/{self.serial}/system/synchronization/source",
-                    0,  # internal
-                    caching_strategy=CachingStrategy.NO_CACHE,
-                ),
-            ]
-        )
+                nc.add(f"awgs/{awg_index}/dio/strobe/slope", 0)
+                nc.add(f"awgs/{awg_index}/dio/valid/polarity", 2)
+                nc.add(f"awgs/{awg_index}/dio/valid/index", 0)
+                nc.add(f"awgs/{awg_index}/dio/mask/value", 0x3FF)
+                nc.add(f"awgs/{awg_index}/dio/mask/shift", 1)
+
+        return nc
+
+    async def collect_reset_nodes(self) -> list[DaqNodeSetAction]:
+        nc = NodeCollector(base=f"/{self.serial}/")
+        # Reset pipeliner first, attempt to set AWG enable leads to FW error if pipeliner was enabled.
+        nc.extend(self.pipeliner_reset_nodes())
+        nc.add("awgs/*/enable", 0, cache=False)
+        nc.add("system/synchronization/source", 0, cache=False)  # internal
+        reset_nodes = await self.maybe_async(nc)
         # Reset errors must be the last operation, as above sets may cause errors.
         # See https://zhinst.atlassian.net/browse/HULK-1606
         reset_nodes.extend(await super().collect_reset_nodes())
         return reset_nodes
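The zsync/register shift, mask and offset nodes written in the ZSYNC_FOLLOWER branch determine which bits of the ZSync feedback register steer an AWG's command table. A sketch of the selection they are assumed to implement — the exact combination performed by the firmware is not spelled out in this patch, so treat this as illustrative only:

def command_table_entry(register_value: int, shift: int, mask: int, offset: int) -> int:
    # Shift the feedback register, keep the masked bits, then rebase into the
    # command-table region configured for this AWG (assumed semantics).
    return ((register_value >> shift) & mask) + offset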
diff --git a/laboneq/controller/devices/device_nonqc.py b/laboneq/controller/devices/device_nonqc.py
index e472b13..c491461 100644
--- a/laboneq/controller/devices/device_nonqc.py
+++ b/laboneq/controller/devices/device_nonqc.py
@@ -1,7 +1,7 @@
 # Copyright 2019 Zurich Instruments AG
 # SPDX-License-Identifier: Apache-2.0
 
-from laboneq.controller.communication import DaqNodeAction
+from laboneq.controller.communication import DaqNodeSetAction
 from laboneq.controller.devices.device_zi import DeviceZI
 
 
@@ -15,5 +15,5 @@ def is_follower(self):
     def is_standalone(self):
         return False
 
-    async def collect_reset_nodes(self) -> list[DaqNodeAction]:
+    async def collect_reset_nodes(self) -> list[DaqNodeSetAction]:
         return []
diff --git a/laboneq/controller/devices/device_pqsc.py b/laboneq/controller/devices/device_pqsc.py
index 6ed06f2..24ac2bf 100644
--- a/laboneq/controller/devices/device_pqsc.py
+++ b/laboneq/controller/devices/device_pqsc.py
@@ -7,11 +7,9 @@
 from enum import IntEnum
 
 from laboneq.controller.communication import (
-    CachingStrategy,
-    DaqNodeAction,
     DaqNodeSetAction,
 )
-from laboneq.controller.devices.device_zi import DeviceZI
+from laboneq.controller.devices.device_zi import DeviceZI, NodeCollector
 from laboneq.controller.devices.zi_node_monitor import (
     Command,
     Setting,
@@ -100,21 +98,20 @@ async def collect_initialization_nodes(
     ) -> list[DaqNodeSetAction]:
         return []
 
-    def configure_feedback(self, recipe_data: RecipeData) -> list[DaqNodeAction]:
+    async def configure_feedback(
+        self, recipe_data: RecipeData
+    ) -> list[DaqNodeSetAction]:
+        nc = NodeCollector(base=f"/{self.serial}/")
         min_wait_time = recipe_data.recipe.max_step_execution_time
         # This is required because the PQSC only receives the feedback events
         # during the holdoff time, even for a single trigger.
-        feedback_actions = [
-            DaqNodeSetAction(
-                self.daq, f"/{self.serial}/execution/holdoff", min_wait_time
-            )
-        ]
+        nc.add("execution/holdoff", min_wait_time)
         enabled_zsyncs = set()
         for port, downstream_devices in self._downlinks.items():
             [p_kind, p_addr] = port.split("/")
             if p_kind != "ZSYNCS":
                 continue
-            zsync_output = f"/{self.serial}/zsyncs/{p_addr}/output"
+            zsync_output = f"zsyncs/{p_addr}/output"
             zsync_base = f"{zsync_output}/registerbank"
             for follower_uid, follower_ref in downstream_devices:
                 follower = follower_ref()
@@ -126,143 +123,64 @@ def configure_feedback(self, recipe_data: RecipeData) -> list[DaqNodeAction]:
                     or awg_config.source_feedback_register in (None, "local")
                 ):
                     continue  # Only consider devices receiving feedback from PQSC
-                if p_addr in enabled_zsyncs:
-                    actions_to_enable_feedback = []
-                else:
-                    actions_to_enable_feedback = [
-                        DaqNodeSetAction(self.daq, f"{zsync_output}/enable", 1),
-                        DaqNodeSetAction(self.daq, f"{zsync_output}/source", 0),
-                    ]
+                if p_addr not in enabled_zsyncs:
+                    nc.add(f"{zsync_output}/enable", 1)
+                    nc.add(f"{zsync_output}/source", 0)
                 enabled_zsyncs.add(p_addr)
-                feedback_actions.extend(actions_to_enable_feedback)
 
                 reg_selector_base = (
                     f"{zsync_base}/sources/{awg_config.fb_reg_target_index}"
                 )
-                feedback_actions.extend(
-                    [
-                        DaqNodeSetAction(
-                            self.daq, f"{reg_selector_base}/enable", 1
-                        ),
-                        DaqNodeSetAction(
-                            self.daq,
-                            f"{reg_selector_base}/register",
-                            awg_config.source_feedback_register,
-                        ),
-                        DaqNodeSetAction(
-                            self.daq,
-                            f"{reg_selector_base}/index",
-                            awg_config.fb_reg_source_index,
-                        ),
-                    ]
+                nc.add(f"{reg_selector_base}/enable", 1)
+                nc.add(
+                    f"{reg_selector_base}/register",
+                    awg_config.source_feedback_register,
                 )
-        return feedback_actions
+                nc.add(f"{reg_selector_base}/index", awg_config.fb_reg_source_index)
+        return await self.maybe_async(nc)
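configure_feedback now enables each ZSync output register bank only once per port while still adding one register-selector entry per follower AWG. The same dedup pattern in isolation, with the follower tuples simplified from the recipe data used above (a sketch, not the controller's actual data model):

def feedback_routes(followers):
    # followers: iterable of (port_addr, target_index, source_register, source_index)
    enabled_ports: set[str] = set()
    for addr, target, register, index in followers:
        output = f"zsyncs/{addr}/output"
        if addr not in enabled_ports:
            # Enable the port's register bank only on first use.
            enabled_ports.add(addr)
            yield f"{output}/enable", 1
            yield f"{output}/source", 0
        base = f"{output}/registerbank/sources/{target}"
        yield f"{base}/enable", 1
        yield f"{base}/register", register
        yield f"{base}/index", index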
 
     async def collect_execution_nodes(
         self, with_pipeliner: bool
-    ) -> list[DaqNodeAction]:
+    ) -> list[DaqNodeSetAction]:
         _logger.debug("Starting execution...")
-        nodes = []
-        nodes.append(
-            DaqNodeSetAction(
-                self._daq,
-                f"/{self.serial}/execution/enable",
-                1,
-                caching_strategy=CachingStrategy.NO_CACHE,
-            )
-        )
-        nodes.append(
-            DaqNodeSetAction(
-                self._daq,
-                f"/{self.serial}/triggers/out/0/enable",
-                1,
-                caching_strategy=CachingStrategy.NO_CACHE,
-            )
-        )
-        return nodes
+        nc = NodeCollector(base=f"/{self.serial}/")
+        nc.add("execution/enable", 1, cache=False)
+        nc.add("triggers/out/0/enable", 1, cache=False)
+        return await self.maybe_async(nc)
 
-    def collect_execution_setup_nodes(
+    async def collect_execution_setup_nodes(
         self, with_pipeliner: bool, has_awg_in_use: bool
-    ) -> list[DaqNodeAction]:
-        nodes = []
+    ) -> list[DaqNodeSetAction]:
+        nc = NodeCollector(base=f"/{self.serial}/")
         if with_pipeliner:
-            nodes.append(
-                DaqNodeSetAction(
-                    self._daq,
-                    f"/{self.serial}/execution/synchronization/enable",
-                    1,  # enabled
-                )
-            )
-        return nodes
+            nc.add("execution/synchronization/enable", 1)
+        return await self.maybe_async(nc)
 
-    def collect_execution_teardown_nodes(
+    async def collect_execution_teardown_nodes(
         self, with_pipeliner: bool
-    ) -> list[DaqNodeAction]:
-        nodes = []
+    ) -> list[DaqNodeSetAction]:
+        nc = NodeCollector(base=f"/{self.serial}/")
         if with_pipeliner:
-            nodes.append(
-                DaqNodeSetAction(
-                    self._daq,
-                    f"/{self.serial}/execution/synchronization/enable",
-                    0,
-                )
-            )
-        return nodes
+            nc.add("execution/synchronization/enable", 0)
+        return await self.maybe_async(nc)
     async def collect_trigger_configuration_nodes(
         self, initialization: Initialization, recipe_data: RecipeData
-    ) -> list[DaqNodeAction]:
-        # TODO(2K): This was moved as is from no more existing "configure_as_leader".
-        # Verify, if separate `batch_set` per node is truly necessary here, or the corresponding
-        # nodes can be set in one batch with others.
-        _logger.debug(
-            "%s: Setting reference clock frequency to %d MHz...",
-            self.dev_repr,
-            initialization.config.reference_clock,
-        )
-
-        await self._daq.batch_set(
-            [
-                DaqNodeSetAction(
-                    self._daq,
-                    f"/{self.serial}/system/clocks/referenceclock/out/enable",
-                    1,
-                )
-            ]
-        )
-
-        await self._daq.batch_set(
-            [
-                DaqNodeSetAction(
-                    self._daq,
-                    f"/{self.serial}/system/clocks/referenceclock/out/freq",
-                    initialization.config.reference_clock.value,
-                )
-            ]
-        )
-
-        nodes_to_configure_triggers = []
-
-        nodes_to_configure_triggers.append(
-            DaqNodeSetAction(
-                self._daq,
-                f"/{self.serial}/execution/repetitions",
-                initialization.config.repetitions,
-            )
+    ) -> list[DaqNodeSetAction]:
+        nc = NodeCollector(base=f"/{self.serial}/")
+        nc.add("system/clocks/referenceclock/out/enable", 1)
+        nc.add(
+            "system/clocks/referenceclock/out/freq",
+            initialization.config.reference_clock.value,
         )
+        nc.add("execution/repetitions", initialization.config.repetitions)
+        return await self.maybe_async(nc)
 
-        return nodes_to_configure_triggers
-
-    async def collect_reset_nodes(self) -> list[DaqNodeAction]:
+    async def collect_reset_nodes(self) -> list[DaqNodeSetAction]:
+        nc = NodeCollector(base=f"/{self.serial}/")
+        nc.add("execution/synchronization/enable", 0, cache=False)
         reset_nodes = await super().collect_reset_nodes()
-        reset_nodes.append(
-            DaqNodeSetAction(
-                self._daq,
-                f"/{self.serial}/execution/synchronization/enable",
-                0,
-                caching_strategy=CachingStrategy.NO_CACHE,
-            )
-        )
+        reset_nodes.extend(await self.maybe_async(nc))
         return reset_nodes
 
     def _prepare_emulator(self):
@@ -275,8 +193,7 @@ def _prepare_emulator(self):
             if enabled_zsyncs.get(port.lower()) == to_dev.serial:
                 continue
             enabled_zsyncs[port.lower()] = to_dev.serial
-            self._daq_dry_run.set_emulation_option(
-                serial=self.serial,
+            self._set_emulation_option(
                 option=f"{port.lower()}/connection/serial",
                 value=to_dev.serial[3:],
             )
diff --git a/laboneq/controller/devices/device_pretty_printer.py b/laboneq/controller/devices/device_pretty_printer.py
index 6d7fbf0..8274166 100644
--- a/laboneq/controller/devices/device_pretty_printer.py
+++ b/laboneq/controller/devices/device_pretty_printer.py
@@ -4,14 +4,18 @@
 from __future__ import annotations
 
 import logging
-from laboneq.controller.communication import DaqNodeAction, DaqWrapper
-from laboneq.controller.devices.device_zi import DeviceQualifier, DeviceZI
+from typing import Any
+from laboneq.controller.communication import DaqNodeSetAction, DaqWrapper
+from laboneq.controller.devices.device_zi import (
+    DeviceQualifier,
+    DeviceZI,
+    NodeCollector,
+)
 from laboneq.controller.recipe_processor import DeviceRecipeData, RecipeData
-from laboneq.data.recipe import Initialization
+from laboneq.data.recipe import Initialization, NtStepKey
 from laboneq.controller.attribute_value_tracker import (
     DeviceAttributesView,
 )
-from laboneq.compiler.workflow.compiler_output import ArtifactsPrettyPrinter
 
 _logger = logging.getLogger(__name__)
 
@@ -34,12 +38,15 @@ def disconnect(self):
 
     async def prepare_artifacts(
         self,
-        artifacts: ArtifactsPrettyPrinter | dict[int, ArtifactsPrettyPrinter],
-        channel: str,
-        instructions_ref: str,
-        waves_ref: str,
-    ):
-        pass
+        recipe_data: RecipeData,
+        rt_section_uid: str,
+        initialization: Initialization,
+        awg_index: int,
+        nt_step: NtStepKey,
+    ) -> tuple[
+        DeviceZI, list[DaqNodeSetAction], list[DaqNodeSetAction], dict[str, Any]
+    ]:
+        return self, [], [], {}
 
     async def collect_initialization_nodes(
         self,
@@ -49,18 +56,18 @@ async def collect_initialization_nodes(
     ):
         return []
 
-    async def collect_osc_initialization_nodes(self) -> list[DaqNodeAction]:
+    async def collect_osc_initialization_nodes(self) -> list[DaqNodeSetAction]:
         return []
 
     def collect_prepare_nt_step_nodes(
         self, attributes: DeviceAttributesView, recipe_data: RecipeData
-    ) -> list[DaqNodeAction]:
-        return []
+    ) -> NodeCollector:
+        return NodeCollector()
 
     async def collect_execution_nodes(self, *args, **kwargs):
         return []
 
-    async def collect_reset_nodes(self) -> list[DaqNodeAction]:
+    async def collect_reset_nodes(self) -> list[DaqNodeSetAction]:
         return []
 
     async def fetch_errors(self):
diff --git a/laboneq/controller/devices/device_shfppc.py b/laboneq/controller/devices/device_shfppc.py
index d6b84ad..f5e5420 100644
--- a/laboneq/controller/devices/device_shfppc.py
+++ b/laboneq/controller/devices/device_shfppc.py
@@ -10,8 +10,8 @@
     DeviceAttribute,
     DeviceAttributesView,
 )
-from laboneq.controller.communication import DaqNodeAction, DaqNodeSetAction
-from laboneq.controller.devices.device_zi import DeviceZI
+from laboneq.controller.communication import DaqNodeSetAction
+from laboneq.controller.devices.device_zi import DeviceZI, NodeCollector
 from laboneq.controller.recipe_processor import DeviceRecipeData, RecipeData
 from laboneq.data.recipe import Initialization
 
@@ -61,7 +61,7 @@ def pre_process_attributes(
                 name=attribute_name, index=channel, value_or_param=settings[key]
             )
 
-    async def collect_reset_nodes(self) -> list[DaqNodeAction]:
+    async def collect_reset_nodes(self) -> list[DaqNodeSetAction]:
         return []
 
     async def collect_initialization_nodes(
@@ -70,7 +70,7 @@ async def collect_initialization_nodes(
         initialization: Initialization,
         recipe_data: RecipeData,
     ) -> list[DaqNodeSetAction]:
-        nodes_to_set: list[DaqNodeSetAction] = []
+        nc = NodeCollector()
         ppchannels = initialization.ppchannels or []
 
         def _convert(value):
@@ -80,29 +80,24 @@ def _convert(value):
 
         for settings in ppchannels:
             ch = settings["channel"]
-            nodes_to_set.append(
-                DaqNodeSetAction(self._daq, self._key_to_path("_on", ch), 1)
-            )
+            nc.add(self._key_to_path("_on", ch), 1)
             for key, value in settings.items():
                 if value is None or key in [*DeviceSHFPPC.attribute_keys, "channel"]:
                     # Skip unset values, or values that are bound to sweep params and will
                    # be set during the NT execution.
                     continue
-                nodes_to_set.append(
-                    DaqNodeSetAction(
-                        self._daq, self._key_to_path(key, ch), _convert(value)
-                    )
-                )
-        return nodes_to_set
+                nc.add(self._key_to_path(key, ch), _convert(value))
+        return await self.maybe_async(nc)
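The loop above writes only concrete values: None entries and keys listed in attribute_keys are deferred, because those are bound to sweep parameters and are written per near-time step in collect_prepare_nt_step_nodes below. A compact sketch of the same filtering; note that _convert's body lies outside this hunk, so the bool-to-int coercion shown here is an assumption, and swept_keys merely stands in for DeviceSHFPPC.attribute_keys:

def channel_settings_to_values(settings: dict, swept_keys: set):
    for key, value in settings.items():
        if value is None or key in swept_keys or key == "channel":
            continue  # unset, swept in near-time, or not a device node
        yield key, int(value) if isinstance(value, bool) else value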
 
     def collect_prepare_nt_step_nodes(
         self, attributes: DeviceAttributesView, recipe_data: RecipeData
-    ) -> list[DaqNodeAction]:
-        nodes_to_set = super().collect_prepare_nt_step_nodes(attributes, recipe_data)
+    ) -> NodeCollector:
+        nc = NodeCollector()
+        nc.extend(super().collect_prepare_nt_step_nodes(attributes, recipe_data))
         for ch in range(self._channels):
             for key, attr_name in DeviceSHFPPC.attribute_keys.items():
                 [value], updated = attributes.resolve(keys=[(attr_name, ch)])
                 if updated:
                     path = self._key_to_path(key, ch)
-                    nodes_to_set.append(DaqNodeSetAction(self._daq, path, value))
-        return nodes_to_set
+                    nc.add(path, value)
+        return nc
diff --git a/laboneq/controller/devices/device_shfqa.py b/laboneq/controller/devices/device_shfqa.py
index 2bb2cd2..51d0aa1 100644
--- a/laboneq/controller/devices/device_shfqa.py
+++ b/laboneq/controller/devices/device_shfqa.py
@@ -17,14 +17,12 @@
     DeviceAttributesView,
 )
 from laboneq.controller.communication import (
-    CachingStrategy,
-    DaqNodeAction,
-    DaqNodeGetAction,
     DaqNodeSetAction,
 )
 from laboneq.controller.devices.awg_pipeliner import AwgPipeliner
 from laboneq.controller.devices.device_shf_base import DeviceSHFBase
 from laboneq.controller.devices.device_zi import (
+    NodeCollector,
     SequencerPaths,
     Waveforms,
     delay_to_rounded_samples,
@@ -74,22 +72,9 @@
 MAX_WAVEFORM_LENGTH_INTEGRATION = 4096
 MAX_WAVEFORM_LENGTH_SPECTROSCOPY = 65536
 
-
-def node_generator(daq, l: list):
-    def append(path, value, filename=None, cache=True):
-        l.append(
-            DaqNodeSetAction(
-                daq=daq,
-                path=path,
-                value=value,
-                filename=filename,
-                caching_strategy=(
-                    CachingStrategy.CACHE if cache else CachingStrategy.NO_CACHE
-                ),
-            )
-        )
-
-    return append
+MAX_AVERAGES_SCOPE = 1 << 16
+MAX_AVERAGES_RESULT_LOGGER = 1 << 17
+MAX_RESULT_VECTOR_LENGTH = 1 << 19
 
 
 def calc_theoretical_assignment_vec(num_weights: int) -> np.ndarray:
@@ -207,33 +192,21 @@ def _get_next_osc_index(
 
     def _make_osc_path(self, channel: int, index: int) -> str:
         return f"/{self.serial}/qachannels/{channel}/oscs/{index}/freq"
 
-    def disable_outputs(
+    async def disable_outputs(
         self, outputs: set[int], invert: bool
     ) -> list[DaqNodeSetAction]:
-        channels_to_disable: list[DaqNodeSetAction] = [
-            DaqNodeSetAction(
-                self._daq,
-                f"/{self.serial}/qachannels/{ch}/output/on",
-                0,
-                caching_strategy=CachingStrategy.NO_CACHE,
-            )
-            for ch in range(self._channels)
-            if (ch in outputs) != invert
-        ]
-        return channels_to_disable
-
-    def on_experiment_end(self):
-        nodes = super().on_experiment_end()
-        return [
-            *nodes,
-            # in CW spectroscopy mode, turn off the tone
-            DaqNodeSetAction(
-                self._daq,
-                f"/{self.serial}/qachannels/*/spectroscopy/envelope/enable",
-                1,
-                caching_strategy=CachingStrategy.NO_CACHE,
-            ),
-        ]
+        nc = NodeCollector(base=f"/{self.serial}/")
+        for ch in range(self._channels):
+            if (ch in outputs) != invert:
+                nc.add(f"qachannels/{ch}/output/on", 0, cache=False)
+        return await self.maybe_async(nc)
+
+    def on_experiment_end(self) -> NodeCollector:
+        nc = NodeCollector(base=f"/{self.serial}/")
+        nc.extend(super().on_experiment_end())
+        # in CW spectroscopy mode, turn off the tone
+        nc.add("qachannels/*/spectroscopy/envelope/enable", 1, cache=False)
+        return nc
 
     def _nodes_to_monitor_impl(self) -> list[str]:
         nodes = super()._nodes_to_monitor_impl()
@@ -250,7 +223,7 @@ def _nodes_to_monitor_impl(self) -> list[str]:
             nodes.extend(self.pipeliner_control_nodes(awg))
         return nodes
 
-    def configure_acquisition(
+    async def configure_acquisition(
         self,
         awg_key: AwgKey,
         awg_config: AwgConfig,
@@ -258,32 +231,37 @@ def configure_acquisition(
         averages: int,
         averaging_mode: AveragingMode,
         acquisition_type: AcquisitionType,
-    ) -> list[DaqNodeAction]:
+    ) -> list[DaqNodeSetAction]:
+        nc = NodeCollector()
         average_mode = 0 if averaging_mode == AveragingMode.CYCLIC else 1
-        nodes = [
-            *self._configure_readout(
+        nc.extend(
+            self._configure_readout(
                 acquisition_type,
                 awg_key,
                 awg_config,
                 integrator_allocations,
                 averages,
                 average_mode,
-            ),
-            *self._configure_spectroscopy(
+            )
+        )
+        nc.extend(
+            self._configure_spectroscopy(
                 acquisition_type,
                 awg_key.awg_index,
                 awg_config.result_length,
                 averages,
                 average_mode,
-            ),
-            *self._configure_scope(
+            )
+        )
+        nc.extend(
+            self._configure_scope(
                 enable=acquisition_type == AcquisitionType.RAW,
                 channel=awg_key.awg_index,
                 averages=averages,
                 acquire_length=awg_config.raw_acquire_length,
-            ),
-        ]
-        return nodes
+            )
+        )
+        return await self.maybe_async(nc)
 
     def _configure_readout(
         self,
@@ -293,45 +271,34 @@ def _configure_readout(
         integrator_allocations: list[IntegratorAllocation],
         averages: int,
         average_mode: int,
-    ):
+    ) -> NodeCollector:
         enable = acquisition_type in [
             AcquisitionType.INTEGRATION,
             AcquisitionType.DISCRIMINATION,
         ]
         channel = awg_key.awg_index
-        nodes_to_initialize_readout = []
+        nc = NodeCollector(base=f"/{self.serial}/")
         if enable:
-            nodes_to_initialize_readout.extend(
-                [
-                    DaqNodeSetAction(
-                        self._daq,
-                        f"/{self.serial}/qachannels/{channel}/readout/result/length",
-                        awg_config.result_length,
-                    ),
-                    DaqNodeSetAction(
-                        self._daq,
-                        f"/{self.serial}/qachannels/{channel}/readout/result/averages",
-                        averages,
-                    ),
-                    DaqNodeSetAction(
-                        self._daq,
-                        f"/{self.serial}/qachannels/{channel}/readout/result/source",
-                        # 1 - result_of_integration
-                        # 3 - result_of_discrimination
-                        3 if acquisition_type == AcquisitionType.DISCRIMINATION else 1,
-                    ),
-                    DaqNodeSetAction(
-                        self._daq,
-                        f"/{self.serial}/qachannels/{channel}/readout/result/mode",
-                        average_mode,
-                    ),
-                    DaqNodeSetAction(
-                        self._daq,
-                        f"/{self.serial}/qachannels/{channel}/readout/result/enable",
-                        0,
-                    ),
-                ]
+            if averages > MAX_AVERAGES_RESULT_LOGGER:
+                raise LabOneQControllerException(
+                    f"Number of averages {averages} exceeds the allowed maximum {MAX_AVERAGES_RESULT_LOGGER}"
+                )
+            result_length = awg_config.result_length
+            if result_length > MAX_RESULT_VECTOR_LENGTH:
+                raise LabOneQControllerException(
+                    f"Number of distinct readouts {result_length} on device {self.dev_repr},"
+                    f" channel {channel}, exceeds the allowed maximum {MAX_RESULT_VECTOR_LENGTH}"
+                )
+            nc.add(f"qachannels/{channel}/readout/result/length", result_length)
+            nc.add(f"qachannels/{channel}/readout/result/averages", averages)
+            nc.add(
+                f"qachannels/{channel}/readout/result/source",
+                # 1 - result_of_integration
+                # 3 - result_of_discrimination
+                3 if acquisition_type == AcquisitionType.DISCRIMINATION else 1,
             )
+            nc.add(f"qachannels/{channel}/readout/result/mode", average_mode)
+            nc.add(f"qachannels/{channel}/readout/result/enable", 0)
         if acquisition_type in [
             AcquisitionType.INTEGRATION,
             AcquisitionType.DISCRIMINATION,
@@ -346,22 +313,12 @@ def _configure_readout(
                 integrator_idx = integrator.channels[0]
                 assert self._integrator_has_consistent_msd_num_state(integrator)
                 for state_i, threshold in enumerate(integrator.thresholds):
-                    nodes_to_initialize_readout.append(
-                        DaqNodeSetAction(
-                            self._daq,
-                            f"/{self.serial}/qachannels/{channel}/readout/multistate/qudits/"
-                            f"{integrator_idx}/thresholds/{state_i}/value",
-                            threshold or 0.0,
-                        )
+                    nc.add(
+                        f"qachannels/{channel}/readout/multistate/qudits/{integrator_idx}/thresholds/{state_i}/value",
+                        threshold or 0.0,
                     )
-        nodes_to_initialize_readout.append(
-            DaqNodeSetAction(
-                self._daq,
-                f"/{self.serial}/qachannels/{channel}/readout/result/enable",
-                1 if enable else 0,
-            )
-        )
-        return nodes_to_initialize_readout
+        nc.add(f"qachannels/{channel}/readout/result/enable", 1 if enable else 0)
+        return nc
 
     def _configure_spectroscopy(
         self,
@@ -370,191 +327,113 @@ def _configure_spectroscopy(
         result_length: int,
         averages: int,
         average_mode: int,
-    ):
-        nodes_to_initialize_spectroscopy = []
+    ) -> NodeCollector:
+        nc = NodeCollector(base=f"/{self.serial}/")
         if is_spectroscopy(acq_type):
-            nodes_to_initialize_spectroscopy.extend(
-                [
-                    DaqNodeSetAction(
-                        self._daq,
-                        f"/{self.serial}/qachannels/{channel}/spectroscopy/result/length",
-                        result_length,
-                    ),
-                    DaqNodeSetAction(
-                        self._daq,
-                        f"/{self.serial}/qachannels/{channel}/spectroscopy/result/averages",
-                        averages,
-                    ),
-                    DaqNodeSetAction(
-                        self._daq,
-                        f"/{self.serial}/qachannels/{channel}/spectroscopy/result/mode",
-                        average_mode,
-                    ),
-                    DaqNodeSetAction(
-                        self._daq,
-                        f"/{self.serial}/qachannels/{channel}/spectroscopy/psd/enable",
-                        0,
-                    ),
-                    DaqNodeSetAction(
-                        self._daq,
-                        f"/{self.serial}/qachannels/{channel}/spectroscopy/result/enable",
-                        0,
-                    ),
-                ]
-            )
-
+            if averages > MAX_AVERAGES_RESULT_LOGGER:
+                raise LabOneQControllerException(
+                    f"Number of averages {averages} exceeds the allowed maximum {MAX_AVERAGES_RESULT_LOGGER}"
+                )
+            if result_length > MAX_RESULT_VECTOR_LENGTH:
+                raise LabOneQControllerException(
+                    f"Number of distinct readouts {result_length} on device {self.dev_repr},"
+                    f" channel {channel}, exceeds the allowed maximum {MAX_RESULT_VECTOR_LENGTH}"
+                )
+            nc.add(f"qachannels/{channel}/spectroscopy/result/length", result_length)
+            nc.add(f"qachannels/{channel}/spectroscopy/result/averages", averages)
+            nc.add(f"qachannels/{channel}/spectroscopy/result/mode", average_mode)
+            nc.add(f"qachannels/{channel}/spectroscopy/psd/enable", 0)
+            nc.add(f"qachannels/{channel}/spectroscopy/result/enable", 0)
         if acq_type == AcquisitionType.SPECTROSCOPY_PSD:
-            nodes_to_initialize_spectroscopy.append(
-                DaqNodeSetAction(
-                    self._daq,
-                    f"/{self.serial}/qachannels/{channel}/spectroscopy/psd/enable",
-                    1,
-                ),
-            )
-
-        nodes_to_initialize_spectroscopy.append(
-            DaqNodeSetAction(
-                self._daq,
-                f"/{self.serial}/qachannels/{channel}/spectroscopy/result/enable",
-                1 if is_spectroscopy(acq_type) else 0,
-            )
+            nc.add(f"qachannels/{channel}/spectroscopy/psd/enable", 1)
+        nc.add(
+            f"qachannels/{channel}/spectroscopy/result/enable",
+            1 if is_spectroscopy(acq_type) else 0,
         )
-        return nodes_to_initialize_spectroscopy
+        return nc
 
     def _configure_scope(
         self, enable: bool, channel: int, averages: int, acquire_length: int
-    ):
+    ) -> NodeCollector:
         # TODO(2K): multiple acquire events
-        nodes_to_initialize_scope = []
+        nc = NodeCollector(base=f"/{self.serial}/")
         if enable:
-            nodes_to_initialize_scope.extend(
-                [
-                    DaqNodeSetAction(
-                        self._daq, f"/{self.serial}/scopes/0/time", 0
-                    ),  # 0 -> 2 GSa/s
-                    DaqNodeSetAction(
-                        self._daq, f"/{self.serial}/scopes/0/averaging/enable", 1
-                    ),
-                    DaqNodeSetAction(
-                        self._daq, f"/{self.serial}/scopes/0/averaging/count", averages
-                    ),
-                    DaqNodeSetAction(
-                        self._daq,
-                        f"/{self.serial}/scopes/0/channels/{channel}/enable",
-                        1,
-                    ),
-                    DaqNodeSetAction(
-                        self._daq,
-                        f"/{self.serial}/scopes/0/channels/{channel}/inputselect",
-                        channel,
-                    ),  # channelN_signal_input
-                    DaqNodeSetAction(
-                        self._daq, f"/{self.serial}/scopes/0/length", acquire_length
-                    ),
-                    DaqNodeSetAction(
-                        self._daq, f"/{self.serial}/scopes/0/segments/enable", 0
-                    ),
-                    # TODO(2K): multiple acquire events per monitor
-                    # DaqNodeSetAction(self._daq, f"/{self.serial}/scopes/0/segments/enable", 1),
-                    # DaqNodeSetAction(self._daq, f"/{self.serial}/scopes/0/segments/count",
-                    #                  measurement.result_length),
-                    # TODO(2K): only one trigger is possible for all channels. Which one to use?
-                    DaqNodeSetAction(
-                        self._daq,
-                        f"/{self.serial}/scopes/0/trigger/channel",
-                        64 + channel,
-                    ),  # channelN_sequencer_monitor0
-                    # TODO(caglark): HBAR-1779
-                    DaqNodeSetAction(
-                        self._daq,
-                        f"/{self.serial}/scopes/0/trigger/delay",
-                        SCOPE_DELAY_OFFSET,
-                    ),
-                    DaqNodeSetAction(
-                        self._daq, f"/{self.serial}/scopes/0/trigger/enable", 1
-                    ),
-                    DaqNodeSetAction(self._daq, f"/{self.serial}/scopes/0/enable", 0),
-                    DaqNodeSetAction(
-                        self._daq,
-                        f"/{self.serial}/scopes/0/single",
-                        1,
-                        caching_strategy=CachingStrategy.NO_CACHE,
-                    ),
-                ]
-            )
-        nodes_to_initialize_scope.append(
-            DaqNodeSetAction(
-                self._daq, f"/{self.serial}/scopes/0/enable", 1 if enable else 0
-            )
-        )
-        return nodes_to_initialize_scope
+            if averages > MAX_AVERAGES_SCOPE:
+                raise LabOneQControllerException(
+                    f"Number of averages {averages} exceeds the allowed maximum {MAX_AVERAGES_SCOPE}"
+                )
+            nc.add("scopes/0/time", 0)  # 0 -> 2 GSa/s
+            nc.add("scopes/0/averaging/enable", 1)
+            nc.add("scopes/0/averaging/count", averages)
+            nc.add(f"scopes/0/channels/{channel}/enable", 1)
+            nc.add(
+                f"scopes/0/channels/{channel}/inputselect", channel
+            )  # channelN_signal_input
+            nc.add("scopes/0/length", acquire_length)
+            nc.add("scopes/0/segments/enable", 0)
+            # TODO(2K): multiple acquire events per monitor
+            # "scopes/0/segments/enable", 1
+            # "scopes/0/segments/count", measurement.result_length
+            # TODO(2K): only one trigger is possible for all channels. Which one to use?
+            nc.add(
+                "scopes/0/trigger/channel", 64 + channel
+            )  # channelN_sequencer_monitor0
+            nc.add("scopes/0/trigger/enable", 1)
+            nc.add("scopes/0/enable", 0)
+            nc.add("scopes/0/single", 1, cache=False)
+        nc.add("scopes/0/enable", 1 if enable else 0)
+        return nc
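The scope is armed in single-shot mode and triggered from the sequencer monitor of the measured channel; the inline comments above encode that mapping as 64 + channel (channelN_sequencer_monitor0), alongside the 32 + channel sources used for spectroscopy triggers and markers elsewhere in this file. A sketch of just the mapping, with base constants inferred from these comments rather than from any documented enum:

SEQUENCER_TRIGGER_BASE = 32  # e.g. spectroscopy/trigger/channel, markers/N/source
SCOPE_MONITOR_BASE = 64      # scopes/0/trigger/channel

def scope_trigger_source(channel: int) -> int:
    # channelN_sequencer_monitor0 (assumed encoding)
    return SCOPE_MONITOR_BASE + channel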
 
     async def collect_execution_nodes(
         self, with_pipeliner: bool
-    ) -> list[DaqNodeAction]:
+    ) -> list[DaqNodeSetAction]:
         if with_pipeliner:
-            return self.pipeliner_collect_execution_nodes()
-
-        return [
-            DaqNodeSetAction(
-                self._daq,
-                f"/{self.serial}/qachannels/{awg_index}/generator/enable",
-                1,
-                caching_strategy=CachingStrategy.NO_CACHE,
-            )
-            for awg_index in self._allocated_awgs
-        ]
+            nc = self.pipeliner_collect_execution_nodes()
+        else:
+            nc = NodeCollector(base=f"/{self.serial}/")
+            for awg_index in self._allocated_awgs:
+                nc.add(f"qachannels/{awg_index}/generator/enable", 1, cache=False)
+        return await self.maybe_async(nc)
 
-    def collect_execution_setup_nodes(
+    async def collect_execution_setup_nodes(
         self, with_pipeliner: bool, has_awg_in_use: bool
-    ) -> list[DaqNodeAction]:
+    ) -> list[DaqNodeSetAction]:
         hw_sync = with_pipeliner and has_awg_in_use
-        nodes = []
+        nc = NodeCollector(base=f"/{self.serial}/")
         if hw_sync and self._emit_trigger:
-            nodes.append(
-                DaqNodeSetAction(
-                    self._daq,
-                    f"/{self.serial}/system/internaltrigger/synchronization/enable",
-                    1,  # enable
-                )
-            )
+            nc.add("system/internaltrigger/synchronization/enable", 1)  # enable
         if hw_sync and not self._emit_trigger:
-            nodes.append(
-                DaqNodeSetAction(
-                    self._daq,
-                    f"/{self.serial}/system/synchronization/source",
-                    1,  # external
-                )
-            )
-        return nodes
+            nc.add("system/synchronization/source", 1)  # external
+        return await self.maybe_async(nc)
 
-    def collect_internal_start_execution_nodes(self):
+    async def collect_internal_start_execution_nodes(self) -> list[DaqNodeSetAction]:
+        nc = NodeCollector(base=f"/{self.serial}/")
         if self._emit_trigger:
-            return [
-                DaqNodeSetAction(
-                    self._daq,
-                    f"/{self.serial}/system/internaltrigger/enable"
-                    if self.options.is_qc
-                    else f"/{self.serial}/system/swtriggers/0/single",
-                    1,
-                    caching_strategy=CachingStrategy.NO_CACHE,
-                )
-            ]
-        return []
+            nc.add(
+                "system/internaltrigger/enable"
+                if self.options.is_qc
+                else "system/swtriggers/0/single",
+                1,
+                cache=False,
+            )
+        return await self.maybe_async(nc)
 
-    def conditions_for_execution_ready(self, with_pipeliner: bool) -> dict[str, Any]:
+    async def conditions_for_execution_ready(
+        self, with_pipeliner: bool
+    ) -> dict[str, Any]:
         if with_pipeliner:
-            return self.pipeliner_conditions_for_execution_ready()
-
-        # TODO(janl): Not sure whether we need this condition on the SHFQA (including SHFQC)
-        # as well. The state of the generator enable wasn't always picked up reliably, so we
-        # only check in cases where we rely on external triggering mechanisms.
-        return {
-            f"/{self.serial}/qachannels/{awg_index}/generator/enable": 1
-            for awg_index in self._allocated_awgs
-        }
+            conditions = self.pipeliner_conditions_for_execution_ready()
+        else:
+            # TODO(janl): Not sure whether we need this condition on the SHFQA (including SHFQC)
+            # as well. The state of the generator enable wasn't always picked up reliably, so we
+            # only check in cases where we rely on external triggering mechanisms.
+            conditions = {
+                f"/{self.serial}/qachannels/{awg_index}/generator/enable": 1
+                for awg_index in self._allocated_awgs
+            }
+        return await self.maybe_async_wait(conditions)
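All conditions_for_execution_* methods now return a mapping from node path to the value the controller must observe, funneled through maybe_async_wait. A minimal sketch of consuming such a mapping against a polling read coroutine — read_node is hypothetical here; the real waiting logic lives in the node monitor:

import asyncio

async def wait_for_conditions(read_node, conditions: dict, timeout: float = 10.0) -> None:
    async def satisfied(path: str, expected) -> None:
        # Poll until the node reports the expected value.
        while await read_node(path) != expected:
            await asyncio.sleep(0.01)

    await asyncio.wait_for(
        asyncio.gather(*(satisfied(p, v) for p, v in conditions.items())),
        timeout,
    )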
 
-    def conditions_for_execution_done(
+    async def conditions_for_execution_done(
         self, acquisition_type: AcquisitionType, with_pipeliner: bool
     ) -> dict[str, Any]:
         conditions: dict[str, Any] = {}
@@ -581,7 +460,7 @@ def conditions_for_execution_done(
                 conditions[
                     f"/{self.serial}/qachannels/{awg_index}/readout/result/enable"
                 ] = 0
-        return conditions
+        return await self.maybe_async_wait(conditions)
 
     def _validate_initialization(self, initialization: Initialization):
         super()._validate_initialization(initialization)
@@ -643,7 +522,7 @@ async def collect_initialization_nodes(
     ) -> list[DaqNodeSetAction]:
         _logger.debug("%s: Initializing device...", self.dev_repr)
 
-        nodes_to_initialize_output: list[DaqNodeSetAction] = []
+        nc = NodeCollector(base=f"/{self.serial}/")
 
         outputs = initialization.outputs or []
         for output in outputs:
@@ -656,48 +535,28 @@ async def collect_initialization_nodes(
                 output.gains is None, "correction_matrix", output.channel
             )
             self._allocated_awgs.add(output.channel)
-            nodes_to_initialize_output.append(
-                DaqNodeSetAction(
-                    self._daq,
-                    f"/{self.serial}/qachannels/{output.channel}/output/on",
-                    1 if output.enable else 0,
-                )
-            )
+            nc.add(f"qachannels/{output.channel}/output/on", 1 if output.enable else 0)
             if output.range is not None:
                 self._validate_range(output, is_out=True)
-                nodes_to_initialize_output.append(
-                    DaqNodeSetAction(
-                        self._daq,
-                        f"/{self.serial}/qachannels/{output.channel}/output/range",
-                        output.range,
-                    )
-                )
+                nc.add(f"qachannels/{output.channel}/output/range", output.range)
 
-            nodes_to_initialize_output.append(
-                DaqNodeSetAction(
-                    self._daq,
-                    f"/{self.serial}/qachannels/{output.channel}/generator/single",
-                    1,
-                )
-            )
+            nc.add(f"qachannels/{output.channel}/generator/single", 1)
 
-        nodes_to_initialize_output += [
-            DaqNodeSetAction(
-                self._daq,
-                f"/{self.serial}/qachannels/{input.channel}/input/rflfpath",
+        for input in initialization.inputs or []:
+            nc.add(
+                f"qachannels/{input.channel}/input/rflfpath",
                 1  # RF
                 if input.port_mode is None or input.port_mode == PortMode.RF.value
                 else 0,  # LF
             )
-            for input in initialization.inputs or []
-        ]
-        return nodes_to_initialize_output
+        return await self.maybe_async(nc)
 
     def collect_prepare_nt_step_nodes(
         self, attributes: DeviceAttributesView, recipe_data: RecipeData
-    ) -> list[DaqNodeAction]:
-        nodes_to_set = super().collect_prepare_nt_step_nodes(attributes, recipe_data)
+    ) -> NodeCollector:
+        nc = NodeCollector(base=f"/{self.serial}/")
+        nc.extend(super().collect_prepare_nt_step_nodes(attributes, recipe_data))
 
         acquisition_type = RtExecutionInfo.get_acquisition_type(
             recipe_data.rt_execution_infos
@@ -708,25 +567,13 @@ def collect_prepare_nt_step_nodes(
                 keys=[(AttributeName.QA_CENTER_FREQ, ch)]
             )
             if synth_cf_updated:
-                nodes_to_set.append(
-                    DaqNodeSetAction(
-                        self._daq,
-                        f"/{self.serial}/qachannels/{ch}/centerfreq",
-                        synth_cf,
-                    )
-                )
+                nc.add(f"qachannels/{ch}/centerfreq", synth_cf)
 
             [out_amp], out_amp_updated = attributes.resolve(
                 keys=[(AttributeName.QA_OUT_AMPLITUDE, ch)]
             )
             if out_amp_updated:
-                nodes_to_set.append(
-                    DaqNodeSetAction(
-                        self._daq,
-                        f"/{self.serial}/qachannels/{ch}/oscs/0/gain",
-                        out_amp,
-                    )
-                )
+                nc.add(f"qachannels/{ch}/oscs/0/gain", out_amp)
 
             (
                 [output_scheduler_port_delay, output_port_delay],
@@ -760,18 +607,21 @@ def collect_prepare_nt_step_nodes(
             )
             set_input = input_updated and input_scheduler_port_delay is not None
 
f"/{self.serial}/qachannels/{ch}" if is_spectroscopy(acquisition_type): - output_delay_path = f"{base_channel_path}/spectroscopy/envelope/delay" - meas_delay_path = f"{base_channel_path}/spectroscopy/delay" + output_delay_path = f"qachannels/{ch}/spectroscopy/envelope/delay" + meas_delay_path = f"qachannels/{ch}/spectroscopy/delay" measurement_delay += SPECTROSCOPY_DELAY_OFFSET max_generator_delay = DELAY_NODE_SPECTROSCOPY_ENVELOPE_MAX_SAMPLES max_integrator_delay = DELAY_NODE_SPECTROSCOPY_MAX_SAMPLES else: - output_delay_path = f"{base_channel_path}/generator/delay" - meas_delay_path = f"{base_channel_path}/readout/integration/delay" + output_delay_path = f"qachannels/{ch}/generator/delay" + meas_delay_path = f"qachannels/{ch}/readout/integration/delay" measurement_delay += output_delay - measurement_delay += INTEGRATION_DELAY_OFFSET + measurement_delay += ( + INTEGRATION_DELAY_OFFSET + if acquisition_type != AcquisitionType.RAW + else SCOPE_DELAY_OFFSET + ) set_input = set_input or set_output max_generator_delay = DELAY_NODE_GENERATOR_MAX_SAMPLES max_integrator_delay = DELAY_NODE_READOUT_INTEGRATION_MAX_SAMPLES @@ -788,9 +638,7 @@ def collect_prepare_nt_step_nodes( ) / SAMPLE_FREQUENCY_HZ ) - nodes_to_set.append( - DaqNodeSetAction(self._daq, output_delay_path, output_delay_rounded) - ) + nc.add(output_delay_path, output_delay_rounded) if set_input: measurement_delay_rounded = ( @@ -804,19 +652,17 @@ def collect_prepare_nt_step_nodes( ) / SAMPLE_FREQUENCY_HZ ) - nodes_to_set.append( - DaqNodeSetAction( - self._daq, meas_delay_path, measurement_delay_rounded - ) - ) + if acquisition_type == AcquisitionType.RAW: + nc.add("scopes/0/trigger/delay", measurement_delay_rounded) + nc.add(meas_delay_path, measurement_delay_rounded) - return nodes_to_set + return nc def prepare_integration_weights( self, artifacts: CompilerArtifact | dict[int, CompilerArtifact], integrator_allocations: list[IntegratorAllocation], - kernel_ref: str, + kernel_ref: str | None, ) -> IntegrationWeights | None: if isinstance(artifacts, dict): artifacts: ArtifactsCodegen = artifacts[self._device_class] @@ -838,6 +684,7 @@ def prepare_integration_weights( for index, weight_name in enumerate(weight_names): wave_name = weight_name + ".wave" + # Note conjugation here: weight_vector = np.conjugate(get_wave(wave_name, artifacts.waves)) wave_len = len(weight_vector) if wave_len > max_len: @@ -865,25 +712,26 @@ def prepare_upload_binary_wave( awg_index: int, wave_index: int, acquisition_type: AcquisitionType, - ): + ) -> NodeCollector: assert not is_spectroscopy(acquisition_type) or wave_index == 0 - return DaqNodeSetAction( - self._daq, + nc = NodeCollector() + nc.add( f"/{self.serial}/qachannels/{awg_index}/spectroscopy/envelope/wave" if is_spectroscopy(acquisition_type) else f"/{self.serial}/qachannels/{awg_index}/generator/waveforms/{wave_index}/wave", waveform, + cache=False, filename=filename, - caching_strategy=CachingStrategy.NO_CACHE, ) + return nc def prepare_upload_all_binary_waves( self, awg_index, waves: Waveforms, acquisition_type: AcquisitionType, - ): - waves_upload: list[DaqNodeSetAction] = [] + ) -> NodeCollector: + nc = NodeCollector() has_spectroscopy_envelope = False if is_spectroscopy(acquisition_type): if len(waves) > 1: @@ -903,7 +751,7 @@ def prepare_upload_all_binary_waves( f"of {max_len} samples. Ensure measure pulse doesn't " f"exceed {max_pulse_len * 1e6:.3f} us." 
@@ -865,25 +712,26 @@ def prepare_upload_binary_wave(
         awg_index: int,
         wave_index: int,
         acquisition_type: AcquisitionType,
-    ):
+    ) -> NodeCollector:
         assert not is_spectroscopy(acquisition_type) or wave_index == 0
-        return DaqNodeSetAction(
-            self._daq,
+        nc = NodeCollector()
+        nc.add(
             f"/{self.serial}/qachannels/{awg_index}/spectroscopy/envelope/wave"
             if is_spectroscopy(acquisition_type)
             else f"/{self.serial}/qachannels/{awg_index}/generator/waveforms/{wave_index}/wave",
             waveform,
+            cache=False,
             filename=filename,
-            caching_strategy=CachingStrategy.NO_CACHE,
         )
+        return nc
 
     def prepare_upload_all_binary_waves(
         self,
         awg_index,
         waves: Waveforms,
         acquisition_type: AcquisitionType,
-    ):
-        waves_upload: list[DaqNodeSetAction] = []
+    ) -> NodeCollector:
+        nc = NodeCollector()
         has_spectroscopy_envelope = False
         if is_spectroscopy(acquisition_type):
             if len(waves) > 1:
@@ -903,7 +751,7 @@ def prepare_upload_all_binary_waves(
                     f"of 141,300 samples. Ensure measure pulse doesn't "
                     f"exceed {max_pulse_len * 1e6:.3f} us."
                 )
-                waves_upload.append(
+                nc.extend(
                     self.prepare_upload_binary_wave(
                         filename=wave.name,
                         waveform=wave.samples,
                         awg_index=awg_index,
                         wave_index=0,
                         acquisition_type=acquisition_type,
                     )
                 )
@@ -924,7 +772,7 @@ def prepare_upload_all_binary_waves(
                     f"of 141,300 samples. Ensure measure pulse doesn't exceed "
                     f"{max_pulse_len * 1e6:.3f} us."
                 )
-                waves_upload.append(
+                nc.extend(
                     self.prepare_upload_binary_wave(
                         filename=wave.name,
                         waveform=wave.samples,
                         awg_index=awg_index,
                         wave_index=wave_index,
                         acquisition_type=acquisition_type,
                     )
                 )
-        waves_upload.append(
-            DaqNodeSetAction(
-                self._daq,
-                f"/{self.serial}/qachannels/{awg_index}/spectroscopy/envelope/enable",
-                1 if has_spectroscopy_envelope else 0,
-            )
+        nc.add(
+            f"/{self.serial}/qachannels/{awg_index}/spectroscopy/envelope/enable",
+            1 if has_spectroscopy_envelope else 0,
        )
-        return waves_upload
+        return nc
 
     def prepare_upload_all_integration_weights(
         self, awg_index, integration_weights: IntegrationWeights
-    ):
-        ret_nodes: list[DaqNodeSetAction] = []
-        node = node_generator(self._daq, ret_nodes)
-        base_path = f"/{self.serial}/qachannels/{awg_index}/readout/multistate"
+    ) -> NodeCollector:
+        nc = NodeCollector(
+            base=f"/{self.serial}/qachannels/{awg_index}/readout/multistate/"
+        )
 
         for iw in integration_weights:
-            qudit_path = f"{base_path}/qudits/{iw.integration_unit}"
-            node(f"{qudit_path}/weights/{iw.index}/wave", iw.samples, iw.name)
+            nc.add(
+                f"qudits/{iw.integration_unit}/weights/{iw.index}/wave",
+                iw.samples,
+                filename=iw.name,
+            )
 
-        return ret_nodes
+        return nc
 
     def _integrator_has_consistent_msd_num_state(
         self, integrator_allocation: IntegratorAllocation.Data
@@ -980,8 +828,7 @@ def _integrator_has_consistent_msd_num_state(
     def _configure_readout_mode_nodes_multi_state(
         self,
         integrator_allocation: IntegratorAllocation,
         measurement: Measurement,
-    ):
-        ret_nodes: list[DaqNodeSetAction] = []
+    ) -> NodeCollector:
         num_states = integrator_allocation.kernel_count + 1
         assert self._integrator_has_consistent_msd_num_state(integrator_allocation)
 
@@ -993,22 +840,21 @@ def _configure_readout_mode_nodes_multi_state(
         integration_unit_index = integrator_allocation.channels[0]
 
         # Note: copying this from grimsel_multistate_demo jupyter notebook
-        base_path = (
-            f"/{self.serial}/qachannels/{measurement.channel}/readout/multistate"
+        nc = NodeCollector(
+            base=f"/{self.serial}/qachannels/{measurement.channel}/readout/multistate/"
         )
-        qudit_path = f"{base_path}/qudits/{integration_unit_index}"
-
-        node = node_generator(self._daq, ret_nodes)
-        node(f"{base_path}/enable", 1)
-        node(f"{base_path}/zsync/packed", 1)
-        node(f"{qudit_path}/numstates", num_states)
-        node(f"{qudit_path}/enable", 1, cache=False)
-        node(
+
+        nc.add("enable", 1)
+        nc.add("zsync/packed", 1)
+        qudit_path = f"qudits/{integration_unit_index}"
+        nc.add(f"{qudit_path}/numstates", num_states)
+        nc.add(f"{qudit_path}/enable", 1, cache=False)
+        nc.add(
            f"{qudit_path}/assignmentvec",
            calc_theoretical_assignment_vec(num_states - 1),
         )
-        return ret_nodes
+        return nc
 
     def _configure_readout_mode_nodes(
         self,
@@ -1017,16 +863,16 @@ def _configure_readout_mode_nodes(
         measurement: Measurement | None,
         device_uid: str,
         recipe_data: RecipeData,
-    ):
+    ) -> NodeCollector:
         _logger.debug("%s: Setting measurement mode to 'Readout'.", self.dev_repr)
         assert measurement is not None
 
-        nodes_to_set_for_readout_mode: list[DaqNodeSetAction] = []
+        nc = NodeCollector(
+            base=f"/{self.serial}/qachannels/{measurement.channel}/readout/"
+        )
 
-        base_path = f"/{self.serial}/qachannels/{measurement.channel}/readout"
-        node = node_generator(self._daq, nodes_to_set_for_readout_mode)
-        node(f"{base_path}/integration/length", measurement.length)
-        node(f"{base_path}/multistate/qudits/*/enable", 0, cache=False)
+        nc.add("integration/length", measurement.length)
+        nc.add("multistate/qudits/*/enable", 0, cache=False)
node(f"{base_path}/integration/length", measurement.length) - node(f"{base_path}/multistate/qudits/*/enable", 0, cache=False) + nc.add("integration/length", measurement.length) + nc.add("multistate/qudits/*/enable", 0, cache=False) for integrator_allocation in recipe_data.recipe.integrator_allocations: if ( @@ -1037,46 +883,39 @@ def _configure_readout_mode_nodes( readout_nodes = self._configure_readout_mode_nodes_multi_state( integrator_allocation, measurement ) - nodes_to_set_for_readout_mode.extend(readout_nodes) + nc.extend(readout_nodes) - return nodes_to_set_for_readout_mode + return nc def _configure_spectroscopy_mode_nodes( self, dev_input: IO, measurement: Measurement | None - ): + ) -> NodeCollector: _logger.debug("%s: Setting measurement mode to 'Spectroscopy'.", self.dev_repr) - nodes_to_set_for_spectroscopy_mode = [ - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qachannels/{measurement.channel}/spectroscopy/trigger/channel", - 32 + measurement.channel, - ), - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qachannels/{measurement.channel}/spectroscopy/length", - measurement.length, - ), - ] + nc = NodeCollector(base=f"/{self.serial}/") + nc.add( + f"qachannels/{measurement.channel}/spectroscopy/trigger/channel", + 32 + measurement.channel, + ) + nc.add( + f"qachannels/{measurement.channel}/spectroscopy/length", measurement.length + ) - return nodes_to_set_for_spectroscopy_mode + return nc - def collect_awg_before_upload_nodes( + async def collect_awg_before_upload_nodes( self, initialization: Initialization, recipe_data: RecipeData - ): - nodes_to_initialize_measurement = [] + ) -> list[DaqNodeSetAction]: + nc = NodeCollector(base=f"/{self.serial}/") acquisition_type = RtExecutionInfo.get_acquisition_type( recipe_data.rt_execution_infos ) for measurement in initialization.measurements: - nodes_to_initialize_measurement.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qachannels/{measurement.channel}/mode", - 0 if is_spectroscopy(acquisition_type) else 1, - ) + nc.add( + f"qachannels/{measurement.channel}/mode", + 0 if is_spectroscopy(acquisition_type) else 1, ) dev_input = next( @@ -1096,11 +935,11 @@ def collect_awg_before_upload_nodes( None, ) if is_spectroscopy(acquisition_type): - nodes_to_initialize_measurement.extend( + nc.extend( self._configure_spectroscopy_mode_nodes(dev_input, measurement) ) elif acquisition_type != AcquisitionType.RAW: - nodes_to_initialize_measurement.extend( + nc.extend( self._configure_readout_mode_nodes( dev_input, dev_output, @@ -1109,28 +948,19 @@ def collect_awg_before_upload_nodes( recipe_data, ) ) - return nodes_to_initialize_measurement - def collect_awg_after_upload_nodes(self, initialization: Initialization): - nodes_to_initialize_measurement = [] + return await self.maybe_async(nc) + + async def collect_awg_after_upload_nodes( + self, initialization: Initialization + ) -> list[DaqNodeSetAction]: + nc = NodeCollector(base=f"/{self.serial}/") inputs = initialization.inputs or [] for dev_input in inputs: - nodes_to_initialize_measurement.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qachannels/{dev_input.channel}/input/on", - 1, - ) - ) + nc.add(f"qachannels/{dev_input.channel}/input/on", 1) if dev_input.range is not None: self._validate_range(dev_input, is_out=False) - nodes_to_initialize_measurement.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qachannels/{dev_input.channel}/input/range", - dev_input.range, - ) - ) + nc.add(f"qachannels/{dev_input.channel}/input/range", dev_input.range) for 
measurement in initialization.measurements: channel = 0 @@ -1141,25 +971,21 @@ def collect_awg_after_upload_nodes(self, initialization: Initialization): if self.options.is_qc else INTERNAL_TRIGGER_CHANNEL ) - nodes_to_initialize_measurement.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qachannels/{measurement.channel}/generator/" - f"auxtriggers/0/channel", - channel, - ) + nc.add( + f"qachannels/{measurement.channel}/generator/auxtriggers/0/channel", + channel, ) - return nodes_to_initialize_measurement + return await self.maybe_async(nc) async def collect_trigger_configuration_nodes( self, initialization: Initialization, recipe_data: RecipeData - ) -> list[DaqNodeAction]: + ) -> list[DaqNodeSetAction]: _logger.debug("Configuring triggers...") self._wait_for_awgs = True self._emit_trigger = False - nodes_to_configure_triggers = [] + nc = NodeCollector(base=f"/{self.serial}/") triggering_mode = initialization.config.triggering_mode @@ -1169,13 +995,8 @@ async def collect_trigger_configuration_nodes( self._wait_for_awgs = False self._emit_trigger = True if self.options.is_qc: - int_trig_base = f"/{self.serial}/system/internaltrigger" - nodes_to_configure_triggers.append( - DaqNodeSetAction(self._daq, f"{int_trig_base}/enable", 0) - ) - nodes_to_configure_triggers.append( - DaqNodeSetAction(self._daq, f"{int_trig_base}/repetitions", 1) - ) + nc.add("system/internaltrigger/enable", 0) + nc.add("system/internaltrigger/repetitions", 1) else: raise LabOneQControllerException( f"Unsupported triggering mode: {triggering_mode} for device type SHFQA." @@ -1184,15 +1005,10 @@ async def collect_trigger_configuration_nodes( for awg_index in ( self._allocated_awgs if len(self._allocated_awgs) > 0 else range(1) ): - markers_base = f"/{self.serial}/qachannels/{awg_index}/markers" src = 32 + awg_index - nodes_to_configure_triggers.append( - DaqNodeSetAction(self._daq, f"{markers_base}/0/source", src), - ) - nodes_to_configure_triggers.append( - DaqNodeSetAction(self._daq, f"{markers_base}/1/source", src), - ) - return nodes_to_configure_triggers + nc.add(f"qachannels/{awg_index}/markers/0/source", src) + nc.add(f"qachannels/{awg_index}/markers/1/source", src) + return await self.maybe_async(nc) async def get_measurement_data( self, @@ -1213,7 +1029,7 @@ async def get_measurement_data( attempts -= 1 # @TODO(andreyk): replace the raw daq reply parsing on site here and hide it # inside Communication class - data_node_query = self._daq.get_raw(result_path) + data_node_query = await self.get_raw(result_path) actual_num_measurement_points = len( data_node_query[result_path][0]["vector"] ) @@ -1233,7 +1049,7 @@ async def get_measurement_data( async def get_input_monitor_data(self, channel: int, num_results: int): result_path_ch = f"/{self.serial}/scopes/0/channels/{channel}/wave" - node_data = self._daq.get_raw(result_path_ch) + node_data = await self.get_raw(result_path_ch) data = node_data[result_path_ch][0]["vector"][0:num_results] return data @@ -1244,15 +1060,7 @@ async def check_results_acquired_status( results_acquired_path = ( f"/{self.serial}/qachannels/{channel}/{unit}/result/acquired" ) - batch_get_results = await self._daq.batch_get( - [ - DaqNodeGetAction( - self._daq, - results_acquired_path, - caching_strategy=CachingStrategy.NO_CACHE, - ) - ] - ) + batch_get_results = await self.get_raw_values(results_acquired_path) actual_results = batch_get_results[results_acquired_path] expected_results = result_length * hw_averages if actual_results != expected_results: @@ -1263,81 +1071,20 @@ 
async def check_results_acquired_status( f"a loop is too short. Please contact Zurich Instruments." ) - async def collect_reset_nodes(self) -> list[DaqNodeAction]: - reset_nodes = await super().collect_reset_nodes() + async def collect_reset_nodes(self) -> list[DaqNodeSetAction]: + nc = NodeCollector(base=f"/{self.serial}/") # Reset pipeliner first, attempt to set generator enable leads to FW error if pipeliner was enabled. - reset_nodes.extend(await self.pipeliner_reset_nodes()) - reset_nodes.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qachannels/*/generator/enable", - 0, - caching_strategy=CachingStrategy.NO_CACHE, - ) - ) - reset_nodes.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/system/synchronization/source", - 0, # internal - caching_strategy=CachingStrategy.NO_CACHE, - ) - ) + nc.extend(self.pipeliner_reset_nodes()) + nc.add("qachannels/*/generator/enable", 0, cache=False) + nc.add("system/synchronization/source", 0, cache=False) # internal if self.options.is_qc: - reset_nodes.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/system/internaltrigger/synchronization/enable", - 0, - caching_strategy=CachingStrategy.NO_CACHE, - ) - ) - reset_nodes.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qachannels/*/readout/result/enable", - 0, - caching_strategy=CachingStrategy.NO_CACHE, - ) - ) - reset_nodes.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qachannels/*/spectroscopy/psd/enable", - 0, - caching_strategy=CachingStrategy.NO_CACHE, - ) - ) - reset_nodes.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qachannels/*/spectroscopy/result/enable", - 0, - caching_strategy=CachingStrategy.NO_CACHE, - ) - ) - reset_nodes.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qachannels/*/output/rflfinterlock", - 1, - caching_strategy=CachingStrategy.NO_CACHE, - ) - ) - reset_nodes.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/scopes/0/enable", - 0, - caching_strategy=CachingStrategy.NO_CACHE, - ) - ) - reset_nodes.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/scopes/0/channels/*/enable", - 0, - caching_strategy=CachingStrategy.NO_CACHE, - ) - ) + nc.add("system/internaltrigger/synchronization/enable", 0, cache=False) + nc.add("qachannels/*/readout/result/enable", 0, cache=False) + nc.add("qachannels/*/spectroscopy/psd/enable", 0, cache=False) + nc.add("qachannels/*/spectroscopy/result/enable", 0, cache=False) + nc.add("qachannels/*/output/rflfinterlock", 1, cache=False) + nc.add("scopes/0/enable", 0, cache=False) + nc.add("scopes/0/channels/*/enable", 0, cache=False) + reset_nodes = await super().collect_reset_nodes() + reset_nodes.extend(await self.maybe_async(nc)) return reset_nodes diff --git a/laboneq/controller/devices/device_shfsg.py b/laboneq/controller/devices/device_shfsg.py index 0927af9..e380979 100644 --- a/laboneq/controller/devices/device_shfsg.py +++ b/laboneq/controller/devices/device_shfsg.py @@ -14,13 +14,12 @@ DeviceAttributesView, ) from laboneq.controller.communication import ( - CachingStrategy, - DaqNodeAction, DaqNodeSetAction, ) from laboneq.controller.devices.awg_pipeliner import AwgPipeliner from laboneq.controller.devices.device_shf_base import DeviceSHFBase from laboneq.controller.devices.device_zi import ( + NodeCollector, SequencerPaths, delay_to_rounded_samples, ) @@ -174,20 +173,14 @@ def _get_next_osc_index( def _make_osc_path(self, channel: int, index: int) -> str: return f"/{self.serial}/sgchannels/{channel}/oscs/{index}/freq" - def 
disable_outputs( + async def disable_outputs( self, outputs: set[int], invert: bool ) -> list[DaqNodeSetAction]: - channels_to_disable: list[DaqNodeSetAction] = [ - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/{ch}/output/on", - 0, - caching_strategy=CachingStrategy.NO_CACHE, - ) - for ch in range(self._outputs) - if (ch in outputs) != invert - ] - return channels_to_disable + nc = NodeCollector(base=f"/{self.serial}/") + for ch in range(self._outputs): + if (ch in outputs) != invert: + nc.add(f"sgchannels/{ch}/output/on", 0, cache=False) + return await self.maybe_async(nc) def _nodes_to_monitor_impl(self) -> list[str]: nodes = super()._nodes_to_monitor_impl() @@ -205,77 +198,58 @@ def clock_source_control_nodes(self) -> list[NodeControlBase]: async def collect_execution_nodes( self, with_pipeliner: bool - ) -> list[DaqNodeAction]: + ) -> list[DaqNodeSetAction]: if with_pipeliner: - return self.pipeliner_collect_execution_nodes() - - return [ - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/{awg_index}/awg/enable", - 1, - caching_strategy=CachingStrategy.NO_CACHE, - ) - for awg_index in self._allocated_awgs - ] + nc = self.pipeliner_collect_execution_nodes() + else: + nc = NodeCollector(base=f"/{self.serial}/") + for awg_index in self._allocated_awgs: + nc.add(f"sgchannels/{awg_index}/awg/enable", 1, cache=False) + return await self.maybe_async(nc) - def collect_execution_setup_nodes( + async def collect_execution_setup_nodes( self, with_pipeliner: bool, has_awg_in_use: bool - ) -> list[DaqNodeAction]: + ) -> list[DaqNodeSetAction]: hw_sync = with_pipeliner and has_awg_in_use and not self.is_secondary - nodes = [] + nc = NodeCollector(base=f"/{self.serial}/") if hw_sync and self._emit_trigger: - nodes.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/system/internaltrigger/synchronization/enable", - 1, # enable - ) - ) + nc.add("system/internaltrigger/synchronization/enable", 1) # enable if hw_sync and not self._emit_trigger: - nodes.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/system/synchronization/source", - 1, # external - ) - ) - return nodes + nc.add("system/synchronization/source", 1) # external + return await self.maybe_async(nc) - def collect_internal_start_execution_nodes(self): + async def collect_internal_start_execution_nodes(self) -> list[DaqNodeSetAction]: + nc = NodeCollector(base=f"/{self.serial}/") if self._emit_trigger: - return [ - DaqNodeSetAction( - self._daq, - f"/{self.serial}/system/internaltrigger/enable", - 1, - caching_strategy=CachingStrategy.NO_CACHE, - ) - ] - return [] + nc.add("system/internaltrigger/enable", 1, cache=False) + return await self.maybe_async(nc) - def conditions_for_execution_ready(self, with_pipeliner: bool) -> dict[str, Any]: + async def conditions_for_execution_ready( + self, with_pipeliner: bool + ) -> dict[str, Any]: if not self._wait_for_awgs: return {} if with_pipeliner: - return self.pipeliner_conditions_for_execution_ready() - - return { - f"/{self.serial}/sgchannels/{awg_index}/awg/enable": 1 - for awg_index in self._allocated_awgs - } + conditions = self.pipeliner_conditions_for_execution_ready() + else: + conditions = { + f"/{self.serial}/sgchannels/{awg_index}/awg/enable": 1 + for awg_index in self._allocated_awgs + } + return await self.maybe_async_wait(conditions) - def conditions_for_execution_done( + async def conditions_for_execution_done( self, acquisition_type: AcquisitionType, with_pipeliner: bool ) -> dict[str, Any]: if with_pipeliner: - return 
self.pipeliner_conditions_for_execution_done() - - return { - f"/{self.serial}/sgchannels/{awg_index}/awg/enable": 0 - for awg_index in self._allocated_awgs - } + conditions = self.pipeliner_conditions_for_execution_done() + else: + conditions = { + f"/{self.serial}/sgchannels/{awg_index}/awg/enable": 0 + for awg_index in self._allocated_awgs + } + return await self.maybe_async_wait(conditions) def pre_process_attributes( self, @@ -363,55 +337,30 @@ def _collect_output_router_nodes( router_idx: int, amplitude: float | None = None, phase: float | None = None, - ) -> list[DaqNodeAction]: - nodes = [ - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/{target}/outputrouter/enable", - 1, - ), - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/{target}/outputrouter/routes/{router_idx}/enable", - 1, - ), - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/{target}/outputrouter/routes/{router_idx}/source", - source, - ), - ] + ) -> NodeCollector: + nc = NodeCollector(base=f"/{self.serial}/") + nc.add(f"sgchannels/{target}/outputrouter/enable", 1) + nc.add(f"sgchannels/{target}/outputrouter/routes/{router_idx}/enable", 1) + nc.add(f"sgchannels/{target}/outputrouter/routes/{router_idx}/source", source) if amplitude is not None: - nodes.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/{target}/outputrouter/routes/{router_idx}/amplitude", - amplitude, - ) + nc.add( + f"sgchannels/{target}/outputrouter/routes/{router_idx}/amplitude", + amplitude, ) if phase is not None: - nodes.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/{target}/outputrouter/routes/{router_idx}/phase", - phase * 180 / numpy.pi, - ) + nc.add( + f"sgchannels/{target}/outputrouter/routes/{router_idx}/phase", + phase * 180 / numpy.pi, ) # Also turn on the output router on the source channel; the device will synchronize them internally. if self._is_full_channel(source): - nodes.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/{source}/outputrouter/enable", - 1, - ) - ) - return nodes + nc.add(f"sgchannels/{source}/outputrouter/enable", 1) + return nc def _collect_output_router_initialization_nodes( self, outputs: list[IO] - ) -> list[DaqNodeSetAction]: - nodes: list[DaqNodeSetAction] = [] + ) -> NodeCollector: + nc = NodeCollector() active_output_routers: set[int] = set() for output in outputs: if output.routed_outputs and self._has_opt_rtr is False: @@ -424,7 +373,7 @@ def _collect_output_router_initialization_nodes( # We enable the router on both the source and destination channels, so that the delay matches between them.
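One unit conversion is easy to miss in `_collect_output_router_nodes`: the recipe carries the route phase in radians, while the `outputrouter/routes/<n>/phase` node expects degrees, hence `phase * 180 / numpy.pi`. A condensed sketch of the node writes for a single route (`MiniNodeCollector` is a stand-in for the `NodeCollector` class added by this patch; the serial and channel numbers are made up):

```python
import math
from dataclasses import dataclass, field
from typing import Any

@dataclass
class MiniNodeCollector:
    """Stand-in for the NodeCollector introduced in this patch."""
    base: str = ""
    nodes: list = field(default_factory=list)

    def add(self, path: str, value: Any, cache: bool = True):
        self.nodes.append((self.base + path, value, cache))

serial, target, source, router_idx = "dev12001", 1, 0, 0  # made-up values
phase_rad = math.pi / 2  # recipe value, in radians

nc = MiniNodeCollector(base=f"/{serial}/")
nc.add(f"sgchannels/{target}/outputrouter/enable", 1)
nc.add(f"sgchannels/{target}/outputrouter/routes/{router_idx}/enable", 1)
nc.add(f"sgchannels/{target}/outputrouter/routes/{router_idx}/source", source)
nc.add(
    f"sgchannels/{target}/outputrouter/routes/{router_idx}/phase",
    phase_rad * 180 / math.pi,  # node expects degrees -> 90.0
)

for path, value, _ in nc.nodes:
    print(path, "=", value)
```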
active_output_routers.add(output.channel) active_output_routers.add(route.from_channel) - nodes.extend( + nc.extend( self._collect_output_router_nodes( target=output.channel, source=route.from_channel, @@ -441,13 +390,10 @@ def _collect_output_router_initialization_nodes( if output.routed_outputs: routes_to_disable = [1, 2] [ - nodes.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/{output.channel}/outputrouter/routes/{route_disable}/enable", - 0, - caching_strategy=CachingStrategy.NO_CACHE, - ), + nc.add( + f"/{self.serial}/sgchannels/{output.channel}/outputrouter/routes/{route_disable}/enable", + 0, + cache=False, ) for route_disable in routes_to_disable[ len(output.routed_outputs) - 1 : @@ -461,42 +407,26 @@ def _collect_output_router_initialization_nodes( output.channel not in active_output_routers and self._is_full_channel(output.channel) ): - nodes.extend( - [ - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/{output.channel}/outputrouter/enable", - 0, - caching_strategy=CachingStrategy.NO_CACHE, - ), - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/{output.channel}/outputrouter/routes/*/enable", - 0, - caching_strategy=CachingStrategy.NO_CACHE, - ), - ] + nc.add( + f"/{self.serial}/sgchannels/{output.channel}/outputrouter/enable", + 0, + cache=False, ) - return nodes + nc.add( + f"/{self.serial}/sgchannels/{output.channel}/outputrouter/routes/*/enable", + 0, + cache=False, + ) + return nc - def _collect_configure_oscillator_nodes(self, channel: int, oscillator_idx: int): - return [ - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/{channel}/sines/0/oscselect", - oscillator_idx, - ), - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/{channel}/sines/0/harmonic", - 1, - ), - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/{channel}/sines/0/phaseshift", - 0, - ), - ] + def _collect_configure_oscillator_nodes( + self, channel: int, oscillator_idx: int + ) -> NodeCollector: + nc = NodeCollector(base=f"/{self.serial}/sgchannels/{channel}/sines/0/") + nc.add("oscselect", oscillator_idx) + nc.add("harmonic", 1) + nc.add("phaseshift", 0) + return nc async def collect_initialization_nodes( self, @@ -506,7 +436,7 @@ async def collect_initialization_nodes( ) -> list[DaqNodeSetAction]: _logger.debug("%s: Initializing device...", self.dev_repr) - nodes_to_initialize_output: list[DaqNodeSetAction] = [] + nc = NodeCollector() outputs = initialization.outputs or [] for output in outputs: self._warn_for_unsupported_param( @@ -520,65 +450,37 @@ async def collect_initialization_nodes( self._allocated_awgs.add(output.channel) if self._is_full_channel(output.channel): - nodes_to_initialize_output.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/{output.channel}/output/on", - 1 if output.enable else 0, - ) + nc.add( + f"/{self.serial}/sgchannels/{output.channel}/output/on", + 1 if output.enable else 0, ) if output.range is not None: self._validate_range(output) - nodes_to_initialize_output.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/{output.channel}/output/range", - output.range, - ) + nc.add( + f"/{self.serial}/sgchannels/{output.channel}/output/range", + output.range, ) - nodes_to_initialize_output.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/{output.channel}/awg/single", - 1, - ) - ) + nc.add(f"/{self.serial}/sgchannels/{output.channel}/awg/single", 1) - nodes_to_initialize_output.append( - DaqNodeSetAction( - self._daq, - 
f"/{self.serial}/sgchannels/{output.channel}/awg/modulation/enable", - 1, - ) + nc.add( + f"/{self.serial}/sgchannels/{output.channel}/awg/modulation/enable", 1 ) if not output.modulation: # We still use the output modulation (`awg/modulation/enable`), but we # set the oscillator to 0 Hz. - nodes_to_initialize_output.extend( - self._collect_configure_oscillator_nodes(output.channel, 0) - ) + nc.extend(self._collect_configure_oscillator_nodes(output.channel, 0)) osc_freq_path = self._make_osc_path(output.channel, 0) - nodes_to_initialize_output.append( - DaqNodeSetAction(self._daq, osc_freq_path, 0.0) - ) + nc.add(osc_freq_path, 0.0) if self._is_full_channel(output.channel): if output.marker_mode is None or output.marker_mode == "TRIGGER": - nodes_to_initialize_output.append( - DaqNodeSetAction( - self.daq, - f"/{self.serial}/sgchannels/{output.channel}/marker/source", - 0, - ) + nc.add( + f"/{self.serial}/sgchannels/{output.channel}/marker/source", 0 ) elif output.marker_mode == "MARKER": - nodes_to_initialize_output.append( - DaqNodeSetAction( - self.daq, - f"/{self.serial}/sgchannels/{output.channel}/marker/source", - 4, - ) + nc.add( + f"/{self.serial}/sgchannels/{output.channel}/marker/source", 4 ) else: raise ValueError( @@ -586,26 +488,15 @@ async def collect_initialization_nodes( ) # set trigger delay to 0 - nodes_to_initialize_output.append( - DaqNodeSetAction( - self.daq, - f"/{self.serial}/sgchannels/{output.channel}/trigger/delay", - 0.0, - ) - ) + nc.add(f"/{self.serial}/sgchannels/{output.channel}/trigger/delay", 0.0) - nodes_to_initialize_output.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/{output.channel}/output/rflfpath", - 1 # RF - if output.port_mode is None or output.port_mode == "rf" - else 0, # LF - ) + nc.add( + f"/{self.serial}/sgchannels/{output.channel}/output/rflfpath", + 1 # RF + if output.port_mode is None or output.port_mode == "rf" + else 0, # LF ) - nodes_to_initialize_output.extend( - self._collect_output_router_initialization_nodes(outputs) - ) + nc.extend(self._collect_output_router_initialization_nodes(outputs)) osc_selects = { ch: osc.index for osc in self._allocated_oscs for ch in osc.channels } @@ -614,41 +505,29 @@ async def collect_initialization_nodes( # via the command table, and the oscselect node is ignored. Therefore it can be set to # any oscillator. 
for ch, osc_idx in osc_selects.items(): - nodes_to_initialize_output.extend( - self._collect_configure_oscillator_nodes(ch, osc_idx) - ) - return nodes_to_initialize_output + nc.extend(self._collect_configure_oscillator_nodes(ch, osc_idx)) + + return await self.maybe_async(nc) def collect_prepare_nt_step_nodes( self, attributes: DeviceAttributesView, recipe_data: RecipeData - ) -> list[DaqNodeAction]: - nodes_to_set = super().collect_prepare_nt_step_nodes(attributes, recipe_data) + ) -> NodeCollector: + nc = NodeCollector(base=f"/{self.serial}/") + nc.extend(super().collect_prepare_nt_step_nodes(attributes, recipe_data)) for synth_idx in set(self._output_to_synth_map): [synth_cf], synth_cf_updated = attributes.resolve( keys=[(AttributeName.SG_SYNTH_CENTER_FREQ, synth_idx)] ) if synth_cf_updated: - nodes_to_set.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/synthesizers/{synth_idx}/centerfreq", - synth_cf, - ) - ) + nc.add(f"synthesizers/{synth_idx}/centerfreq", synth_cf) for ch in range(self._outputs): [dig_mixer_cf], dig_mixer_cf_updated = attributes.resolve( keys=[(AttributeName.SG_DIG_MIXER_CENTER_FREQ, ch)] ) if dig_mixer_cf_updated: - nodes_to_set.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/{ch}/digitalmixer/centerfreq", - dig_mixer_cf, - ) - ) + nc.add(f"sgchannels/{ch}/digitalmixer/centerfreq", dig_mixer_cf) [scheduler_port_delay, port_delay], updated = attributes.resolve( keys=[ (AttributeName.OUTPUT_SCHEDULER_PORT_DELAY, ch), @@ -669,13 +548,7 @@ def collect_prepare_nt_step_nodes( / SAMPLE_FREQUENCY_HZ ) - nodes_to_set.append( - DaqNodeSetAction( - daq=self.daq, - path=f"/{self.serial}/sgchannels/{ch}/output/delay", - value=output_delay_rounded, - ) - ) + nc.add(f"sgchannels/{ch}/output/delay", output_delay_rounded) for route_idx, (route, ampl, phase) in enumerate( ( [ @@ -704,7 +577,7 @@ def collect_prepare_nt_step_nodes( and route_amplitude is not None or route_phase is not None ): - nodes_to_set.extend( + nc.extend( self._collect_output_router_nodes( target=ch, source=output_router.from_channel, @@ -713,7 +586,7 @@ def collect_prepare_nt_step_nodes( phase=route_phase, ) ) - return nodes_to_set + return nc def prepare_upload_binary_wave( self, @@ -722,23 +595,24 @@ def prepare_upload_binary_wave( awg_index: int, wave_index: int, acquisition_type: AcquisitionType, - ): - return DaqNodeSetAction( - self._daq, + ) -> NodeCollector: + nc = NodeCollector() + nc.add( f"/{self.serial}/sgchannels/{awg_index}/awg/waveform/waves/{wave_index}", waveform, + cache=False, filename=filename, - caching_strategy=CachingStrategy.NO_CACHE, ) + return nc async def collect_trigger_configuration_nodes( self, initialization: Initialization, recipe_data: RecipeData - ) -> list[DaqNodeAction]: + ) -> list[DaqNodeSetAction]: _logger.debug("Configuring triggers...") self._wait_for_awgs = True self._emit_trigger = False - ntc = [] + nc = NodeCollector(base=f"/{self.serial}/") for awg_key, awg_config in recipe_data.awg_configs.items(): if awg_key.device_uid != initialization.device_uid: @@ -749,43 +623,34 @@ async def collect_trigger_configuration_nodes( if awg_config.source_feedback_register == "local" and self.is_secondary: # local feedback - ntc.extend( - [ - ( - f"sgchannels/{awg_key.awg_index}/awg/intfeedback/direct/shift", - awg_config.register_selector_shift, - ), - ( - f"sgchannels/{awg_key.awg_index}/awg/intfeedback/direct/mask", - awg_config.register_selector_bitmask, - ), - ( - f"sgchannels/{awg_key.awg_index}/awg/intfeedback/direct/offset", - 
awg_config.command_table_match_offset, - ), - ] + nc.add( + f"sgchannels/{awg_key.awg_index}/awg/intfeedback/direct/shift", + awg_config.register_selector_shift, + ) + nc.add( + f"sgchannels/{awg_key.awg_index}/awg/intfeedback/direct/mask", + awg_config.register_selector_bitmask, + ) + nc.add( + f"sgchannels/{awg_key.awg_index}/awg/intfeedback/direct/offset", + awg_config.command_table_match_offset, ) else: # global feedback - ntc.extend( - [ - ( - f"sgchannels/{awg_key.awg_index}/awg/diozsyncswitch", - 1, # ZSync Trigger - ), - ( - f"sgchannels/{awg_key.awg_index}/awg/zsync/register/shift", - awg_config.register_selector_shift, - ), - ( - f"sgchannels/{awg_key.awg_index}/awg/zsync/register/mask", - awg_config.register_selector_bitmask, - ), - ( - f"sgchannels/{awg_key.awg_index}/awg/zsync/register/offset", - awg_config.command_table_match_offset, - ), - ] + nc.add( + f"sgchannels/{awg_key.awg_index}/awg/diozsyncswitch", 1 + ) # ZSync Trigger + nc.add( + f"sgchannels/{awg_key.awg_index}/awg/zsync/register/shift", + awg_config.register_selector_shift, + ) + nc.add( + f"sgchannels/{awg_key.awg_index}/awg/zsync/register/mask", + awg_config.register_selector_bitmask, + ) + nc.add( + f"sgchannels/{awg_key.awg_index}/awg/zsync/register/offset", + awg_config.command_table_match_offset, ) triggering_mode = initialization.config.triggering_mode @@ -800,28 +665,21 @@ async def collect_trigger_configuration_nodes( if not self.is_secondary: # otherwise, the QA will initialize the nodes self._emit_trigger = True - ntc += [ - ("system/internaltrigger/enable", 0), - ("system/internaltrigger/repetitions", 1), - ] + nc.add("system/internaltrigger/enable", 0) + nc.add("system/internaltrigger/repetitions", 1) for awg_index in ( self._allocated_awgs if len(self._allocated_awgs) > 0 else range(1) ): - ntc += [ - # Rise - (f"sgchannels/{awg_index}/awg/auxtriggers/0/slope", 1), - # Internal trigger - (f"sgchannels/{awg_index}/awg/auxtriggers/0/channel", 8), - ] + nc.add(f"sgchannels/{awg_index}/awg/auxtriggers/0/slope", 1) # Rise + nc.add( + f"sgchannels/{awg_index}/awg/auxtriggers/0/channel", 8 + ) # Internal trigger else: raise LabOneQControllerException( f"Unsupported triggering mode: {triggering_mode} for device type SHFSG." ) - nodes_to_configure_triggers = [ - DaqNodeSetAction(self._daq, f"/{self.serial}/{node}", v) for node, v in ntc - ] - return nodes_to_configure_triggers + return await self.maybe_async(nc) def add_command_table_header(self, body: dict) -> dict: return { @@ -833,36 +691,21 @@ def add_command_table_header(self, body: dict) -> dict: def command_table_path(self, awg_index: int) -> str: return f"/{self.serial}/sgchannels/{awg_index}/awg/commandtable/" - async def collect_reset_nodes(self) -> list[DaqNodeAction]: - reset_nodes = await super().collect_reset_nodes() + async def collect_reset_nodes(self) -> list[DaqNodeSetAction]: + nc = NodeCollector(base=f"/{self.serial}/") # Reset pipeliner first, attempt to set AWG enable leads to FW error if pipeliner was enabled. 
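Both reset routines in this patch encode an ordering dependency: the pipeliner must be reset before anything writes the AWG (or generator) enable node, otherwise the firmware reports an error on devices where the pipeliner was active. A sketch of that constraint in isolation (the pipeliner reset path is an assumption for illustration; the real list comes from `pipeliner_reset_nodes()`):

```python
def collect_reset_actions(serial: str, is_qc: bool) -> list[tuple[str, int]]:
    actions: list[tuple[str, int]] = []
    # 1. Pipeliner first -- assumed path, standing in for pipeliner_reset_nodes().
    actions.append((f"/{serial}/sgchannels/*/pipeliner/reset", 1))
    # 2. Only now is it safe to clear the AWG enable node.
    actions.append((f"/{serial}/sgchannels/*/awg/enable", 0))
    # 3. Remaining teardown, order-insensitive.
    actions.append((f"/{serial}/system/synchronization/source", 0))  # internal
    if is_qc:
        actions.append(
            (f"/{serial}/system/internaltrigger/synchronization/enable", 0)
        )
    return actions

for path, value in collect_reset_actions("dev12001", is_qc=True):
    print(path, "=", value)
```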
- reset_nodes.extend(await self.pipeliner_reset_nodes()) - reset_nodes.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sgchannels/*/awg/enable", - 0, - caching_strategy=CachingStrategy.NO_CACHE, - ) - ) + nc.extend(self.pipeliner_reset_nodes()) + nc.add("sgchannels/*/awg/enable", 0, cache=False) if not self.is_secondary: - reset_nodes.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/system/synchronization/source", - 0, # internal - caching_strategy=CachingStrategy.NO_CACHE, - ) + nc.add( + "system/synchronization/source", + 0, # internal + cache=False, ) if self.options.is_qc: - reset_nodes.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/system/internaltrigger/synchronization/enable", - 0, - caching_strategy=CachingStrategy.NO_CACHE, - ) - ) + nc.add("system/internaltrigger/synchronization/enable", 0, cache=False) + reset_nodes = await super().collect_reset_nodes() + reset_nodes.extend(await self.maybe_async(nc)) return reset_nodes def collect_warning_nodes(self) -> list[str]: diff --git a/laboneq/controller/devices/device_uhfqa.py b/laboneq/controller/devices/device_uhfqa.py index c89d02f..a5de5d3 100644 --- a/laboneq/controller/devices/device_uhfqa.py +++ b/laboneq/controller/devices/device_uhfqa.py @@ -13,13 +13,11 @@ DeviceAttributesView, ) from laboneq.controller.communication import ( - CachingStrategy, - DaqNodeAction, - DaqNodeGetAction, DaqNodeSetAction, ) from laboneq.controller.devices.device_zi import ( DeviceZI, + NodeCollector, delay_to_rounded_samples, IntegrationWeights, IntegrationWeightItem, @@ -53,6 +51,9 @@ REFERENCE_CLOCK_SOURCE_INTERNAL = 0 REFERENCE_CLOCK_SOURCE_EXTERNAL = 1 +MAX_AVERAGES_RESULT_LOGGER = 1 << 17 +MAX_AVERAGES_SCOPE = 1 << 15 + class DeviceUHFQA(DeviceZI): def __init__(self, *args, **kwargs): @@ -75,20 +76,14 @@ def _get_next_osc_index( return None return previously_allocated - def disable_outputs( + async def disable_outputs( self, outputs: set[int], invert: bool ) -> list[DaqNodeSetAction]: - channels_to_disable: list[DaqNodeSetAction] = [ - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sigouts/{ch}/on", - 0, - caching_strategy=CachingStrategy.NO_CACHE, - ) - for ch in range(self._channels) - if (ch in outputs) != invert - ] - return channels_to_disable + nc = NodeCollector(base=f"/{self.serial}/") + for ch in range(self._channels): + if (ch in outputs) != invert: + nc.add(f"sigouts/{ch}/on", 0, cache=False) + return await self.maybe_async(nc) def _nodes_to_monitor_impl(self) -> list[str]: nodes = super()._nodes_to_monitor_impl() @@ -139,7 +134,7 @@ def load_factory_preset_control_nodes(self) -> list[NodeControlBase]: Response(f"/{self.serial}/system/preset/busy", 0), ] - def configure_acquisition( + async def configure_acquisition( self, awg_key: AwgKey, awg_config: AwgConfig, @@ -147,23 +142,26 @@ def configure_acquisition( averages: int, averaging_mode: AveragingMode, acquisition_type: AcquisitionType, - ) -> list[DaqNodeAction]: - nodes = [ - *self._configure_result_logger( + ) -> list[DaqNodeSetAction]: + nc = NodeCollector() + nc.extend( + self._configure_result_logger( awg_key, awg_config, integrator_allocations, averages, averaging_mode, acquisition_type, - ), - *self._configure_input_monitor( + ) + ) + nc.extend( + self._configure_input_monitor( enable=acquisition_type == AcquisitionType.RAW, averages=averages, acquire_length=awg_config.raw_acquire_length, - ), - ] - return nodes + ) + ) + return await self.maybe_async(nc) def _configure_result_logger( self, @@ -173,103 +171,67 @@ def 
_configure_result_logger( averages: int, averaging_mode: AveragingMode, acquisition_type: AcquisitionType, - ): - nodes_to_initialize_result_acquisition = [] - + ) -> NodeCollector: + nc = NodeCollector(base=f"/{self.serial}/") enable = acquisition_type != AcquisitionType.RAW if enable: - nodes_to_initialize_result_acquisition.extend( - [ - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qas/0/result/length", - awg_config.result_length, - ), - DaqNodeSetAction( - self._daq, f"/{self.serial}/qas/0/result/averages", averages - ), - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qas/0/result/mode", - 0 if averaging_mode == AveragingMode.CYCLIC else 1, - ), - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qas/0/result/source", - # 1 == result source 'threshold' - # 2 == result source 'rotation' - 1 if acquisition_type == AcquisitionType.DISCRIMINATION else 2, - ), - DaqNodeSetAction( - self._daq, f"/{self.serial}/qas/0/result/enable", 0 - ), - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qas/0/result/reset", - 1, - caching_strategy=CachingStrategy.NO_CACHE, - ), - ] - ) - + if averaging_mode != AveragingMode.SINGLE_SHOT: + if averages > MAX_AVERAGES_RESULT_LOGGER: + raise LabOneQControllerException( + f"Number of averages {averages} exceeds the allowed maximum {MAX_AVERAGES_RESULT_LOGGER}" + ) + if averages & (averages - 1): + raise LabOneQControllerException( + f"Number of averages {averages} must be a power of 2" + ) + nc.add("qas/0/result/length", awg_config.result_length) + nc.add("qas/0/result/averages", averages) + nc.add( + "qas/0/result/mode", 0 if averaging_mode == AveragingMode.CYCLIC else 1 + ) + nc.add( + "qas/0/result/source", + # 1 == result source 'threshold' + # 2 == result source 'rotation' + 1 if acquisition_type == AcquisitionType.DISCRIMINATION else 2, + ) + nc.add("qas/0/result/enable", 0) + nc.add("qas/0/result/reset", 1, cache=False) _logger.debug("Turning %s result logger...", "on" if enable else "off") - nodes_to_initialize_result_acquisition.append( - DaqNodeSetAction( - self._daq, f"/{self.serial}/qas/0/result/enable", 1 if enable else 0 - ) - ) - - return nodes_to_initialize_result_acquisition + nc.add("qas/0/result/enable", 1 if enable else 0) + return nc def _configure_input_monitor( self, enable: bool, averages: int, acquire_length: int - ): - nodes_to_initialize_input_monitor = [] - + ) -> NodeCollector: + nc = NodeCollector(base=f"/{self.serial}/") if enable: - nodes_to_initialize_input_monitor.extend( - [ - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qas/0/monitor/length", - acquire_length, - ), - DaqNodeSetAction( - self._daq, f"/{self.serial}/qas/0/monitor/averages", averages - ), - DaqNodeSetAction( - self._daq, f"/{self.serial}/qas/0/monitor/enable", 0 - ), - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qas/0/monitor/reset", - 1, - caching_strategy=CachingStrategy.NO_CACHE, - ), - ] - ) - - nodes_to_initialize_input_monitor.append( - DaqNodeSetAction( - self._daq, f"/{self.serial}/qas/0/monitor/enable", 1 if enable else 0 - ) - ) - - return nodes_to_initialize_input_monitor - - def conditions_for_execution_ready(self, with_pipeliner: bool) -> dict[str, Any]: + if averages > MAX_AVERAGES_SCOPE: + raise LabOneQControllerException( + f"Number of averages {averages} exceeds the allowed maximum {MAX_AVERAGES_SCOPE}" + ) + nc.add("qas/0/monitor/length", acquire_length) + nc.add("qas/0/monitor/averages", averages) + nc.add("qas/0/monitor/enable", 0) + nc.add("qas/0/monitor/reset", 1, cache=False) + nc.add("qas/0/monitor/enable", 1 if 
enable else 0) + return nc + + async def conditions_for_execution_ready( + self, with_pipeliner: bool + ) -> dict[str, Any]: conditions: dict[str, Any] = {} for awg_index in self._allocated_awgs: conditions[f"/{self.serial}/awgs/{awg_index}/enable"] = 1 - return conditions + return await self.maybe_async_wait(conditions) - def conditions_for_execution_done( + async def conditions_for_execution_done( self, acquisition_type: AcquisitionType, with_pipeliner: bool ) -> dict[str, Any]: conditions: dict[str, Any] = {} for awg_index in self._allocated_awgs: conditions[f"/{self.serial}/awgs/{awg_index}/enable"] = 0 - return conditions + return await self.maybe_async_wait(conditions) def _validate_range(self, io: IO, is_out: bool): if io.range is None: @@ -320,7 +282,7 @@ async def collect_initialization_nodes( ) -> list[DaqNodeSetAction]: _logger.debug("%s: Initializing device...", self.dev_repr) - nodes_to_initialize_output: list[DaqNodeSetAction] = [] + nc = NodeCollector(base=f"/{self.serial}/") outputs = initialization.outputs or [] for output in outputs: @@ -331,65 +293,33 @@ async def collect_initialization_nodes( awg_idx = output.channel // 2 self._allocated_awgs.add(awg_idx) - nodes_to_initialize_output.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sigouts/{output.channel}/on", - 1 if output.enable else 0, - ) - ) + nc.add(f"sigouts/{output.channel}/on", 1 if output.enable else 0) if output.enable: - nodes_to_initialize_output.append( - DaqNodeSetAction( - self._daq, f"/{self.serial}/sigouts/{output.channel}/imp50", 1 - ) - ) - nodes_to_initialize_output.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sigouts/{output.channel}/offset", - output.offset, - ) - ) + nc.add(f"sigouts/{output.channel}/imp50", 1) + nc.add(f"sigouts/{output.channel}/offset", output.offset) - nodes_to_initialize_output.append( - DaqNodeSetAction(self._daq, f"/{self.serial}/awgs/{awg_idx}/single", 1) - ) + nc.add(f"awgs/{awg_idx}/single", 1) # the following is needed so that in spectroscopy mode, pulse lengths are correct # TODO(2K): Why 2 enables per sigout, but only one is used? 
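The new validation in `_configure_result_logger` bounds the averages against `MAX_AVERAGES_RESULT_LOGGER` (1 << 17 = 131072) and requires a power of two, using the standard bit trick: for a positive integer n, `n & (n - 1)` clears the lowest set bit and is zero exactly when n is a power of two. The same checks in isolation:

```python
MAX_AVERAGES_RESULT_LOGGER = 1 << 17  # 131072, from the constants above

def validate_averages(averages: int, maximum: int = MAX_AVERAGES_RESULT_LOGGER) -> None:
    """Mirror of the checks added to _configure_result_logger()."""
    if averages > maximum:
        raise ValueError(
            f"Number of averages {averages} exceeds the allowed maximum {maximum}"
        )
    # n & (n - 1) clears the lowest set bit; zero exactly for powers of two.
    if averages & (averages - 1):
        raise ValueError(f"Number of averages {averages} must be a power of 2")

validate_averages(4096)        # ok: 2**12
try:
    validate_averages(1000)    # 1000 = 0b1111101000 -> not a power of two
except ValueError as e:
    print(e)
```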
- nodes_to_initialize_output.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sigouts/{output.channel}/enables/{output.channel}", - 1, - ) - ) + nc.add(f"sigouts/{output.channel}/enables/{output.channel}", 1) - nodes_to_initialize_output.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/awgs/{awg_idx}/outputs/{output.channel}/mode", - 1 if output.modulation else 0, - ) + nc.add( + f"awgs/{awg_idx}/outputs/{output.channel}/mode", + 1 if output.modulation else 0, ) if output.range is not None: self._validate_range(output, is_out=True) - nodes_to_initialize_output.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sigouts/{output.channel}/range", - output.range, - ) - ) + nc.add(f"sigouts/{output.channel}/range", output.range) - return nodes_to_initialize_output + return await self.maybe_async(nc) def collect_prepare_nt_step_nodes( self, attributes: DeviceAttributesView, recipe_data: RecipeData - ) -> list[DaqNodeAction]: - nodes_to_set = super().collect_prepare_nt_step_nodes(attributes, recipe_data) + ) -> NodeCollector: + nc = NodeCollector(base=f"/{self.serial}/") + nc.extend(super().collect_prepare_nt_step_nodes(attributes, recipe_data)) for ch in range(self._channels): [scheduler_port_delay, port_delay], updated = attributes.resolve( @@ -411,15 +341,19 @@ def collect_prepare_nt_step_nodes( max_node_delay_samples=DELAY_NODE_MAX_SAMPLES, ) - nodes_to_set.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qas/0/delay", - measurement_delay_rounded, - ) - ) + nc.add("qas/0/delay", measurement_delay_rounded) + + return nc + + def _choose_wf_collector( + self, elf_nodes: NodeCollector, wf_nodes: NodeCollector + ) -> NodeCollector: + return wf_nodes - return nodes_to_set + def _elf_upload_condition(self, awg_index: int) -> dict[str, Any]: + # UHFQA does not yet support upload of ELF and waveforms in a single transaction. 
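`_elf_upload_condition` and `_choose_wf_collector` exist because the UHFQA cannot take the sequencer ELF and the waveforms in a single transaction: waveform writes are diverted to a separate collector and gated on the sequencer's `ready` node. A sketch of the resulting two-phase upload (the `set_node`/`wait_for` coroutines are assumed placeholders for the data-server calls):

```python
async def upload_with_ready_gate(set_node, wait_for, serial: str, awg: int,
                                 elf: bytes, waveforms: list) -> None:
    # Phase 1: upload the compiled sequencer program.
    await set_node(f"/{serial}/awgs/{awg}/elf/data", elf)
    # Gate: wait until the sequencer reports ready -- ELF and waveforms
    # cannot share one transaction on this device.
    await wait_for(f"/{serial}/awgs/{awg}/ready", 1)
    # Phase 2: only now push the waveform table.
    for index, wave in enumerate(waveforms):
        await set_node(f"/{serial}/awgs/{awg}/waveform/waves/{index}", wave)
```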
+ ready_node = self.get_sequencer_paths(awg_index).ready + return {ready_node: 1} def _adjust_frequency(self, freq): # To make the phase correct on the UHFQA (q leading i channel by 90 degrees) @@ -431,14 +365,12 @@ def _configure_standard_mode_nodes( acquisition_type: AcquisitionType, device_uid: str, recipe_data: RecipeData, - ): + ) -> NodeCollector: _logger.debug("%s: Setting measurement mode to 'Standard'.", self.dev_repr) - nodes_to_set_for_standard_mode = [] + nc = NodeCollector(base=f"/{self.serial}/") - nodes_to_set_for_standard_mode.append( - DaqNodeSetAction(self._daq, f"/{self.serial}/qas/0/integration/mode", 0) - ) + nc.add("qas/0/integration/mode", 0) for integrator_allocation in recipe_data.recipe.integrator_allocations: if integrator_allocation.device_id != device_uid: continue @@ -472,60 +404,35 @@ def _configure_standard_mode_nodes( for integrator, integration_unit_index in enumerate( integrator_allocation.channels ): - nodes_to_set_for_standard_mode.extend( - [ - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qas/0/integration/sources/{integration_unit_index}", - inputs_mapping[integrator], - ), - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qas/0/rotations/{integration_unit_index}", - rotations[integrator], - ), - ] + nc.add( + f"qas/0/integration/sources/{integration_unit_index}", + inputs_mapping[integrator], + ) + nc.add( + f"qas/0/rotations/{integration_unit_index}", rotations[integrator] ) if acquisition_type in [ AcquisitionType.INTEGRATION, AcquisitionType.DISCRIMINATION, ]: - nodes_to_set_for_standard_mode.extend( - [ - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qas/0/thresholds/" - f"{integration_unit_index}/correlation/enable", - 0, - ), - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qas/0/thresholds/{integration_unit_index}/level", - integrator_allocation.thresholds[0] or 0.0, - ), - ] + nc.add( + f"qas/0/thresholds/{integration_unit_index}/correlation/enable", + 0, + ) + nc.add( + f"qas/0/thresholds/{integration_unit_index}/level", + integrator_allocation.thresholds[0] or 0.0, ) - return nodes_to_set_for_standard_mode + return nc - def _configure_spectroscopy_mode_nodes(self): + def _configure_spectroscopy_mode_nodes(self) -> NodeCollector: _logger.debug("%s: Setting measurement mode to 'Spectroscopy'.", self.dev_repr) - nodes_to_set_for_spectroscopy_mode = [] - nodes_to_set_for_spectroscopy_mode.append( - DaqNodeSetAction(self._daq, f"/{self.serial}/qas/0/integration/mode", 1) - ) - - nodes_to_set_for_spectroscopy_mode.append( - DaqNodeSetAction( - self._daq, f"/{self.serial}/qas/0/integration/sources/0", 1 - ) - ) - nodes_to_set_for_spectroscopy_mode.append( - DaqNodeSetAction( - self._daq, f"/{self.serial}/qas/0/integration/sources/1", 0 - ) - ) + nc = NodeCollector(base=f"/{self.serial}/") + nc.add("qas/0/integration/mode", 1) + nc.add("qas/0/integration/sources/0", 1) + nc.add("qas/0/integration/sources/1", 0) # The rotation coefficients in spectroscopy mode have to take into account that I and Q are # swapped between in- and outputs, i.e. 
the AWG outputs are I = AWG_wave_I * cos, @@ -533,19 +440,15 @@ def _configure_spectroscopy_mode_nodes(self): # see "Complex multiplication in UHFQA": # https://zhinst.atlassian.net/wiki/spaces/~andreac/pages/787742991/Complex+multiplication+in+UHFQA # https://oldwiki.zhinst.com/wiki/display/~andreac/Complex+multiplication+in+UHFQA) - nodes_to_set_for_spectroscopy_mode.append( - DaqNodeSetAction(self._daq, f"/{self.serial}/qas/0/rotations/0", 1 - 1j) - ) - nodes_to_set_for_spectroscopy_mode.append( - DaqNodeSetAction(self._daq, f"/{self.serial}/qas/0/rotations/1", -1 - 1j) - ) - return nodes_to_set_for_spectroscopy_mode + nc.add("qas/0/rotations/0", 1 - 1j) + nc.add("qas/0/rotations/1", -1 - 1j) + return nc def prepare_integration_weights( self, artifacts: CompilerArtifact | dict[int, CompilerArtifact], integrator_allocations: list[IntegratorAllocation], - kernel_ref: str, + kernel_ref: str | None, ) -> IntegrationWeights | None: if isinstance(artifacts, dict): artifacts: ArtifactsCodegen = artifacts[self._device_class] @@ -575,6 +478,7 @@ def prepare_integration_weights( integration_unit=channel, index=index, name=weight_name, + # Note conjugation here: samples=weight_vector_real - 1j * weight_vector_imag, ) ) @@ -583,44 +487,40 @@ def prepare_integration_weights( def prepare_upload_all_integration_weights( self, awg_index, integration_weights: IntegrationWeights - ): - ret_nodes: list[DaqNodeSetAction] = [] + ) -> NodeCollector: + nc = NodeCollector(base=f"/{self.serial}/") for iw in integration_weights: - ret_nodes.extend( - [ - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qas/0/integration/weights/" - f"{iw.integration_unit}/real", - iw.samples.real, - filename=iw.name + "_i.wave", - ), - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qas/0/integration/weights/" - f"{iw.integration_unit}/imag", - iw.samples.imag, - filename=iw.name + "_q.wave", - ), - ] + nc.add( + f"qas/0/integration/weights/{iw.integration_unit}/real", + iw.samples.real, + filename=iw.name + "_i.wave", ) - return ret_nodes + nc.add( + f"qas/0/integration/weights/{iw.integration_unit}/imag", + iw.samples.imag, + filename=iw.name + "_q.wave", + ) + return nc - def collect_awg_before_upload_nodes( + async def collect_awg_before_upload_nodes( self, initialization: Initialization, recipe_data: RecipeData - ): + ) -> list[DaqNodeSetAction]: acquisition_type = RtExecutionInfo.get_acquisition_type( recipe_data.rt_execution_infos ) if acquisition_type == AcquisitionType.SPECTROSCOPY_IQ: - return self._configure_spectroscopy_mode_nodes() + nc = self._configure_spectroscopy_mode_nodes() else: - return self._configure_standard_mode_nodes( + nc = self._configure_standard_mode_nodes( acquisition_type, initialization.device_uid, recipe_data ) - def collect_awg_after_upload_nodes(self, initialization: Initialization): - nodes_to_initialize_measurement = [] + return await self.maybe_async(nc) + + async def collect_awg_after_upload_nodes( + self, initialization: Initialization + ) -> list[DaqNodeSetAction]: + nc = NodeCollector(base=f"/{self.serial}/") inputs = initialization.inputs if len(initialization.measurements) > 0: measurement = initialization.measurements[0] @@ -630,47 +530,22 @@ def collect_awg_after_upload_nodes(self, initialization: Initialization): self.dev_repr, measurement.length, ) - nodes_to_initialize_measurement.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/qas/0/integration/length", - measurement.length, - ) - ) + nc.add("qas/0/integration/length", measurement.length) - 
nodes_to_initialize_measurement.append( - DaqNodeSetAction( - self._daq, f"/{self.serial}/qas/0/integration/trigger/channel", 7 - ) - ) + nc.add("qas/0/integration/trigger/channel", 7) for dev_input in inputs or []: if dev_input.range is None: continue self._validate_range(dev_input, is_out=False) - nodes_to_initialize_measurement.append( - DaqNodeSetAction( - self._daq, - f"/{self.serial}/sigins/{dev_input.channel}/range", - dev_input.range, - ) - ) + nc.add(f"sigins/{dev_input.channel}/range", dev_input.range) - return nodes_to_initialize_measurement + return await self.maybe_async(nc) async def collect_trigger_configuration_nodes( self, initialization: Initialization, recipe_data: RecipeData - ) -> list[DaqNodeAction]: - _logger.debug("Configuring triggers...") - _logger.debug("Configuring strobe index: 16.") - _logger.debug("Configuring strobe slope: 0.") - _logger.debug("Configuring valid polarity: 2.") - _logger.debug("Configuring valid index: 16.") - _logger.debug("Configuring dios mode: 2.") - _logger.debug("Configuring dios drive: 0x3.") - _logger.debug("Configuring dios extclk: 0x2.") - - nodes_to_configure_triggers = [] + ) -> list[DaqNodeSetAction]: + nc = NodeCollector(base=f"/{self.serial}/") # Loop over at least AWG instance to cover the case that the instrument is only used as a # communication proxy. Some of the nodes on the AWG branch are needed to get proper @@ -678,55 +553,29 @@ async def collect_trigger_configuration_nodes( for awg_index in ( self._allocated_awgs if len(self._allocated_awgs) > 0 else range(1) ): - awg_path = f"/{self.serial}/awgs/{awg_index}" - nodes_to_configure_triggers.extend( - [ - DaqNodeSetAction(self._daq, f"{awg_path}/dio/strobe/index", 16), - DaqNodeSetAction(self._daq, f"{awg_path}/dio/strobe/slope", 0), - DaqNodeSetAction(self._daq, f"{awg_path}/dio/valid/polarity", 2), - DaqNodeSetAction(self._daq, f"{awg_path}/dio/valid/index", 16), - ] - ) + nc.add(f"awgs/{awg_index}/dio/strobe/index", 16) + nc.add(f"awgs/{awg_index}/dio/strobe/slope", 0) + nc.add(f"awgs/{awg_index}/dio/valid/polarity", 2) + nc.add(f"awgs/{awg_index}/dio/valid/index", 16) triggering_mode = initialization.config.triggering_mode if triggering_mode == TriggeringMode.DIO_FOLLOWER or triggering_mode is None: - nodes_to_configure_triggers.extend( - [ - DaqNodeSetAction(self._daq, f"/{self.serial}/dios/0/mode", 4), - DaqNodeSetAction(self._daq, f"/{self.serial}/dios/0/drive", 0x3), - DaqNodeSetAction(self._daq, f"/{self.serial}/dios/0/extclk", 0x2), - ] - ) + nc.add("dios/0/mode", 4) + nc.add("dios/0/drive", 0x3) + nc.add("dios/0/extclk", 0x2) elif triggering_mode == TriggeringMode.DESKTOP_DIO_FOLLOWER: - nodes_to_configure_triggers.extend( - [ - DaqNodeSetAction(self._daq, f"/{self.serial}/dios/0/mode", 0), - DaqNodeSetAction(self._daq, f"/{self.serial}/dios/0/drive", 0), - DaqNodeSetAction(self._daq, f"/{self.serial}/dios/0/extclk", 0x2), - DaqNodeSetAction( - self._daq, f"/{self.serial}/awgs/0/auxtriggers/0/channel", 0 - ), - ] - ) - nodes_to_configure_triggers.append( - DaqNodeSetAction( - self._daq, f"/{self.serial}/awgs/0/auxtriggers/0/slope", 1 - ) - ) + nc.add("dios/0/mode", 0) + nc.add("dios/0/drive", 0) + nc.add("dios/0/extclk", 0x2) + nc.add("awgs/0/auxtriggers/0/channel", 0) + nc.add("awgs/0/auxtriggers/0/slope", 1) for trigger_index in (0, 1): - trigger_path = f"/{self.serial}/triggers/out/{trigger_index}" - nodes_to_configure_triggers.extend( - [ - DaqNodeSetAction(self._daq, f"{trigger_path}/delay", 0.0), - DaqNodeSetAction(self._daq, f"{trigger_path}/drive", 1), 
- DaqNodeSetAction( - self._daq, f"{trigger_path}/source", 32 + trigger_index - ), - ] - ) + nc.add(f"triggers/out/{trigger_index}/delay", 0.0) + nc.add(f"triggers/out/{trigger_index}/drive", 1) + nc.add(f"triggers/out/{trigger_index}/source", 32 + trigger_index) - return nodes_to_configure_triggers + return await self.maybe_async(nc) async def _get_integrator_measurement_data( self, result_index, num_results, averages_divider: int @@ -734,7 +583,7 @@ async def _get_integrator_measurement_data( result_path = f"/{self.serial}/qas/0/result/data/{result_index}/wave" # @TODO(andreyk): replace the raw daq reply parsing on site here and hide it inside # Communication class - data_node_query = self._daq.get_raw(result_path) + data_node_query = await self.get_raw(result_path) assert len(data_node_query[result_path][0]["vector"]) == num_results, ( "number of measurement points returned by daq from device " "'{self.uid}' does not match length of recipe" @@ -770,7 +619,7 @@ async def get_measurement_data( async def get_input_monitor_data(self, channel: int, num_results: int): result_path_ch0 = f"/{self.serial}/qas/0/monitor/inputs/0/wave".lower() result_path_ch1 = f"/{self.serial}/qas/0/monitor/inputs/1/wave".lower() - data = self._daq.get_raw(",".join([result_path_ch0, result_path_ch1])) + data = await self.get_raw(",".join([result_path_ch0, result_path_ch1])) # Truncate returned vectors to the expected length -> hotfix for GCE-681 ch0 = data[result_path_ch0][0]["vector"][0:num_results] ch1 = data[result_path_ch1][0]["vector"][0:num_results] @@ -780,15 +629,7 @@ async def check_results_acquired_status( self, channel, acquisition_type: AcquisitionType, result_length, hw_averages ): results_acquired_path = f"/{self.serial}/qas/0/result/acquired" - batch_get_results = await self._daq.batch_get( - [ - DaqNodeGetAction( - self._daq, - results_acquired_path, - caching_strategy=CachingStrategy.NO_CACHE, - ) - ] - ) + batch_get_results = await self.get_raw_values(results_acquired_path) if batch_get_results[results_acquired_path] != 0: raise LabOneQControllerException( f"The number of measurements executed for device {self.serial} does not match " diff --git a/laboneq/controller/devices/device_utils.py b/laboneq/controller/devices/device_utils.py new file mode 100644 index 0000000..c1c07cc --- /dev/null +++ b/laboneq/controller/devices/device_utils.py @@ -0,0 +1,20 @@ +# Copyright 2023 Zurich Instruments AG +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import annotations +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from laboneq.controller.devices.device_zi import DeviceQualifier, DeviceZI + + +def calc_dev_type(device_qualifier: DeviceQualifier) -> str: + if device_qualifier.options.is_qc is True: + return "SHFQC" + else: + return device_qualifier.driver + + +def dev_api(device: DeviceZI) -> tuple[Any, str]: + """Temporary helper to unify emulation interface for the async API.""" + return (device._api or device._daq._zi_api_object, device.serial) diff --git a/laboneq/controller/devices/device_zi.py b/laboneq/controller/devices/device_zi.py index 90528e6..b1350c2 100644 --- a/laboneq/controller/devices/device_zi.py +++ b/laboneq/controller/devices/device_zi.py @@ -2,6 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 from __future__ import annotations +from collections import defaultdict import json import logging @@ -10,14 +11,13 @@ from copy import deepcopy from dataclasses import dataclass from enum import Enum -from typing import Any, Iterator, cast +from typing import Any, 
Iterator from weakref import ReferenceType, ref import numpy as np import zhinst.core import zhinst.utils from numpy import typing as npt -from zhinst.core.errors import CoreError as LabOneCoreError from laboneq.controller.attribute_value_tracker import ( # pylint: disable=E0401 AttributeName, @@ -26,24 +26,32 @@ ) from laboneq.controller.communication import ( CachingStrategy, - DaqNodeAction, - DaqNodeGetAction, DaqNodeSetAction, DaqWrapper, - DaqWrapperDryRun, + map_device_type, ) +from laboneq.controller.devices.device_utils import dev_api +from laboneq.controller.devices.zi_emulator import set_emulation_option from laboneq.controller.devices.zi_node_monitor import NodeControlBase +from laboneq.controller.pipeliner_reload_tracker import PipelinerReloadTracker from laboneq.controller.recipe_processor import ( AwgConfig, AwgKey, DeviceRecipeData, RecipeData, + RtExecutionInfo, ) from laboneq.controller.util import LabOneQControllerException from laboneq.core.types.enums.acquisition_type import AcquisitionType from laboneq.core.types.enums.averaging_mode import AveragingMode +from laboneq.core.utilities.seqc_compile import SeqCCompileItem, seqc_compile_async from laboneq.core.utilities.string_sanitize import string_sanitize -from laboneq.data.recipe import Initialization, IntegratorAllocation, OscillatorParam +from laboneq.data.recipe import ( + Initialization, + IntegratorAllocation, + NtStepKey, + OscillatorParam, +) from laboneq.data.scheduled_experiment import CompilerArtifact _logger = logging.getLogger(__name__) @@ -77,8 +85,8 @@ class DeviceOptions: is_qc: bool | None = False qc_with_qa: bool = False gen2: bool = False - reference_clock_source: str = None - expected_installed_options: str = None + reference_clock_source: str | None = None + expected_installed_options: str | None = None @dataclass @@ -156,6 +164,33 @@ def delay_to_rounded_samples( return delay_rounded +@dataclass +class NodeAction: + path: str + value: Any + cache: bool = True + filename: str | None = None + + +class NodeCollector: + def __init__(self, base: str = ""): + self._base = base + self._nodes: list[NodeAction] = [] + + def add( + self, path: str, value: Any, cache: bool = True, filename: str | None = None + ): + self._nodes.append(NodeAction(self._base + path, value, cache, filename)) + + def extend(self, other: NodeCollector): + for node in other: + self._nodes.append(node) + + def __iter__(self) -> Iterator[NodeAction]: + for node in self._nodes: + yield node + + class DeviceZI: def __init__(self, device_qualifier: DeviceQualifier, daq: DaqWrapper): self._device_qualifier = device_qualifier @@ -165,11 +200,14 @@ def __init__(self, device_qualifier: DeviceQualifier, daq: DaqWrapper): self._daq = daq self._api = None # TODO(2K): Add type labone.Instrument - self.dev_type: str = None + self.dev_type: str | None = None self.dev_opts: list[str] = [] self._connected = False self._allocated_oscs: list[AllocatedOscillator] = [] self._allocated_awgs: set[int] = set() + self._pipeliner_reload_tracker: dict[int, PipelinerReloadTracker] = defaultdict( + PipelinerReloadTracker + ) self._nodes_to_monitor = None self._sampling_rate = None self._device_class = 0x0 @@ -223,14 +261,38 @@ def interface(self): def daq(self): return self._daq - @property - def _daq_dry_run(self) -> DaqWrapperDryRun: - return cast(DaqWrapperDryRun, self._daq) - @property def is_secondary(self) -> bool: return False + def to_daq_actions(self, nodes: NodeCollector) -> list[DaqNodeSetAction]: + return [ + DaqNodeSetAction( + self._daq, + 
node.path, + node.value, + caching_strategy=CachingStrategy.CACHE + if node.cache + else CachingStrategy.NO_CACHE, + filename=node.filename, + ) + for node in nodes + ] + + async def maybe_async(self, nodes: NodeCollector) -> list[DaqNodeSetAction]: + if self._api is not None: + # await set_parallel( + # self._api, *[(node.path, node.value) for node in nodes()] + # ) + return [] + return self.to_daq_actions(nodes) + + async def maybe_async_wait(self, nodes: dict[str, Any]) -> dict[str, Any]: + if self._api is not None: + # TODO(2K): wait asynchronously + return {} + return nodes + def add_command_table_header(self, body: dict) -> dict: # Stub, implement in sub-class _logger.debug("Command table unavailable on device %s", self.dev_repr) @@ -276,7 +338,7 @@ def _process_dev_opts(self): pass def _get_sequencer_type(self) -> str: - return "auto-detect" + return "auto" def get_sequencer_paths(self, index: int) -> SequencerPaths: return SequencerPaths( @@ -358,16 +420,30 @@ async def collect_initialization_nodes( ) -> list[DaqNodeSetAction]: return [] - # TODO(2K): Routine collecting nodes does not need to be asynchronous - # (caused by PQSC doing batch_set inside). async def collect_trigger_configuration_nodes( self, initialization: Initialization, recipe_data: RecipeData - ) -> list[DaqNodeAction]: + ) -> list[DaqNodeSetAction]: return [] + def _set_emulation_option(self, option: str, value: Any): + set_emulation_option(*dev_api(self), option, value) + def _prepare_emulator(self): - if self.dry_run: - self._daq_dry_run.map_device_type(self.device_qualifier) + if not self.dry_run: + return + + if self._api is None: + map_device_type(self._daq._zi_api_object, self.device_qualifier) + + self._set_emulation_option("dev_type", self.options.dev_type) + if self.options.expected_installed_options is not None: + exp_opts = self.options.expected_installed_options.upper().split("/") + if len(exp_opts) > 0 and exp_opts[0] == "": + exp_opts.pop(0) + if len(exp_opts) > 0: + self._set_emulation_option("features/devtype", exp_opts.pop(0)) + if len(exp_opts) > 0: + self._set_emulation_option("features/options", "\n".join(exp_opts)) async def _connect_to_data_server(self): if self._connected: @@ -386,12 +462,7 @@ async def _connect_to_data_server(self): dev_type_path = f"/{self.serial}/features/devtype" dev_opts_path = f"/{self.serial}/features/options" - dev_traits = await self._daq.batch_get( - [ - DaqNodeGetAction(self._daq, dev_type_path), - DaqNodeGetAction(self._daq, dev_opts_path), - ] - ) + dev_traits = await self.get_raw_values(f"{dev_type_path},{dev_opts_path}") dev_type = dev_traits.get(dev_type_path) dev_opts = dev_traits.get(dev_opts_path) if isinstance(dev_type, str): @@ -413,7 +484,7 @@ def disconnect(self): self._daq.disconnectDevice(self.serial) self._connected = False - def disable_outputs( + async def disable_outputs( self, outputs: set[int], invert: bool ) -> list[DaqNodeSetAction]: """Returns actions to disable the specified outputs for the device. 
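The hunks above establish the pattern this patch rolls out across all device drivers: instead of building `DaqNodeSetAction` objects at every call site, a driver accumulates writes in a `NodeCollector` and passes it through `maybe_async()`, which for now returns the legacy `DaqNodeSetAction` list (or an empty list once the async API path is active). A minimal sketch of the pattern as a driver method; the node paths and values below are illustrative placeholders, not taken from this patch:

    # Sketch only: node paths and values are placeholders.
    async def collect_example_nodes(self) -> list[DaqNodeSetAction]:
        nc = NodeCollector(base=f"/{self.serial}/")
        nc.add("awgs/0/enable", 1, cache=False)  # maps to CachingStrategy.NO_CACHE
        nc.add("oscs/0/freq", 100e6)             # cached set (the default)
        # Legacy path: a list of DaqNodeSetAction; [] once the async API is wired up.
        return await self.maybe_async(nc)

Keeping the base path inside the collector avoids repeating `/{serial}/` at every call site, which is where most of the line savings in this patch come from.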
@@ -434,13 +505,13 @@ def shut_down(self): "%s: Turning off signal output (stub, not implemented).", self.dev_repr ) - def on_experiment_end(self): - nodes = [] - return nodes + def on_experiment_end(self) -> NodeCollector: + return NodeCollector() def free_allocations(self): self._allocated_oscs.clear() self._allocated_awgs.clear() + self._pipeliner_reload_tracker.clear() def _nodes_to_monitor_impl(self): nodes = [] @@ -519,10 +590,12 @@ def allocate_osc(self, osc_param: OscillatorParam): ) same_id_osc.channels.add(osc_param.channel) - def configure_feedback(self, recipe_data: RecipeData) -> list[DaqNodeAction]: + async def configure_feedback( + self, recipe_data: RecipeData + ) -> list[DaqNodeSetAction]: return [] - def configure_acquisition( + async def configure_acquisition( self, awg_key: AwgKey, awg_config: AwgConfig, @@ -530,9 +603,15 @@ def configure_acquisition( averages: int, averaging_mode: AveragingMode, acquisition_type: AcquisitionType, - ) -> list[DaqNodeAction]: + ) -> list[DaqNodeSetAction]: return [] + async def get_raw(self, path: str) -> dict[str, Any]: + return self._daq.get_raw(path) + + async def get_raw_values(self, path: str) -> dict[str, Any]: + return {p: v["value"][-1] for p, v in (await self.get_raw(path)).items()} + async def get_measurement_data( self, channel: int, @@ -546,22 +625,24 @@ async def get_measurement_data( async def get_input_monitor_data(self, channel: int, num_results: int): return None # default -> no results available from the device - def conditions_for_execution_ready(self, with_pipeliner: bool) -> dict[str, Any]: + async def conditions_for_execution_ready( + self, with_pipeliner: bool + ) -> dict[str, Any]: return {} - def conditions_for_execution_done( + async def conditions_for_execution_done( self, acquisition_type: AcquisitionType, with_pipeliner: bool ) -> dict[str, Any]: return {} - def collect_execution_setup_nodes( + async def collect_execution_setup_nodes( self, with_pipeliner: bool, has_awg_in_use: bool - ) -> list[DaqNodeAction]: + ) -> list[DaqNodeSetAction]: return [] - def collect_execution_teardown_nodes( + async def collect_execution_teardown_nodes( self, with_pipeliner: bool - ) -> list[DaqNodeAction]: + ) -> list[DaqNodeSetAction]: return [] async def check_results_acquired_status( @@ -574,8 +655,8 @@ def _adjust_frequency(self, freq): def collect_prepare_nt_step_nodes( self, attributes: DeviceAttributesView, recipe_data: RecipeData - ) -> list[DaqNodeAction]: - nodes_to_set: list[DaqNodeAction] = [] + ) -> NodeCollector: + nc = NodeCollector() for osc in self._allocated_oscs: osc_index = recipe_data.oscillator_ids.index(osc.id) [osc_freq], updated = attributes.resolve( @@ -583,15 +664,138 @@ def collect_prepare_nt_step_nodes( ) if updated: osc_freq_adjusted = self._adjust_frequency(osc_freq) - nodes_to_set.extend( - DaqNodeSetAction( - self._daq, - self._make_osc_path(ch, osc.index), - osc_freq_adjusted, + for ch in osc.channels: + nc.add(self._make_osc_path(ch, osc.index), osc_freq_adjusted) + return nc + + def _choose_wf_collector( + self, elf_nodes: NodeCollector, wf_nodes: NodeCollector + ) -> NodeCollector: + return elf_nodes + + def _elf_upload_condition(self, awg_index: int) -> dict[str, Any]: + return {} + + async def prepare_artifacts( + self, + recipe_data: RecipeData, + rt_section_uid: str, + initialization: Initialization, + awg_index: int, + nt_step: NtStepKey, + ) -> tuple[ + DeviceZI, list[DaqNodeSetAction], list[DaqNodeSetAction], dict[str, Any] + ]: + artifacts = 
recipe_data.scheduled_experiment.artifacts + rt_execution_info = recipe_data.rt_execution_infos[rt_section_uid] + with_pipeliner = rt_execution_info.pipeliner_chunk_count is not None + + if with_pipeliner and not self.has_pipeliner: + raise LabOneQControllerException( + f"{self.dev_repr}: Pipeliner is not supported by the device." + ) + + elf_nodes = NodeCollector() + wf_nodes = NodeCollector() + wf_eff = self._choose_wf_collector(elf_nodes, wf_nodes) + upload_ready_conditions: dict[str, Any] = {} + + if with_pipeliner: + elf_nodes.extend(self.pipeliner_prepare_for_upload(awg_index)) + + for pipeline_chunk in range(rt_execution_info.pipeliner_chunk_count or 1): + effective_nt_step = ( + NtStepKey(indices=tuple([*nt_step.indices, pipeline_chunk])) + if with_pipeliner + else nt_step + ) + rt_exec_step = next( + ( + r + for r in recipe_data.recipe.realtime_execution_init + if r.device_id == initialization.device_uid + and r.awg_id == awg_index + and r.nt_step == effective_nt_step + ), + None, + ) + + if with_pipeliner: + rt_exec_step = self._pipeliner_reload_tracker[awg_index].calc_next_step( + pipeline_chunk=pipeline_chunk, + rt_exec_step=rt_exec_step, + ) + + if rt_exec_step is None: + continue + + seqc_code = self.prepare_seqc( + artifacts, + rt_exec_step.seqc_ref, + ) + waves = self.prepare_waves( + artifacts, + rt_exec_step.wave_indices_ref, + ) + command_table = self.prepare_command_table( + artifacts, + rt_exec_step.wave_indices_ref, + ) + integration_weights = self.prepare_integration_weights( + artifacts, + recipe_data.recipe.integrator_allocations, + rt_exec_step.kernel_indices_ref, + ) + + if seqc_code is not None: + seqc_item = SeqCCompileItem( + dev_type=self.dev_type, + dev_opts=self.dev_opts, + awg_index=awg_index, + sequencer=self._get_sequencer_type(), + sampling_rate=self._sampling_rate, + code=seqc_code, + filename=rt_exec_step.seqc_ref, + ) + await seqc_compile_async(seqc_item) + elf_nodes.extend( + self.prepare_upload_elf( + seqc_item.elf, awg_index, seqc_item.filename ) - for ch in osc.channels ) - return nodes_to_set + upload_ready_conditions.update(self._elf_upload_condition(awg_index)) + + if waves is not None: + acquisition_type = RtExecutionInfo.get_acquisition_type_def( + rt_execution_info + ) + wf_eff.extend( + self.prepare_upload_all_binary_waves( + awg_index, waves, acquisition_type + ) + ) + if command_table is not None: + wf_eff.extend( + self.prepare_upload_command_table(awg_index, command_table) + ) + if integration_weights is not None: + wf_eff.extend( + self.prepare_upload_all_integration_weights( + awg_index, integration_weights + ) + ) + if with_pipeliner: + # For devices with pipeliner, wf_eff == elf_nodes + wf_eff.extend(self.pipeliner_commit(awg_index)) + + if with_pipeliner: + upload_ready_conditions.update(self.pipeliner_ready_conditions(awg_index)) + + elf_nodes_actions = await self.maybe_async(elf_nodes) + upload_ready_conditions = await self.maybe_async_wait(upload_ready_conditions) + wf_nodes_actions = await self.maybe_async(wf_nodes) + + return self, elf_nodes_actions, wf_nodes_actions, upload_ready_conditions @staticmethod def _contains_only_zero_or_one(a): @@ -651,13 +855,6 @@ def _prepare_markers_single(self, waves, sig: str) -> npt.ArrayLike: except StopIteration: pass - try: - marker_samples = next( - (w for w in waves if w["filename"] == f"{sig}_marker2.wave") - )["samples"] - except StopIteration: - pass - if marker_samples is not None: if not self._contains_only_zero_or_one(marker_samples): raise LabOneQControllerException( @@ -716,7 
+913,7 @@ def _prepare_wave_complex(self, waves, sig: str) -> tuple[str, npt.ArrayLike]: def prepare_waves( self, artifacts: CompilerArtifact | dict[int, CompilerArtifact], - wave_indices_ref: str, + wave_indices_ref: str | None, ) -> Waveforms | None: if wave_indices_ref is None: return None @@ -748,7 +945,9 @@ def prepare_waves( return bin_waves def prepare_command_table( - self, artifacts: CompilerArtifact | dict[int, CompilerArtifact], ct_ref: str + self, + artifacts: CompilerArtifact | dict[int, CompilerArtifact], + ct_ref: str | None, ) -> dict | None: if ct_ref is None: return None @@ -774,7 +973,9 @@ def prepare_command_table( return self.add_command_table_header(command_table_body) def prepare_seqc( - self, artifacts: CompilerArtifact | dict[int, CompilerArtifact], seqc_ref: str + self, + artifacts: CompilerArtifact | dict[int, CompilerArtifact], + seqc_ref: str | None, ) -> str | None: if seqc_ref is None: return None @@ -814,19 +1015,22 @@ def prepare_integration_weights( self, artifacts: CompilerArtifact | dict[int, CompilerArtifact], integrator_allocations: list[IntegratorAllocation], - kernel_ref: str, + kernel_ref: str | None, ) -> IntegrationWeights | None: pass # implemented in subclasses of QA instruments - def prepare_upload_elf(self, elf: bytes, awg_index: int, filename: str): + def prepare_upload_elf( + self, elf: bytes, awg_index: int, filename: str + ) -> NodeCollector: + nc = NodeCollector() sequencer_paths = self.get_sequencer_paths(awg_index) - return DaqNodeSetAction( - self._daq, + nc.add( sequencer_paths.elf, elf, + cache=False, filename=filename, - caching_strategy=CachingStrategy.NO_CACHE, ) + return nc def prepare_upload_binary_wave( self, @@ -835,90 +1039,57 @@ def prepare_upload_binary_wave( awg_index: int, wave_index: int, acquisition_type: AcquisitionType, - ): - return DaqNodeSetAction( - self._daq, + ) -> NodeCollector: + nc = NodeCollector() + nc.add( f"/{self.serial}/awgs/{awg_index}/waveform/waves/{wave_index}", waveform, + cache=False, filename=filename, - caching_strategy=CachingStrategy.NO_CACHE, ) + return nc def prepare_upload_all_binary_waves( self, awg_index, waves: Waveforms, acquisition_type: AcquisitionType, - ): + ) -> NodeCollector: # Default implementation for "old" devices, override for newer devices - return [ - self.prepare_upload_binary_wave( - filename=wave.name, - waveform=wave.samples, - awg_index=awg_index, - wave_index=wave.index, - acquisition_type=acquisition_type, + nc = NodeCollector() + for wave in waves: + nc.extend( + self.prepare_upload_binary_wave( + filename=wave.name, + waveform=wave.samples, + awg_index=awg_index, + wave_index=wave.index, + acquisition_type=acquisition_type, + ) ) - for wave in waves - ] + return nc - def prepare_upload_command_table(self, awg_index, command_table: dict): + def prepare_upload_command_table( + self, awg_index, command_table: dict + ) -> NodeCollector: command_table_path = self.command_table_path(awg_index) - return DaqNodeSetAction( - self._daq, + nc = NodeCollector() + nc.add( command_table_path + "data", json.dumps(command_table, sort_keys=True), - caching_strategy=CachingStrategy.NO_CACHE, + cache=False, ) + return nc def prepare_upload_all_integration_weights( self, awg_index, integration_weights: IntegrationWeights - ): + ) -> NodeCollector: raise NotImplementedError - def compile_seqc(self, code: str, awg_index: int, filename_hint: str | None = None): - _logger.debug( - "%s: Compiling sequence for AWG #%d...", - self.dev_repr, - awg_index, - ) - sequencer = 
self._get_sequencer_type()
-        sequencer = "auto" if sequencer == "auto-detect" else sequencer
-
-        try:
-            elf, extra = zhinst.core.compile_seqc(
-                code,
-                self.dev_type,
-                options=self.dev_opts,
-                index=awg_index,
-                sequencer=sequencer,
-                filename=filename_hint,
-                samplerate=self._sampling_rate,
-            )
-        except LabOneCoreError as exc:
-            raise LabOneQControllerException(
-                f"{self.dev_repr}: AWG compilation failed.\n{str(exc)}"
-            ) from None
-
-        compiler_warnings = extra["messages"]
-        if compiler_warnings:
-            raise LabOneQControllerException(
-                f"{self.dev_repr}: AWG compilation succeeded, but there are warnings:\n"
-                f"{compiler_warnings}"
-            )
-
-        _logger.debug(
-            "%s: Compilation successful on AWG #%d with no warnings.",
-            self.dev_repr,
-            awg_index,
-        )
-
-        return elf
-
-    def pipeliner_prepare_for_upload(self, index: int) -> list[DaqNodeAction]:
-        return []
+    def pipeliner_prepare_for_upload(self, index: int) -> NodeCollector:
+        return NodeCollector()
 
-    def pipeliner_commit(self, index: int) -> list[DaqNodeAction]:
-        return []
+    def pipeliner_commit(self, index: int) -> NodeCollector:
+        return NodeCollector()
 
     def pipeliner_ready_conditions(self, index: int) -> dict[str, Any]:
@@ -927,33 +1098,31 @@ def pipeliner_ready_conditions(self, index: int) -> dict[str, Any]:
     def _get_num_awgs(self) -> int:
         return 0
 
-    async def collect_osc_initialization_nodes(self) -> list[DaqNodeAction]:
-        nodes_to_initialize_oscs = []
+    async def collect_osc_initialization_nodes(self) -> list[DaqNodeSetAction]:
+        nc = NodeCollector()
         osc_inits = {
             self._make_osc_path(ch, osc.index): osc.frequency
             for osc in self._allocated_oscs
             for ch in osc.channels
         }
         for path, freq in osc_inits.items():
-            nodes_to_initialize_oscs.append(
-                DaqNodeSetAction(
-                    self._daq, path, 0 if freq is None else self._adjust_frequency(freq)
-                )
-            )
-        return nodes_to_initialize_oscs
+            nc.add(path, 0 if freq is None else self._adjust_frequency(freq))
+        return await self.maybe_async(nc)
 
-    def collect_awg_before_upload_nodes(
+    async def collect_awg_before_upload_nodes(
         self, initialization: Initialization, recipe_data: RecipeData
-    ):
+    ) -> list[DaqNodeSetAction]:
         return []
 
-    def collect_awg_after_upload_nodes(self, initialization: Initialization):
+    async def collect_awg_after_upload_nodes(
+        self, initialization: Initialization
+    ) -> list[DaqNodeSetAction]:
         return []
 
     async def collect_execution_nodes(
         self, with_pipeliner: bool
-    ) -> list[DaqNodeAction]:
-        nodes_to_execute = []
+    ) -> list[DaqNodeSetAction]:
+        nc = NodeCollector(base=f"/{self.serial}/")
         _logger.debug("%s: Executing AWGS...", self.dev_repr)
 
         if self._daq is not None:
@@ -961,32 +1130,22 @@ async def collect_execution_nodes(
                 _logger.debug(
                     "%s: Starting AWG #%d sequencer", self.dev_repr, awg_index
                 )
-                path = f"/{self.serial}/awgs/{awg_index}/enable"
-                nodes_to_execute.append(
-                    DaqNodeSetAction(
-                        self._daq, path, 1, caching_strategy=CachingStrategy.NO_CACHE
-                    )
-                )
+                nc.add(f"awgs/{awg_index}/enable", 1, cache=False)
 
-        return nodes_to_execute
+        return await self.maybe_async(nc)
 
-    def collect_internal_start_execution_nodes(self):
+    async def collect_internal_start_execution_nodes(self) -> list[DaqNodeSetAction]:
         return []
 
     async def fetch_errors(self):
         error_node = f"/{self.serial}/raw/error/json/errors"
-        all_errors = self._daq.get_raw(error_node)
+        all_errors = await self.get_raw(error_node)
         return all_errors[error_node]
 
-    async def collect_reset_nodes(self) -> list[DaqNodeAction]:
-        return [
-            DaqNodeSetAction(
-                self._daq,
-                f"/{self.serial}/raw/error/clear",
-                1,
-                caching_strategy=CachingStrategy.NO_CACHE,
-            )
-        ]
+    async def 
collect_reset_nodes(self) -> list[DaqNodeSetAction]: + nc = NodeCollector(base=f"/{self.serial}/") + nc.add("raw/error/clear", 1, cache=False) + return await self.maybe_async(nc) def collect_warning_nodes(self) -> list[str]: return [] diff --git a/laboneq/controller/devices/zi_emulator.py b/laboneq/controller/devices/zi_emulator.py index a91a59d..3f2899c 100644 --- a/laboneq/controller/devices/zi_emulator.py +++ b/laboneq/controller/devices/zi_emulator.py @@ -1088,3 +1088,10 @@ def _delay(delay: float): _delay(remaining) break _delay(delay_till_next_event) + + +def set_emulation_option(api: Any, serial: str, option: str, value: Any): + if isinstance(api, ziDAQServerEmulator): + api.set_option(serial, option, value) + else: + raise AssertionError("Unexpected emulation implementation") diff --git a/laboneq/controller/near_time_runner.py b/laboneq/controller/near_time_runner.py index 1d436b1..b644553 100644 --- a/laboneq/controller/near_time_runner.py +++ b/laboneq/controller/near_time_runner.py @@ -120,8 +120,12 @@ async def rt_entry_handler( nt_step=self.nt_step(), rt_section_uid=uid ) await self.controller._configure_triggers() - nt_sweep_nodes = self.controller._prepare_nt_step(self.sweep_params_tracker) - step_prepare_nodes = self.controller._prepare_rt_execution(rt_section_uid=uid) + nt_sweep_nodes = await self.controller._prepare_nt_step( + self.sweep_params_tracker + ) + step_prepare_nodes = await self.controller._prepare_rt_execution( + rt_section_uid=uid + ) await batch_set([*self.user_set_nodes, *nt_sweep_nodes, *step_prepare_nodes]) self.user_set_nodes.clear() diff --git a/laboneq/controller/pipeliner_reload_tracker.py b/laboneq/controller/pipeliner_reload_tracker.py index 4161e92..764b7d1 100644 --- a/laboneq/controller/pipeliner_reload_tracker.py +++ b/laboneq/controller/pipeliner_reload_tracker.py @@ -1,15 +1,16 @@ # Copyright 2023 Zurich Instruments AG # SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations import copy -from collections import defaultdict -from laboneq.controller.recipe_processor import AwgKey from laboneq.controller.util import LabOneQControllerException from laboneq.data.recipe import RealtimeExecutionInit -def _merge(last: RealtimeExecutionInit, update: RealtimeExecutionInit): +def _merge( + last: RealtimeExecutionInit, update: RealtimeExecutionInit +) -> RealtimeExecutionInit: new = copy.deepcopy(last) if update.seqc_ref is not None: new.seqc_ref = update.seqc_ref @@ -20,16 +21,13 @@ def _merge(last: RealtimeExecutionInit, update: RealtimeExecutionInit): class PipelinerReloadTracker: def __init__(self): - self.last_rt_exec_steps_per_awg: dict[ - AwgKey, list[RealtimeExecutionInit] - ] = defaultdict(list) + self.last_rt_exec_steps: list[RealtimeExecutionInit] = [] def calc_next_step( self, - awg_key: AwgKey, pipeline_chunk: int, - rt_exec_step: RealtimeExecutionInit, - ) -> tuple[RealtimeExecutionInit, str]: + rt_exec_step: RealtimeExecutionInit | None, + ) -> RealtimeExecutionInit: """Constructs the current RT chunk of a pipeline (PL) from recipe data + trace from previous NT steps Assuming similar sequence of pipeliner jobs for each near-time step, and that any potential @@ -51,7 +49,7 @@ def calc_next_step( | 3 | ^ | ^+ | ^ | Update from recipe for a PL step > 1 """ assert pipeline_chunk >= 0 - last_rt_exec_steps = self.last_rt_exec_steps_per_awg[awg_key] + last_rt_exec_steps = self.last_rt_exec_steps if rt_exec_step is None: # No update from recipe if pipeline_chunk < len(last_rt_exec_steps): @@ -86,7 +84,4 @@ def calc_next_step( 
rt_exec_step = _merge(last_rt_exec_steps[-1], rt_exec_step) last_rt_exec_steps.append(rt_exec_step) - return ( - rt_exec_step, - f"{awg_key.device_uid}_{awg_key.awg_index}_{pipeline_chunk}.seqc", - ) + return rt_exec_step diff --git a/laboneq/controller/recipe_processor.py b/laboneq/controller/recipe_processor.py index eaed4e7..736a5be 100644 --- a/laboneq/controller/recipe_processor.py +++ b/laboneq/controller/recipe_processor.py @@ -329,15 +329,6 @@ def rt_entry_handler( averaging_mode: AveragingMode, acquisition_type: AcquisitionType, ): - if averaging_mode != AveragingMode.SINGLE_SHOT: - max_hw_averages = ( - pow(2, 15) if acquisition_type == AcquisitionType.RAW else pow(2, 17) - ) - if count > max_hw_averages: - raise LabOneQControllerException( - f"Maximum number of hardware averages is {max_hw_averages}, but {count} was given" - ) - self._current_rt_uid = uid self._current_rt_info = self.rt_execution_infos.setdefault( uid, diff --git a/laboneq/core/utilities/seqc_compile.py b/laboneq/core/utilities/seqc_compile.py new file mode 100644 index 0000000..2b4e8ab --- /dev/null +++ b/laboneq/core/utilities/seqc_compile.py @@ -0,0 +1,87 @@ +# Copyright 2023 Zurich Instruments AG +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import annotations +import asyncio +import concurrent.futures +from dataclasses import dataclass +import logging +import os +from typing import Any + +import zhinst.core +from zhinst.core.errors import CoreError as LabOneCoreError + +from laboneq._observability import tracing +from laboneq.core.exceptions.laboneq_exception import LabOneQException + +_logger = logging.getLogger(__name__) + + +@dataclass +class SeqCCompileItem: + dev_type: str + dev_opts: list[str] + awg_index: int + sequencer: str + sampling_rate: float | None + code: str | None = None + filename: str | None = None + elf: bytes | None = None + + +def seqc_compile_one(item: SeqCCompileItem, span: Any): + args = ("compile-awg-thread",) if span is None else ("compile-awg-thread", span) + with tracing.get_tracer().start_span(*args): + if item.code is None: + return + try: + elf, extra = zhinst.core.compile_seqc( + item.code, + item.dev_type, + options=item.dev_opts, + index=item.awg_index, + sequencer=item.sequencer, + filename=item.filename, + samplerate=item.sampling_rate, + ) + except LabOneCoreError as exc: + raise LabOneQException( + f"{item.filename}: AWG compilation failed.\n{str(exc)}" + ) from None + + compiler_warnings = extra["messages"] + if compiler_warnings: + raise LabOneQException( + f"AWG compilation succeeded, but there are warnings:\n" + f"{compiler_warnings}" + ) + + item.elf = elf + + +async def seqc_compile_async(item: SeqCCompileItem): + loop = asyncio.get_event_loop() + await loop.run_in_executor(None, seqc_compile_one, item, None) + + +def awg_compile(awg_data: list[SeqCCompileItem]): + # Compile in parallel: + _logger.debug("Started compilation of AWG programs...") + with tracing.get_tracer().start_span("compile-awg-programs") as awg_span: + max_workers_str = os.environ.get("LABONEQ_AWG_COMPILER_MAX_WORKERS") + max_workers = None if max_workers_str is None else int(max_workers_str) + with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: + futures = [ + executor.submit(seqc_compile_one, item, awg_span) for item in awg_data + ] + concurrent.futures.wait(futures) + exceptions = [ + future.exception() + for future in futures + if future.exception() is not None + ] + if len(exceptions) > 0: + errors = "\n".join([str(e) for e in exceptions]) + raise 
LabOneQException(f"Compilation failed.\n{errors}") + _logger.debug("Finished compilation.") diff --git a/laboneq/data/recipe.py b/laboneq/data/recipe.py index b806a49..c13028f 100644 --- a/laboneq/data/recipe.py +++ b/laboneq/data/recipe.py @@ -149,9 +149,9 @@ class AcquireLength: class RealtimeExecutionInit: device_id: str awg_id: int - seqc_ref: str - wave_indices_ref: str - kernel_indices_ref: str + seqc_ref: str | None + wave_indices_ref: str | None + kernel_indices_ref: str | None nt_step: NtStepKey diff --git a/laboneq/dsl/device/device_setup.py b/laboneq/dsl/device/device_setup.py index 9a6801e..3b5c78c 100644 --- a/laboneq/dsl/device/device_setup.py +++ b/laboneq/dsl/device/device_setup.py @@ -142,6 +142,13 @@ def add_connections( raise LabOneQException(str(e)) from e def instrument_by_uid(self, uid: str) -> Instrument | None: + """Get an instrument by its uid. + + Args: + uid (str): UID of the instrument. + Returns: + Instrument with the given UID, or `None` if no such instrument was found. + """ return next((i for i in self.instruments if i.uid == uid), None) def logical_signal_by_uid(self, uid: str) -> LogicalSignal: @@ -321,6 +328,15 @@ def reset_calibration(self, calibration: Calibration | None = None): self.set_calibration(calibration) def list_calibratables(self): + """Load the device setup from a specified file. + + Returns: + calibratables (dict): + Dictionary of calibratable objects within the device setup. + The dictionary keys are the path string of the calibratable, + and the values are again a dictionary with type of the calibratable + and whether it is already set or not. + """ calibratables = dict() for logical_signal_group in self.logical_signal_groups.values(): calibratables = { diff --git a/laboneq/dsl/experiment/call.py b/laboneq/dsl/experiment/call.py index 2812cf4..85e480d 100644 --- a/laboneq/dsl/experiment/call.py +++ b/laboneq/dsl/experiment/call.py @@ -2,6 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 from __future__ import annotations + from dataclasses import dataclass, field from typing import Any, Callable, Dict, Union @@ -21,11 +22,11 @@ class Call(Operation): args: Dict[str, Any] = field(default=None) def __init__(self, func_name: Union[str, Callable], **kwargs): - """Constructor + """Constructor. Args: func_name: Function that should be called. - kwargs: Arguments of the function call. + **kwargs (dict): Arguments of the function call. """ self.func_name = func_name.__name__ if callable(func_name) else func_name self.args = {} diff --git a/laboneq/dsl/session.py b/laboneq/dsl/session.py index 3c4930f..5222cbc 100644 --- a/laboneq/dsl/session.py +++ b/laboneq/dsl/session.py @@ -444,6 +444,18 @@ def submit( def replace_pulse( self, pulse_uid: str | Pulse, pulse_or_array: npt.ArrayLike | Pulse ): + """ + Replaces a specific pulse with new sample data on the device. + + This is useful when called from within a near-time callback, and allows fast + waveform replacement within near-time loops without recompilation of the experiment. + + Args: + pulse_uid: Pulse to replace, can be a Pulse object or the UID of the pulse. + pulse_or_array: + Replacement pulse, can be a Pulse object or array of values. + Needs to have the same length as the pulse it replaces. 
+        """
         LabOneQFacade.replace_pulse(self, pulse_uid, pulse_or_array)
 
     def get_results(self) -> Results:
diff --git a/laboneq/pulse_sheet_viewer/pulse_sheet_viewer.py b/laboneq/pulse_sheet_viewer/pulse_sheet_viewer.py
index 2504258..fb285b0 100644
--- a/laboneq/pulse_sheet_viewer/pulse_sheet_viewer.py
+++ b/laboneq/pulse_sheet_viewer/pulse_sheet_viewer.py
@@ -29,14 +29,56 @@ def _get_html_template():
     return template.read_text(encoding="utf-8")
 
 
+def _fill_maybe_missing_information(
+    compiled_experiment: CompiledExperiment, max_events_to_publish: int
+) -> CompiledExperiment:
+    if (compiled_experiment.schedule is None) or (
+        len(compiled_experiment.schedule["event_list"]) < max_events_to_publish
+    ):
+        _logger.info(
+            "Recompiling the experiment due to missing extra information in the compiled experiment. "
+            f"Compile with `OUTPUT_EXTRAS=True` and `MAX_EVENTS_TO_PUBLISH={max_events_to_publish}` "
+            "to bypass this step with a small impact on the compilation time."
+        )
+        dummy_session = SimpleNamespace()
+        dummy_session.experiment = compiled_experiment.experiment
+        dummy_session.device_setup = compiled_experiment.device_setup
+
+        compiled_experiment_for_psv = LabOneQFacade.compile(
+            dummy_session,
+            _logger,
+            {
+                "MAX_EVENTS_TO_PUBLISH": max_events_to_publish,
+                "OUTPUT_EXTRAS": True,
+                "LOG_REPORT": False,
+            },
+        )
+        compiled_experiment_for_psv.experiment = compiled_experiment.experiment
+        return compiled_experiment_for_psv
+    return compiled_experiment
+
+
 def interactive_psv(
     compiled_experiment: CompiledExperiment,
     inline=True,
     max_simulation_length: float | None = None,
+    max_events_to_publish: int = 1000,
 ):
+    """Start an interactive pulse sheet viewer.
+
+    Args:
+        compiled_experiment: The compiled experiment to show.
+        inline: If `True`, displays the pulse sheet viewer in the Notebook.
+        max_simulation_length: Displays signals up to this time, in seconds;
+            nothing beyond this time is shown. Default: 10 ms.
+        max_events_to_publish: Maximum number of events to show. Default: 1000.
+    """
     name = compiled_experiment.experiment.uid
+    compiled_experiment = _fill_maybe_missing_information(
+        compiled_experiment, max_events_to_publish
+    )
     html_text = PulseSheetViewer.generate_viewer_html_text(
-        compiled_experiment.schedule, name, interactive=True
+        compiled_experiment.scheduled_experiment.schedule, name, interactive=True
     )
     simulation = OutputSimulator(
         compiled_experiment,
@@ -169,35 +211,13 @@ def show_pulse_sheet(
     """
     timestamp = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
     filename = f"{name}_{timestamp}.html"
-
-    schedule = compiled_experiment.scheduled_experiment.schedule
-
-    if (schedule is None) or (len(schedule["event_list"]) < max_events_to_publish):
-        _logger.info(
-            "Recompiling the experiment due to missing extra information in the compiled experiment. "
-            f"Compile with `OUTPUT_EXTRAS=True` and `MAX_EVENTS_TO_PUBLISH={max_events_to_publish}` "
-            "to bypass this step with a small impact on the compilation time."
- ) - dummy_session = SimpleNamespace() - dummy_session.experiment = compiled_experiment.experiment - dummy_session.device_setup = compiled_experiment.device_setup - - compiled_experiment_for_psv = LabOneQFacade.compile( - dummy_session, - _logger, - { - "MAX_EVENTS_TO_PUBLISH": max_events_to_publish, - "OUTPUT_EXTRAS": True, - "LOG_REPORT": False, - }, - ) - compiled_experiment_for_psv.experiment = compiled_experiment.experiment - compiled_experiment = compiled_experiment_for_psv - - schedule = compiled_experiment.scheduled_experiment.schedule - + compiled_experiment = _fill_maybe_missing_information( + compiled_experiment, max_events_to_publish + ) if not interactive: - PulseSheetViewer.generate_viewer_html_file(schedule, name, filename) + PulseSheetViewer.generate_viewer_html_file( + compiled_experiment.scheduled_experiment.schedule, name, filename + ) try: import IPython.display as ipd diff --git a/requirements-dev.txt b/requirements-dev.txt index 8b669ff..bf4d585 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -30,6 +30,9 @@ ipykernel rustworkx ipython parse +opentelemetry-sdk +opentelemetry-api +cattrs # Packaging test pydeps
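For completeness, a minimal sketch of driving the new `laboneq.core.utilities.seqc_compile` helpers introduced in this release; the device type, options, and SeqC source below are placeholders, not values produced by the LabOne Q compiler:

    # Sketch only: dev_type, dev_opts, and the SeqC snippet are placeholders.
    from laboneq.core.utilities.seqc_compile import SeqCCompileItem, awg_compile

    item = SeqCCompileItem(
        dev_type="HDAWG8",   # placeholder device type
        dev_opts=[],
        awg_index=0,
        sequencer="auto",    # matches the new DeviceZI._get_sequencer_type() default
        sampling_rate=None,  # let the compiler pick the device default
        code="wave w = ones(32);\nplayWave(w);",
        filename="awg_0.seqc",
    )
    awg_compile([item])  # thread pool size capped by LABONEQ_AWG_COMPILER_MAX_WORKERS
    elf = item.elf       # the compiled ELF is stored back on the item

From async code, `await seqc_compile_async(item)` performs the same compilation in an executor thread, which is how `DeviceZI.prepare_artifacts` uses it above.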