From 467bda3d8269a96f05a25b5d380f6600c6d9b527 Mon Sep 17 00:00:00 2001 From: Pol Welter Date: Fri, 9 Feb 2024 12:59:22 +0000 Subject: [PATCH] Release 2.24.0 --- examples/00_reference/14_PRNG_usage.ipynb | 635 +++++++++++++ .../15_Advanced_sweeping_examples.ipynb | 483 ++++++++++ .../01_randomized_benchmarking.ipynb | 374 ++++---- ...2_RandomizedBenchmarking_from_Qiskit.ipynb | 873 ++++++++++++++++++ examples/06_qasm/02_Two_Qubit_RB_Qiskit.ipynb | 621 ------------- laboneq/VERSION.txt | 2 +- .../code_generator/analyze_playback.py | 2 + .../compiler/code_generator/code_generator.py | 41 +- .../code_generator/sampled_event_handler.py | 20 +- .../experiment_access/experiment_dao.py | 10 +- .../feedback_router/feedback_router.py | 26 +- laboneq/compiler/scheduler/match_schedule.py | 6 +- laboneq/compiler/scheduler/preorder_map.py | 45 +- laboneq/compiler/scheduler/schedule_data.py | 5 + laboneq/compiler/scheduler/scheduler.py | 44 +- laboneq/compiler/workflow/compiler.py | 10 +- .../compiler/workflow/neartime_execution.py | 44 +- .../compiler/workflow/realtime_compiler.py | 4 +- laboneq/compiler/workflow/recipe_generator.py | 20 +- laboneq/compiler/workflow/reporter.py | 248 ++--- laboneq/compiler/workflow/rt_linker.py | 6 + .../example_helpers/plotting/plot_helpers.py | 8 +- laboneq/controller/__init__.py | 1 - laboneq/controller/attribute_value_tracker.py | 4 +- laboneq/controller/communication.py | 53 +- laboneq/controller/controller.py | 85 +- laboneq/controller/devices/async_support.py | 17 +- .../controller/devices/device_collection.py | 80 +- laboneq/controller/devices/device_hdawg.py | 2 +- laboneq/controller/devices/device_pqsc.py | 15 - .../devices/device_pretty_printer.py | 3 +- .../controller/devices/device_setup_dao.py | 22 +- laboneq/controller/devices/device_shfppc.py | 60 +- laboneq/controller/devices/device_shfqa.py | 242 +++-- laboneq/controller/devices/device_uhfqa.py | 12 +- laboneq/controller/devices/device_utils.py | 62 +- laboneq/controller/devices/device_zi.py | 167 ++-- laboneq/controller/devices/zi_emulator.py | 304 +++--- laboneq/controller/devices/zi_node_monitor.py | 96 +- laboneq/controller/near_time_runner.py | 18 +- .../controller/pipeliner_reload_tracker.py | 49 +- laboneq/controller/recipe_processor.py | 39 +- laboneq/controller/versioning.py | 18 +- laboneq/data/calibration/__init__.py | 19 +- laboneq/data/compilation_job.py | 17 +- .../data/experiment_description/__init__.py | 1 + laboneq/data/scheduled_experiment.py | 6 +- laboneq/dsl/calibration/__init__.py | 2 +- laboneq/dsl/calibration/amplifier_pump.py | 62 +- laboneq/dsl/calibration/precompensation.py | 2 - laboneq/dsl/experiment/builtins.py | 6 +- laboneq/dsl/experiment/experiment.py | 15 +- laboneq/dsl/experiment/section.py | 26 +- laboneq/dsl/experiment/section_context.py | 4 + laboneq/dsl/laboneq_facade.py | 5 +- laboneq/dsl/parameter.py | 2 +- laboneq/dsl/serialization/serializer.py | 1 + laboneq/dsl/session.py | 12 +- .../legacy_adapters/calibration_converter.py | 15 +- .../converters_experiment_description.py | 3 + .../experiment_info_builder.py | 32 +- laboneq/{controller => }/laboneq_logging.py | 73 ++ laboneq/openqasm3/openqasm3_importer.py | 177 +++- laboneq/simple.py | 21 +- laboneq/simulator/seqc_parser.py | 17 +- pyproject.toml | 9 +- 66 files changed, 3833 insertions(+), 1570 deletions(-) create mode 100644 examples/00_reference/14_PRNG_usage.ipynb create mode 100644 examples/00_reference/15_Advanced_sweeping_examples.ipynb create mode 100644 
examples/06_qasm/02_RandomizedBenchmarking_from_Qiskit.ipynb
 delete mode 100644 examples/06_qasm/02_Two_Qubit_RB_Qiskit.ipynb
 rename laboneq/{controller => }/laboneq_logging.py (77%)

diff --git a/examples/00_reference/14_PRNG_usage.ipynb b/examples/00_reference/14_PRNG_usage.ipynb
new file mode 100644
index 0000000..addc4bf
--- /dev/null
+++ b/examples/00_reference/14_PRNG_usage.ipynb
@@ -0,0 +1,635 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "32e40b7136f49820",
+   "metadata": {
+    "collapsed": false
+   },
+   "source": [
+    "# Using the in-sequencer PRNG with LabOne Q\n",
+    "\n",
+    "The PRNG is a peripheral of the sequencer on HDAWG and SHFSG for generating pseudo-random numbers.\n",
+    "\n",
+    "The PRNG makes it possible to play a (deterministically) shuffled sequence of pulses or gates while using sequencer memory efficiently. \n",
+    "This may be particularly interesting for long randomized benchmarking sequences and similar experiments.\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ab861622",
+   "metadata": {},
+   "source": [
+    "## General notes on PRNG setup and sampling\n",
+    "\n",
+    "In LabOne Q, the PRNG is used via the `prng_setup()` and `prng_loop()` commands. An experiment using the PRNG roughly has the following structure:\n",
+    "\n",
+    "```python\n",
+    "with prng_setup(seed=..., range=...) as prng:              # 1.\n",
+    "    with prng_loop(prng=prng, count=...) as prng_sample:   # 2.\n",
+    "        ...\n",
+    "        with match(prng_sample=prng_sample):               # 3.\n",
+    "            with case(0):\n",
+    "                play(...)\n",
+    "            with case(1):\n",
+    "                play(...)\n",
+    "            ...\n",
+    "```\n",
+    "\n",
+    "The important steps are:\n",
+    "1. We _seed_ the PRNG and specify its range with `prng_setup()`. We can now draw random numbers from the PRNG, in the range 0 to `range - 1`.\n",
+    "   \n",
+    "   The seed and range are valid within the scope of the `prng_setup()` block. As there is only a single PRNG available per sequencer, PRNG setups cannot be nested, but we are free to reseed the PRNG later.\n",
+    "   \n",
+    "2. The actual sampling of the random numbers happens in `prng_loop()`. This block marks a section that will be executed `count` times, with a new random number drawn each time. The result of the context manager (i.e. the right-hand side of `as`, here `prng_sample`) provides us with a handle to those random numbers.\n",
+    "   \n",
+    "   It may be helpful to think of `prng_sample` as similar to a sweep parameter. Like a sweep parameter, it is representative of the values that the variable will take during the iterations of the loop. The PRNG sample is also a convenient way to access a simulation of the PRNG values, see below.\n",
+    "   \n",
+    "3. We use the PRNG sample to branch into one of multiple options. We do this with a `match` block, providing one `case` for each value the PRNG might emit.\n",
+    "\n",
+    "\n",
+    "## Note on PRNG loop iteration length\n",
+    "\n",
+    "If the body of the PRNG loop between subsequent calls to `get_sample()` is too short, the waveform playback may contain gaps, as the sequencer may be unable to issue new waveforms fast enough. \n",
+    "We recommend making the body of the PRNG loop at least 64 samples long.\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "aaa4bf28",
+   "metadata": {},
+   "source": [
+    "## 0. 
Imports and setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "initial_id", + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "\n", + "# To use the PRNG in LabOne Q DSL, we currently require the exepriment builtins\n", + "from laboneq.contrib.example_helpers.plotting.plot_helpers import plot_simulation\n", + "from laboneq.dsl.experiment import PlayPulse\n", + "from laboneq.dsl.experiment.builtins import *\n", + "from laboneq.simple import *" + ] + }, + { + "cell_type": "markdown", + "id": "77aaaeb8474651df", + "metadata": { + "collapsed": false + }, + "source": [ + "Create a device setup and connect to a session" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7abdb8fc", + "metadata": {}, + "outputs": [], + "source": [ + "device_setup = DeviceSetup(uid=\"my_QCCS\")\n", + "\n", + "device_setup.add_dataserver(host=\"localhost\", port=\"8004\")\n", + "\n", + "device_setup.add_instruments(\n", + " SHFQC(uid=\"device_shfqc\", address=\"dev12345\", device_options=\"SHFQC/QC6CH\")\n", + ")\n", + "\n", + "device_setup.add_connections(\n", + " \"device_shfqc\",\n", + " create_connection(to_signal=\"q0/drive_line\", ports=\"SGCHANNELS/0/OUTPUT\"),\n", + " create_connection(to_signal=\"q0/measure_line\", ports=\"QACHANNELS/0/OUTPUT\"),\n", + " create_connection(to_signal=\"q0/acquire_line\", ports=\"QACHANNELS/0/INPUT\"),\n", + ")\n", + "\n", + "# set a minimal calibration to device setup\n", + "drive_lo = Oscillator(frequency=1e9)\n", + "measure_lo = Oscillator(frequency=4e9)\n", + "cal = Calibration()\n", + "cal[\"/logical_signal_groups/q0/drive_line\"] = SignalCalibration(\n", + " local_oscillator=drive_lo\n", + ")\n", + "cal[\"/logical_signal_groups/q0/measure_line\"] = SignalCalibration(\n", + " local_oscillator=measure_lo\n", + ")\n", + "cal[\"/logical_signal_groups/q0/acquire_line\"] = SignalCalibration(\n", + " local_oscillator=measure_lo\n", + ")\n", + "device_setup.set_calibration(cal)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "45a335ea0ac4ae47", + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "emulate = True\n", + "\n", + "session = Session(device_setup)\n", + "session.connect(do_emulation=emulate)" + ] + }, + { + "cell_type": "markdown", + "id": "12f3c53a", + "metadata": {}, + "source": [ + "\n", + "## 1. Simple example\n", + "\n", + "Let us look at a simple but already complete example." + ] + }, + { + "cell_type": "markdown", + "id": "a29019655d7438b0", + "metadata": { + "collapsed": false + }, + "source": [ + "To keep it simple, we specify `range=4` when setting up the PRNG. This means the PRNG will only produce the numbers 0, 1, 2, and 3. We then play twenty pulses where the amplitude of each pulse is determined by the random number. After these 20 pulses, we read out, and then start over by reseeding the PRNG." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e58a64841b27a96",
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "const_pulse = pulse_library.const(length=100e-9)\n",
+    "\n",
+    "# needed to access the generated prng samples after experiment creation\n",
+    "prng_sample = None\n",
+    "\n",
+    "\n",
+    "@experiment(signals=[\"drive\", \"measure\", \"acquire\"])\n",
+    "def prng_example1():\n",
+    "    global prng_sample\n",
+    "    with acquire_loop_rt(8):\n",
+    "        with prng_setup(range=4, seed=123, uid=\"prng_setup\") as prng:\n",
+    "            with prng_loop(prng, count=20, uid=\"prng_sample\") as prng_sample:\n",
+    "                with match(prng_sample=prng_sample):\n",
+    "                    with case(0):\n",
+    "                        play(\"drive\", const_pulse, amplitude=0.2)\n",
+    "                    with case(1):\n",
+    "                        play(\"drive\", const_pulse, amplitude=0.4)\n",
+    "                    with case(2):\n",
+    "                        play(\"drive\", const_pulse, amplitude=0.6)\n",
+    "                    with case(3):\n",
+    "                        play(\"drive\", const_pulse, amplitude=0.8)\n",
+    "\n",
+    "        with section(uid=\"readout\", play_after=\"prng_setup\"):\n",
+    "            play(\"measure\", const_pulse)\n",
+    "            acquire(signal=\"acquire\", kernel=const_pulse, handle=\"h1\")\n",
+    "\n",
+    "            delay(\"measure\", 100e-9)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f06b60b0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "exp1 = prng_example1()\n",
+    "q0_ls = device_setup.logical_signal_groups[\"q0\"].logical_signals\n",
+    "exp1.map_signal(\"drive\", q0_ls[\"drive_line\"])\n",
+    "exp1.map_signal(\"measure\", q0_ls[\"measure_line\"])\n",
+    "exp1.map_signal(\"acquire\", q0_ls[\"acquire_line\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "39440c6c0054f51",
+   "metadata": {
+    "collapsed": false
+   },
+   "source": [
+    "Compile the experiment. We can then inspect the generated SeqC code for the `drive` signal to see the code that drives the PRNG."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ce1c00a0eada2d97",
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "compiled_exp1 = session.compile(exp1)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b1a7ddb010360ed5",
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# print the sequencer code generated by the example\n",
+    "print(compiled_exp1.src[1][\"text\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "a38f0f914a33d008",
+   "metadata": {
+    "collapsed": false
+   },
+   "source": [
+    "Indeed, we can see that the inner loop of 20 pulses is a simple `repeat(20) {...}` loop, with the random number used as a pointer into the command table.\n",
+    "\n",
+    "### 1.1 Simulation\n",
+    "\n",
+    "We can simulate this experiment with the `OutputSimulator`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "34005d9b885a1494",
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "plot_simulation(\n",
+    "    compiled_exp1,\n",
+    "    start_time=0,\n",
+    "    length=1e-3,\n",
+    "    signals=[\"drive\"],\n",
+    "    plot_height=5,\n",
+    "    plot_width=15,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "6c0058688f2b286f",
+   "metadata": {
+    "collapsed": false
+   },
+   "source": [
+    "### 1.2 Tracing the values produced by the PRNG\n",
+    "\n",
+    "While the output simulator produces an accurate preview of the waveform that the AWG will produce, it would be tedious to attempt to reconstruct the actual sequence of random numbers from the waveform alone.\n",
+    "\n",
+    "Instead, LabOne Q can directly give us the values that the PRNG will emit as part of the `PRNGSample` produced by `prng_loop`.\n",
+    "\n",
+    "We can inspect the `prng_sample.values` from the example above (note how we had to sneak in a `global prng_sample` to exfiltrate the object out of the `@experiment` definition):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "916c9843584722f0",
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "print(prng_sample.values)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "b00db0cad11fb93d",
+   "metadata": {},
+   "source": [
+    "### 1.3 Syntactic sugar: `play_indexed()`\n",
+    "\n",
+    "If you want to distinguish between many different options for the random variable (e.g. the 24 Clifford gates for a simple RB experiment, or even more), the `match...case` notation becomes overly verbose. \n",
+    "LabOne Q provides a helper function that allows you to specify a list of pulses more concisely.\n",
+    "The command `play_indexed(pulses, index)` takes an iterable (e.g. a list) of pulses and plays one of them based on `index: PRNGSample`.\n",
+    "\n",
+    "The `pulses` argument must contain instances of `PlayPulse`; this may be extended to complete sections in the future. \n",
+    "It is also currently not possible to play more than a single pulse per branch when using `play_indexed()`.\n",
+    "\n",
+    "The earlier example looked like so:\n",
+    "```python\n",
+    "    with prng_loop(prng, count=20, uid=\"prng_sample\") as prng_sample:\n",
+    "        with match(prng_sample=prng_sample):\n",
+    "            with case(0):\n",
+    "                play(\"drive\", pulse, amplitude=0.2)\n",
+    "            with case(1):\n",
+    "                play(\"drive\", pulse, amplitude=0.4)\n",
+    "            with case(2):\n",
+    "                play(\"drive\", pulse, amplitude=0.6)\n",
+    "            with case(3):\n",
+    "                play(\"drive\", pulse, amplitude=0.8)\n",
+    "```\n",
+    "\n",
+    "We can rewrite it using `play_indexed()`:\n",
+    "```python\n",
+    "    pulses = [\n",
+    "        PlayPulse(signal=\"drive\", pulse=pulse, amplitude=a)\n",
+    "        for a in [0.2, 0.4, 0.6, 0.8]\n",
+    "    ]\n",
+    "    with prng_loop(prng, count=20, uid=\"prng_sample\") as prng_sample:\n",
+    "        play_indexed(pulses, prng_sample)\n",
+    "```\n",
+    "\n",
+    "Note that `play_indexed()` is purely for convenience: it calls `match()` internally, and both snippets yield the same experiment object.\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "465a66c855da253a",
+   "metadata": {
+    "collapsed": false
+   },
+   "source": [
+    "## 2. Measurements inside PRNG loop\n",
+    "\n",
+    "Measuring a qubit inside `prng_loop()` is of course allowed. In this case, the results object will contain an extra dimension, labelled with the `PRNGSample`, just as it would if the PRNG loop were instead a sweep over some parameter.\n",
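+    "\n",
+    "As a quick orientation for the example below (a sketch, assuming the acquire handle `h1` used there), the PRNG loop contributes one axis to the acquired results, which can be reduced like any other sweep axis:\n",
+    "\n",
+    "```python\n",
+    "# sketch: the PRNG loop adds one result axis of length `count`\n",
+    "data = results.acquired_results[\"h1\"].data  # shape: (sweep points, PRNG loop count)\n",
+    "mean_per_sample = data.mean(axis=0)  # average over the sweep, per PRNG sample\n",
+    "```"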
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "242ede48f6b3b4ab",
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "pulses = [\n",
+    "    PlayPulse(signal=\"drive\", pulse=const_pulse, amplitude=a)\n",
+    "    for a in [0.2, 0.4, 0.6, 0.8]\n",
+    "]\n",
+    "\n",
+    "\n",
+    "@experiment(signals=[\"drive\", \"measure\", \"acquire\"])\n",
+    "def prng_example():\n",
+    "    with acquire_loop_rt(4):\n",
+    "        # We add a 'dummy' sweep here, to illustrate how sweeps compose with the PRNG.\n",
+    "        with sweep_range(0, 1, count=5, axis_name=\"sweep_param\"):\n",
+    "            # Seed the PRNG\n",
+    "            with prng_setup(range=4, seed=0xABCD, uid=\"seed_prng\") as prng:\n",
+    "                # Draw values from the PRNG in a loop\n",
+    "                with prng_loop(prng, 35, uid=\"prng_loop\") as prng_sample:\n",
+    "                    # 'match' the PRNG sample to choose a pulse to play\n",
+    "                    play_indexed(pulses, prng_sample)\n",
+    "\n",
+    "                    # Readout _inside_ the PRNG loop\n",
+    "                    with section():\n",
+    "                        reserve(\"drive\")\n",
+    "                        play(\"measure\", const_pulse)\n",
+    "                        acquire(\"acquire\", kernel=const_pulse, handle=\"h1\")\n",
+    "                        delay(\"measure\", 100e-9)\n",
+    "\n",
+    "\n",
+    "exp = prng_example()\n",
+    "q0_ls = device_setup.logical_signal_groups[\"q0\"].logical_signals\n",
+    "exp.map_signal(\"drive\", q0_ls[\"drive_line\"])\n",
+    "exp.map_signal(\"measure\", q0_ls[\"measure_line\"])\n",
+    "exp.map_signal(\"acquire\", q0_ls[\"acquire_line\"])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "268e2876",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "compiled = session.compile(exp)\n",
+    "\n",
+    "results = session.run(compiled)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b0c6b503",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "acquired_results = results.acquired_results[\"h1\"]\n",
+    "print(f\"Result shape: {acquired_results.data.shape}\")\n",
+    "print(f\"Result axes: {acquired_results.axis_name}\")\n",
+    "print(\"Result coordinates:\")\n",
+    "for name, coords in zip(acquired_results.axis_name, acquired_results.axis):\n",
+    "    print(f\"  {name}: {coords}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "b96904c52eeeda3f",
+   "metadata": {
+    "collapsed": false
+   },
+   "source": [
+    "## 3. Advanced examples\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "43dca27c",
+   "metadata": {},
+   "source": [
+    "### 3.1 Reseeding the PRNG\n",
+    "\n",
+    "Reseeding the PRNG is allowed. For example, consider this DSL snippet.\n",
+    "\n",
+    "```python\n",
+    "# seed the PRNG with the value 0xCAFE, with a max. value of 9\n",
+    "with prng_setup(seed=0xCAFE, range=10) as prng:\n",
+    "    with prng_loop(prng=prng, count=...) as prng_sample:\n",
+    "        ...\n",
+    "        with match(prng_sample=prng_sample):\n",
+    "            # play something conditionally on `prng_sample`\n",
+    "            ...\n",
+    "\n",
+    "# reseed the PRNG with a different value, e.g. 0xBEEF, and an upper value of 15\n",
+    "with prng_setup(seed=0xBEEF, range=16) as prng2:\n",
+    "    with prng_loop(prng=prng2, count=...) as prng_sample2:\n",
+    "        ...\n",
+    "        with match(prng_sample=prng_sample2):\n",
+    "            # play something conditionally on `prng_sample2`\n",
+    "            ...\n",
+    "```\n",
+    "\n",
+    "Naturally, the iteration counts of the two `prng_loop` instances need not be identical, nor do the pulses played in the match blocks.\n",
+    "The compiler will enforce that we cannot match `prng_sample2` inside the first `prng_setup` block and vice versa.\n",
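+    "\n",
+    "For illustration, here is a sketch of a pattern that the compiler would reject, because it mixes the two PRNG samples:\n",
+    "\n",
+    "```python\n",
+    "with prng_setup(seed=0xCAFE, range=10) as prng:\n",
+    "    with prng_loop(prng=prng, count=10) as prng_sample:\n",
+    "        # error: prng_sample2 (from the snippet above) belongs to the other PRNG setup\n",
+    "        with match(prng_sample=prng_sample2):\n",
+    "            ...\n",
+    "```"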
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "987770e8",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "pulses = [\n",
+    "    PlayPulse(signal=\"drive\", pulse=const_pulse, amplitude=a)\n",
+    "    for a in np.linspace(0.1, 1, 10)\n",
+    "]\n",
+    "\n",
+    "\n",
+    "@experiment(signals=[\"drive\", \"measure\", \"acquire\"])\n",
+    "def prng_reseeding_example():\n",
+    "    with acquire_loop_rt(4):\n",
+    "        # seed the PRNG with the value 0xCAFE, with a max. value of 9\n",
+    "        with prng_setup(seed=0xCAFE, range=10) as prng:\n",
+    "            with prng_loop(prng=prng, count=5) as prng_sample:\n",
+    "                play_indexed(pulses, prng_sample)\n",
+    "                # Readout _inside_ the first PRNG loop\n",
+    "                with section():\n",
+    "                    reserve(\"drive\")\n",
+    "                    play(\"measure\", const_pulse)\n",
+    "                    acquire(\"acquire\", kernel=const_pulse, handle=\"h1\")\n",
+    "                    delay(\"measure\", 100e-9)\n",
+    "\n",
+    "        # reseed the PRNG with a different value, e.g. 0xBEEF, and an upper value of 4\n",
+    "        with prng_setup(seed=0xBEEF, range=5) as prng2:\n",
+    "            with prng_loop(prng=prng2, count=10) as prng_sample2:\n",
+    "                play_indexed(pulses[::2], prng_sample2)\n",
+    "                # Readout _inside_ the second PRNG loop\n",
+    "                with section():\n",
+    "                    reserve(\"drive\")\n",
+    "                    play(\"measure\", const_pulse)\n",
+    "                    acquire(\"acquire\", kernel=const_pulse, handle=\"h2\")\n",
+    "                    delay(\"measure\", 100e-9)\n",
+    "\n",
+    "\n",
+    "exp = prng_reseeding_example()\n",
+    "q0_ls = device_setup.logical_signal_groups[\"q0\"].logical_signals\n",
+    "exp.map_signal(\"drive\", q0_ls[\"drive_line\"])\n",
+    "exp.map_signal(\"measure\", q0_ls[\"measure_line\"])\n",
+    "exp.map_signal(\"acquire\", q0_ls[\"acquire_line\"])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "30ee71d0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "compiled = session.compile(exp)\n",
+    "\n",
+    "results = session.run(compiled)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3bc0c176",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for handle in [\"h1\", \"h2\"]:\n",
+    "    print(f\"=== Result handle {handle} ===\")\n",
+    "    acquired_results = results.acquired_results[handle]\n",
+    "    print(f\"Result shape: {acquired_results.data.shape}\")\n",
+    "    print(f\"Result axes: {acquired_results.axis_name}\")\n",
+    "    print(\"Result coordinates:\")\n",
+    "    for name, coords in zip(acquired_results.axis_name, acquired_results.axis):\n",
+    "        print(f\"  {name}: {coords}\")\n",
+    "    print()\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "d1d64ebb",
+   "metadata": {},
+   "source": [
+    "### 3.2 Multiple PRNG loops without reseeding\n",
+    "\n",
+    "Similarly, we can also opt _not_ to reseed, and directly start another `prng_loop()`. \n",
+    "\n",
+    "```python\n",
+    "with prng_setup(seed=0xCAFE, range=10) as prng:\n",
+    "    with prng_loop(prng=prng, count=...) as prng_sample:\n",
+    "        with match(prng_sample=prng_sample):\n",
+    "            # play something conditionally on `prng_sample`\n",
+    "            ...\n",
+    "\n",
+    "    # do something that is not randomized\n",
+    "    play(...)\n",
+    "\n",
+    "    # enter another PRNG loop without reseeding\n",
+    "    with prng_loop(prng=prng, count=...) as prng_sample2:\n",
+    "        with match(prng_sample=prng_sample2):\n",
+    "            # play something conditionally on `prng_sample2`\n",
+    "            ...\n",
+    "```\n",
+    "\n",
+    "\n",
+    "\n",
+    "#### ⚠️ Note \n",
+    "When using multiple PRNG loops without reseeding, the values provided by `PRNGSample.values` are not correct. 
Similarly, the values stored in the results object (`AcquiredResult.axis`) are also not accurate. Indeed, these values are computed under the assumption that the PRNG _was_ in fact freshly seeded before entering the loop.\n",
+    "\n",
+    "\n",
+    "If you still need to know the values the PRNG will emit, use `laboneq.core.utilities.prng` to simulate the PRNG at a lower level."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c5f7193f09eab315",
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "from laboneq.core.utilities.prng import PRNG as PRNGSim\n",
+    "\n",
+    "# `upper` is the maximum value produced, i.e. it corresponds to `range - 1` in the DSL\n",
+    "prng_sim = PRNGSim(seed=0xCAFE, upper=17)\n",
+    "\n",
+    "# the first 10 values\n",
+    "print([next(prng_sim) for _ in range(10)])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "90529c93",
+   "metadata": {},
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/examples/00_reference/15_Advanced_sweeping_examples.ipynb b/examples/00_reference/15_Advanced_sweeping_examples.ipynb
new file mode 100644
index 0000000..99b9d47
--- /dev/null
+++ b/examples/00_reference/15_Advanced_sweeping_examples.ipynb
@@ -0,0 +1,483 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "6465da362e29365b",
+   "metadata": {
+    "collapsed": false
+   },
+   "source": [
+    "# Changing the experiment structure with a sweep - match-case statements with sweep parameters\n",
+    "\n",
+    "Typical sweep parameters in standard experiments change some aspect of the experimental pulse sequence without changing its underlying structure. \n",
+    "However, in experiments like randomized benchmarking or dynamical decoupling, each instance of a sweep parameter will affect the structure of the pulse sequence itself. \n",
+    "This notebook shows how to achieve this behavior in LabOne Q by constructing a match-case statement conditioned on a sweep parameter.\n",
+    "\n",
+    "Advanced examples are also available; for example, the [randomized benchmarking demonstration](https://github.com/zhinst/laboneq/blob/main/examples/02_advanced_qubit_experiments/01_randomized_benchmarking.ipynb) notebook uses this behavior."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "5d0dde9c",
+   "metadata": {},
+   "source": [
+    "## 0. 
Imports and setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9b34660b7adcef48", + "metadata": { + "ExecuteTime": { + "end_time": "2024-01-15T16:41:18.267029100Z", + "start_time": "2024-01-15T16:41:16.310545800Z" + }, + "collapsed": false + }, + "outputs": [], + "source": [ + "from __future__ import annotations\n", + "\n", + "from laboneq.contrib.example_helpers.plotting.plot_helpers import plot_simulation\n", + "from laboneq.dsl.experiment.builtins import *\n", + "from laboneq.simple import *" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f53b090c", + "metadata": {}, + "outputs": [], + "source": [ + "# construct a simple device setup\n", + "device_setup = DeviceSetup(uid=\"my_QCCS\")\n", + "\n", + "device_setup.add_dataserver(host=\"localhost\", port=\"8004\")\n", + "\n", + "device_setup.add_instruments(\n", + " SHFQC(uid=\"device_shfqc\", address=\"dev12345\", device_options=\"SHFQC/QC6CH\")\n", + ")\n", + "\n", + "device_setup.add_connections(\n", + " \"device_shfqc\",\n", + " create_connection(to_signal=\"q0/drive_line\", ports=\"SGCHANNELS/0/OUTPUT\"),\n", + " create_connection(to_signal=\"q0/measure_line\", ports=\"QACHANNELS/0/OUTPUT\"),\n", + " create_connection(to_signal=\"q0/acquire_line\", ports=\"QACHANNELS/0/INPUT\"),\n", + ")\n", + "\n", + "# set a minimal calibration to the device setup\n", + "drive_lo = Oscillator(frequency=1e9)\n", + "measure_lo = Oscillator(frequency=4e9)\n", + "cal = Calibration()\n", + "cal[\"/logical_signal_groups/q0/drive_line\"] = SignalCalibration(\n", + " local_oscillator=drive_lo\n", + ")\n", + "cal[\"/logical_signal_groups/q0/measure_line\"] = SignalCalibration(\n", + " local_oscillator=measure_lo\n", + ")\n", + "cal[\"/logical_signal_groups/q0/acquire_line\"] = SignalCalibration(\n", + " local_oscillator=measure_lo\n", + ")\n", + "device_setup.set_calibration(cal)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6f16ba49", + "metadata": {}, + "outputs": [], + "source": [ + "# connect to the session\n", + "emulate = True\n", + "\n", + "session = Session(device_setup)\n", + "session.connect(do_emulation=emulate)" + ] + }, + { + "cell_type": "markdown", + "id": "58524df45b40d762", + "metadata": { + "collapsed": false + }, + "source": [ + "## 1. Choose different sections or pulses based on sweep parameter\n", + "\n", + "In short, a sweep parameters in LabOne Q can appear as the target of a match block:\n", + "\n", + "```python\n", + "with sweep(...) as p:\n", + " ...\n", + " with match(sweep_parameter=p):\n", + " with case(0):\n", + " ...\n", + " with case(1):\n", + " ...\n", + " ...\n", + "```\n", + "The individual match arms are selected based on the current value of the sweep parameter when stepping through the sweep.\n", + "\n", + "The following examples sweep the parameter in real-time. This makes for easy visualization using the output simulator. 
However, the sweep may equally happen in near-time.\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "474d7507",
+   "metadata": {},
+   "source": [
+    "### 1.1 Simple example"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "bd6afdab963dff11",
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-01-15T16:41:18.726852300Z",
+     "start_time": "2024-01-15T16:41:18.366296600Z"
+    },
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# define a set of pulses\n",
+    "pulse_const = pulse_library.const(length=100e-9)\n",
+    "pulse_saw = pulse_library.sawtooth(length=100e-9)\n",
+    "\n",
+    "\n",
+    "# define an experiment\n",
+    "@experiment(signals=[\"drive\"])\n",
+    "def match_sweep_simple():\n",
+    "    map_signal(\n",
+    "        \"drive\", device_setup.logical_signal_groups[\"q0\"].logical_signals[\"drive_line\"]\n",
+    "    )\n",
+    "    with acquire_loop_rt(1):\n",
+    "        with sweep_range(start=0, stop=1, count=2) as pulse_type_sweep:\n",
+    "            with section():\n",
+    "                delay(\"drive\", 100e-9)\n",
+    "            # play either a constant or a sawtooth pulse, depending on the value of pulse_type_sweep\n",
+    "            with match(sweep_parameter=pulse_type_sweep):\n",
+    "                with case(0):\n",
+    "                    play(\"drive\", pulse_const)\n",
+    "                with case(1):\n",
+    "                    play(\"drive\", pulse_saw)\n",
+    "            with section():\n",
+    "                delay(\"drive\", 100e-9)\n",
+    "\n",
+    "\n",
+    "# compile experiment and plot the simulated output\n",
+    "compiled_match_sweep = session.compile(match_sweep_simple())\n",
+    "\n",
+    "plot_simulation(\n",
+    "    compiled_match_sweep,\n",
+    "    start_time=0,\n",
+    "    length=6e-7,\n",
+    "    signals=[\"drive\"],\n",
+    "    plot_height=4,\n",
+    "    plot_width=12,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "a314289187bda203",
+   "metadata": {
+    "collapsed": false
+   },
+   "source": [
+    "### 1.2 Advanced example - Nesting of `match` blocks for different sweep parameters"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "12fefa7569767b2c",
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-01-15T16:41:18.730082600Z",
+     "start_time": "2024-01-15T16:41:18.686897Z"
+    },
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# define a set of pulses\n",
+    "pulse_const = pulse_library.const(length=100e-9)\n",
+    "pulse_saw = pulse_library.sawtooth(length=100e-9)\n",
+    "pulse_gauss = pulse_library.gaussian(length=100e-9)\n",
+    "pulse_triangle = pulse_library.triangle(length=100e-9)\n",
+    "\n",
+    "\n",
+    "# define an experiment\n",
+    "@experiment(signals=[\"drive\"])\n",
+    "def match_sweep_nested():\n",
+    "    map_signal(\n",
+    "        \"drive\", device_setup.logical_signal_groups[\"q0\"].logical_signals[\"drive_line\"]\n",
+    "    )\n",
+    "    with acquire_loop_rt(1):\n",
+    "        with sweep_range(0, 1, 2) as pulse_type_sweep_1:\n",
+    "            with sweep_range(0, 1, 2) as pulse_type_sweep_2:\n",
+    "                with section():\n",
+    "                    delay(\"drive\", 100e-9)\n",
+    "                with match(sweep_parameter=pulse_type_sweep_1):\n",
+    "                    with case(0):\n",
+    "                        with match(sweep_parameter=pulse_type_sweep_2):\n",
+    "                            with case(0):\n",
+    "                                play(\"drive\", pulse_const)\n",
+    "                            with case(1):\n",
+    "                                play(\"drive\", pulse_saw)\n",
+    "                    with case(1):\n",
+    "                        with match(sweep_parameter=pulse_type_sweep_2):\n",
+    "                            with case(0):\n",
+    "                                play(\"drive\", pulse_gauss)\n",
+    "                            with case(1):\n",
+    "                                play(\"drive\", pulse_triangle)\n",
+    "                with section():\n",
+    "                    delay(\"drive\", 100e-9)\n",
+    "\n",
+    "\n",
+    "# compile experiment and plot the simulated output\n",
+    "compiled_match_sweep_nested = session.compile(match_sweep_nested())\n",
+    "\n",
+    "plot_simulation(\n",
+    "    compiled_match_sweep_nested,\n",
+    "    start_time=0,\n",
+    "    length=1.25e-6,\n",
+    "    signals=[\"drive\"],\n",
+    "    plot_height=4,\n",
+    "    plot_width=12,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4e1bddaafe46f27c",
+   "metadata": {
+    "collapsed": false
+   },
+   "source": [
+    "## 2. Sweeping the pulse count, e.g. for dynamical decoupling or RB\n",
+    "\n",
+    "While LabOne Q does not yet have first-class support for sweeping the pulse count, matchable sweep parameters allow us to get there with only minor workarounds. We can create a dedicated `case` section for every pulse count, such that `case(N)` contains `N` pulses.\n",
+    "\n",
+    "For example, the following plays 1, then 2, and finally 3 pulses."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "181e75ad",
+   "metadata": {},
+   "source": [
+    "### 2.1 Simple example"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "b2f17eb6c1bfd242",
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-01-15T16:41:18.858824100Z",
+     "start_time": "2024-01-15T16:41:18.692624Z"
+    },
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# define a pulse\n",
+    "pulse = pulse_library.const(length=30e-9)\n",
+    "\n",
+    "\n",
+    "# define an experiment\n",
+    "@experiment(signals=[\"drive\"])\n",
+    "def match_pulse_count_simple():\n",
+    "    map_signal(\n",
+    "        \"drive\", device_setup.logical_signal_groups[\"q0\"].logical_signals[\"drive_line\"]\n",
+    "    )\n",
+    "    with acquire_loop_rt(1):\n",
+    "        with sweep_range(start=0, stop=2, count=3) as pulse_number_sweep:\n",
+    "            with section():\n",
+    "                delay(\"drive\", 100e-9)\n",
+    "            # vary the number of pulses played based on the value of pulse_number_sweep\n",
+    "            with match(sweep_parameter=pulse_number_sweep):\n",
+    "                with case(0):\n",
+    "                    play(\"drive\", pulse)\n",
+    "                with case(1):\n",
+    "                    play(\"drive\", pulse)\n",
+    "                    delay(\"drive\", 30e-9)\n",
+    "                    play(\"drive\", pulse)\n",
+    "                with case(2):\n",
+    "                    play(\"drive\", pulse)\n",
+    "                    delay(\"drive\", 30e-9)\n",
+    "                    play(\"drive\", pulse)\n",
+    "                    delay(\"drive\", 30e-9)\n",
+    "                    play(\"drive\", pulse)\n",
+    "\n",
+    "            with section():\n",
+    "                delay(\"drive\", 100e-9)\n",
+    "\n",
+    "\n",
+    "# compile experiment and plot the simulated output\n",
+    "compiled_match_pulse_count = session.compile(match_pulse_count_simple())\n",
+    "\n",
+    "plot_simulation(\n",
+    "    compiled_match_pulse_count,\n",
+    "    start_time=0,\n",
+    "    length=8e-7,\n",
+    "    signals=[\"drive\"],\n",
+    "    plot_height=4,\n",
+    "    plot_width=12,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "2724e3fc",
+   "metadata": {},
+   "source": [
+    "### 2.2 Advanced example - using a helper function to implicitly construct the match-case statement"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "d5c648d261e30916",
+   "metadata": {
+    "collapsed": false
+   },
+   "source": [
+    "This is a helper function that allows us to conveniently express a _parametrized_ number of pulses.\n",
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9357e18df1ecf61d",
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-01-15T16:41:18.876729900Z",
+     "start_time": "2024-01-15T16:41:18.743876200Z"
+    },
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "def repeat(count: int | SweepParameter | LinearSweepParameter):\n",
+    "    def decorator(f):\n",
+    "        if isinstance(count, (LinearSweepParameter, SweepParameter)):\n",
+    "            with match(sweep_parameter=count):\n",
+    "                for v in count.values:\n",
+    "                    with case(v):\n",
+    "                        for _ in range(int(v)):\n",
+    "                            f()\n",
+    "        else:\n",
+    "            for _ in range(count):\n",
+    "                f()\n",
+    "\n",
+    "    return decorator"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "3820e235fa7c470e",
+   "metadata": {
+    "collapsed": false
+   },
+   "source": [
+    "Now a similar experiment is easily expressed. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "13865f52e03d2163",
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-01-15T16:41:19.974208200Z",
+     "start_time": "2024-01-15T16:41:18.801933100Z"
+    },
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "# define 180 and 90 degree rotations\n",
+    "pulse_pi = pulse_library.gaussian(length=30e-9)\n",
+    "pulse_pi_half = pulse_library.gaussian(length=30e-9, amplitude=0.5)\n",
+    "\n",
+    "\n",
+    "# define a dynamical decoupling experiment\n",
+    "@experiment(signals=[\"drive\"])\n",
+    "def dynamical_decoupling():\n",
+    "    map_signal(\n",
+    "        \"drive\", device_setup.logical_signal_groups[\"q0\"].logical_signals[\"drive_line\"]\n",
+    "    )\n",
+    "    with acquire_loop_rt(1):\n",
+    "        with sweep_range(start=2, stop=50, count=10) as pulse_count:\n",
+    "            with section(length=2.5e-6):\n",
+    "                with section():\n",
+    "                    play(\"drive\", pulse_pi_half)\n",
+    "                    delay(\"drive\", 15e-9)\n",
+    "\n",
+    "                @repeat(pulse_count)\n",
+    "                def _():\n",
+    "                    with section():\n",
+    "                        play(\"drive\", pulse_pi)\n",
+    "                        delay(\"drive\", 15e-9)\n",
+    "\n",
+    "                with section():\n",
+    "                    play(\"drive\", pulse_pi_half)\n",
+    "\n",
+    "\n",
+    "# compile experiment and plot the simulated output\n",
+    "compiled_dynamical_decoupling = session.compile(dynamical_decoupling())\n",
+    "\n",
+    "plot_simulation(\n",
+    "    compiled_dynamical_decoupling,\n",
+    "    start_time=0,\n",
+    "    length=0.6e-5,\n",
+    "    signals=[\"drive\"],\n",
+    "    plot_height=5,\n",
+    "    plot_width=15,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e3b001bbe8a1c6cf",
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-01-15T16:41:19.974208200Z",
+     "start_time": "2024-01-15T16:41:19.950091Z"
+    },
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/examples/02_advanced_qubit_experiments/01_randomized_benchmarking.ipynb b/examples/02_advanced_qubit_experiments/01_randomized_benchmarking.ipynb
index 6a96b3b..c0ab695 100644
--- a/examples/02_advanced_qubit_experiments/01_randomized_benchmarking.ipynb
+++ b/examples/02_advanced_qubit_experiments/01_randomized_benchmarking.ipynb
@@ -38,31 +38,23 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# LabOne Q:\n",
-    "from laboneq.simple import *\n",
+    "from 
__future__ import annotations\n", "\n", - "# additional imports needed for Clifford gate calculation\n", - "import numpy as np\n", "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "\n", + "from laboneq.contrib.example_helpers.plotting.plot_helpers import plot_simulation\n", "\n", "# Helpers:\n", + "# additional imports needed for Clifford gate calculation\n", "from laboneq.contrib.example_helpers.randomized_benchmarking_helper import (\n", - " make_pauli_gate_map,\n", " clifford_parametrized,\n", " generate_play_rb_pulses,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e221aa1c", - "metadata": {}, - "outputs": [], - "source": [ - "## hardcoded properties:\n", - "GATE_LENGTH = 64e-9 # single Cliffordgate length\n", - "SIGMA = 1 / 3 # shape - gaussian with width = 1/3 length" + " make_pauli_gate_map,\n", + ")\n", + "\n", + "# LabOne Q:\n", + "from laboneq.simple import *" ] }, { @@ -84,55 +76,30 @@ "id": "c463c14f", "metadata": {}, "source": [ - "### 1.1 Setup Descriptor" + "### 1.1 Create Device Setup" ] }, { "cell_type": "code", "execution_count": null, - "id": "4cf8171b", + "id": "16561398", "metadata": {}, "outputs": [], "source": [ - "class MyRack:\n", - " shfsg_address = \"DEV12050\"\n", - " shfqa_address = \"DEV12036\"\n", - " pqsc_address = \"DEV10056\"\n", - "\n", - " server_host = \"10.42.11.0\"\n", - " server_port = \"8004\"\n", - " setup_name = \"my_setup\"\n", - "\n", - "\n", - "rack = MyRack\n", - "\n", - "my_descriptor = f\"\"\"\\\n", - "instruments:\n", - " SHFQA:\n", - " - address: {rack.shfqa_address}\n", - " uid: device_shfqa\n", - " SHFSG:\n", - " - address: {rack.shfsg_address}\n", - " uid: device_shfsg\n", - " PQSC:\n", - " - address: {rack.pqsc_address}\n", - " uid: device_pqsc\n", - "connections:\n", - " device_shfqa:\n", - " - iq_signal: q0/measure_line\n", - " ports: [QACHANNELS/0/OUTPUT]\n", - " - acquire_signal: q0/acquire_line\n", - " ports: [QACHANNELS/0/INPUT]\n", - " device_shfsg:\n", - " - iq_signal: q0/drive_line\n", - " ports: SGCHANNELS/0/OUTPUT\n", - " device_pqsc:\n", - " - to: device_shfqa\n", - " port: ZSYNCS/7\n", - " - to: device_shfsg\n", - " port: ZSYNCS/6\n", - " - internal_clock_signal\n", - "\"\"\"" + "device_setup = DeviceSetup(uid=\"my_QCCS\")\n", + "\n", + "device_setup.add_dataserver(host=\"localhost\", port=\"8004\")\n", + "\n", + "device_setup.add_instruments(\n", + " SHFQC(uid=\"device_shfqc\", address=\"dev12345\", device_options=\"SHFQC/QC6CH\")\n", + ")\n", + "\n", + "device_setup.add_connections(\n", + " \"device_shfqc\",\n", + " create_connection(to_signal=\"q0/drive_line\", ports=\"SGCHANNELS/0/OUTPUT\"),\n", + " create_connection(to_signal=\"q0/measure_line\", ports=\"QACHANNELS/0/OUTPUT\"),\n", + " create_connection(to_signal=\"q0/acquire_line\", ports=\"QACHANNELS/0/INPUT\"),\n", + ")" ] }, { @@ -155,16 +122,16 @@ "qubit_parameters = {\n", " \"freq\": 100e6, # qubit 0 drive frequency in [Hz] - relative to local oscillator for qubit drive upconversion\n", " \"ro_freq\": 50e6,\n", - " \"ro_delay\": 0, # 15e-9,#100e-9,\n", - " \"ro_int_delay\": 0e-9, # 40-9,\n", + " \"ro_delay\": 0,\n", + " \"ro_int_delay\": 0e-9,\n", " \"qb_len_spec\": 1e-6,\n", " \"qb_amp_spec\": 1.0,\n", " \"pi_amp\": 1,\n", " \"qb_len\": 200e-9,\n", " \"freq_ef\": -500e6,\n", - " \"ro_len\": 2.0e-6,\n", - " \"ro_amp\": 1,\n", - " \"relax\": 1e-6,\n", + " \"ro_len\": 200e-9,\n", + " \"ro_amp\": 0.8,\n", + " \"relax\": 50e-9,\n", "}\n", "\n", "# up / downconversion settings - to convert between IF and RF 
frequencies\n", @@ -191,8 +158,6 @@ "outputs": [], "source": [ "# function that defines a setup calibration containing the qubit / readout parameters\n", - "\n", - "\n", "def define_calibration(qubit_parameters, lo_settings):\n", " qubit0_ro_lo = Oscillator(\n", " uid=\"ro_lo_\" + \"q0\" + \"_osc\",\n", @@ -242,7 +207,7 @@ " local_oscillator=qubit0_ro_lo,\n", " range=-10,\n", " # add a threshold for the state discrimination -- this requires optimized readout integrator weights\n", - " threshold=0.5,\n", + " threshold=0.0,\n", " )\n", "\n", " return my_calibration" @@ -254,7 +219,7 @@ "id": "001f2e70", "metadata": {}, "source": [ - "### 1.4 Create Device Setup and Apply Baseline Calibration" + "### 1.4 Apply Baseline Calibration" ] }, { @@ -264,22 +229,14 @@ "metadata": {}, "outputs": [], "source": [ - "# define the DeviceSetup from descriptor - additionally include information on the dataserver used to connect to the instruments\n", - "my_setup = DeviceSetup.from_descriptor(\n", - " my_descriptor,\n", - " server_host=rack.server_host,\n", - " server_port=rack.server_port,\n", - " setup_name=rack.setup_name,\n", - ")\n", - "\n", "# define Calibration object based on qubit control and readout parameters\n", "my_calibration = define_calibration(qubit_parameters, lo_settings)\n", "# apply calibration to device setup\n", - "my_setup.set_calibration(my_calibration)\n", + "device_setup.set_calibration(my_calibration)\n", "\n", "\n", "## define shortcut to logical signals for convenience\n", - "lsg_q0 = my_setup.logical_signal_groups[\"q0\"].logical_signals\n", + "lsg_q0 = device_setup.logical_signal_groups[\"q0\"].logical_signals\n", "drive_Oscillator_q0 = lsg_q0[\"drive_line\"].oscillator\n", "readout_Oscillator_q0 = lsg_q0[\"measure_line\"].oscillator\n", "acquire_Oscillator_q0 = lsg_q0[\"acquire_line\"].oscillator\n", @@ -310,7 +267,7 @@ "source": [ "emulate = True # perform experiments in emulation mode only?\n", "\n", - "my_session = Session(device_setup=my_setup)\n", + "my_session = Session(device_setup=device_setup)\n", "my_session.connect(do_emulation=emulate)" ] }, @@ -343,9 +300,6 @@ "metadata": {}, "outputs": [], "source": [ - "# how many averages per point: 2^n_average\n", - "n_average = 12\n", - "\n", "# qubit readout pulse\n", "readout_pulse = pulse_library.const(\n", " uid=\"readout_pulse\",\n", @@ -353,7 +307,7 @@ " amplitude=qubit_parameters[\"ro_amp\"],\n", ")\n", "# integration weights for qubit measurement\n", - "readout_weighting_function = pulse_library.const(\n", + "integration_kernel = pulse_library.const(\n", " uid=\"readout_weighting_function\", length=qubit_parameters[\"ro_len\"], amplitude=1.0\n", ")" ] @@ -366,7 +320,7 @@ "source": [ "#### 2.1.1 Adjust Pulse Parameters for Clifford Gates\n", "\n", - "Calculate the basic gate set and the pulse objects corresponding to them" + "Define and prepare the basic gate set and the pulse objects corresponding to them" ] }, { @@ -376,14 +330,15 @@ "metadata": {}, "outputs": [], "source": [ - "pulse_ref = pulse_library.gaussian\n", - "pulse_parameters = {\"sigma\": SIGMA}\n", + "pulse_reference = pulse_library.gaussian\n", + "pulse_parameters = {\"sigma\": 1 / 3}\n", + "pulse_length = 64e-9\n", "\n", "gate_map = make_pauli_gate_map(\n", " pi_pulse_amp=0.8,\n", - " pi_half_pulse_amp=0.5,\n", - " excitation_length=GATE_LENGTH,\n", - " pulse_factory=pulse_ref,\n", + " pi_half_pulse_amp=0.42,\n", + " excitation_length=pulse_length,\n", + " pulse_factory=pulse_reference,\n", " pulse_kwargs=pulse_parameters,\n", ")" ] @@ -395,26 +350,7 
@@ "metadata": {}, "source": [ "### 2.2 Define and run the RB Experiment \n", - "The RB experiment will consist of random sequences of different lengths, where each sequence length has a number of instances." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "21b5d687", - "metadata": {}, - "outputs": [], - "source": [ - "# Different sequence lengths will range from 2^1 to 2^max_seq_length\n", - "max_seq_length = 3\n", - "seq_lengths = [2**it for it in range(1, max_seq_length + 1)]\n", - "\n", - "# number of different random sequences per length\n", - "n_seq_per_length = 10\n", - "\n", - "# the maximum sequence duration is determined by its length,\n", - "# the max number of basic gates in each Clifford gate, and the length of each gate\n", - "max_seq_duration = 2**max_seq_length * 3 * GATE_LENGTH" + "The RB experiment will consist of random sequences of different lengths, where each sequence length contains a number of random instances." ] }, { @@ -424,7 +360,7 @@ "metadata": {}, "source": [ "### Create Randomized Benchmarking Experiment\n", - "In real time (within `acquire_loop_rt`), the sequence lengths are swept, and for each sequence length, `n_seq_per_length` random sequences are created.\n", + "In real time (within `acquire_loop_rt`), the sequence lengths are swept, and for each sequence length, `n_sequences_per_length` random sequences are created.\n", "\n", "Each random sequence consists of three sections:\n", "- A right-aligned drive section, which is populated by the helper function `generate_play_rb_pulses`\n", @@ -439,65 +375,162 @@ { "cell_type": "code", "execution_count": null, - "id": "e46ebe4e", + "id": "08969d98", "metadata": {}, "outputs": [], "source": [ - "exp_rb = Experiment(\n", - " uid=\"RandomizedBenchmark\",\n", - " signals=[\n", - " ExperimentSignal(\"drive\"),\n", - " ExperimentSignal(\"measure\"),\n", - " ExperimentSignal(\"acquire\"),\n", - " ],\n", - ")\n", - "\n", - "# outer loop - real-time, cyclic averaging in standard integration mode\n", - "with exp_rb.acquire_loop_rt(\n", - " uid=\"rb_shots\",\n", - " count=pow(2, n_average),\n", - " averaging_mode=AveragingMode.CYCLIC,\n", - " acquisition_type=AcquisitionType.DISCRIMINATION,\n", + "# define a convenience function to generate the RB sequences\n", + "def sweep_rb_pulses(\n", + " sequence_length: SweepParameter | LinearSweepParameter,\n", + " exp: Experiment,\n", + " signal: str,\n", + " cliffords,\n", + " gate_map,\n", + " rng,\n", "):\n", - " # inner loop - sweep over sequence lengths\n", - " for seq_length in seq_lengths:\n", - " # innermost loop - different random sequences for each length\n", - " for _ in range(n_seq_per_length):\n", - " with exp_rb.section(\n", - " length=max_seq_duration, alignment=SectionAlignment.RIGHT\n", - " ):\n", + " with exp.match(sweep_parameter=sequence_length):\n", + " for v in sequence_length.values:\n", + " with exp.case(v):\n", " generate_play_rb_pulses(\n", - " exp_rb, \"drive\", seq_length, clifford_parametrized, gate_map\n", - " )\n", - " # readout and data acquisition\n", - " with exp_rb.section():\n", - " exp_rb.reserve(\"drive\")\n", - " exp_rb.play(signal=\"measure\", pulse=readout_pulse)\n", - " # trigger signal data acquisition\n", - " exp_rb.acquire(\n", - " signal=\"acquire\",\n", - " handle=f\"acq_{seq_length}\", # use an individual handle for every sequence length\n", - " kernel=readout_weighting_function,\n", - " )\n", - " # relax time after readout - for qubit relaxation to groundstate and signal processing\n", - " with 
exp_rb.section(length=1e-6):\n", - " exp_rb.reserve(signal=\"measure\")\n", - " exp_rb.reserve(signal=\"drive\")" + " exp=exp,\n", + " signal=signal,\n", + " seq_length=v,\n", + " cliffords=cliffords,\n", + " gate_map=gate_map,\n", + " rng=rng,\n", + " )" ] }, { "cell_type": "code", "execution_count": null, - "id": "94cd6836", + "id": "4423ea79", "metadata": {}, "outputs": [], "source": [ - "# set signal map for the experiment\n", - "exp_rb.set_signal_map(map_q0)\n", + "# define the RB experiment\n", + "def define_rb_experiment(\n", + " num_average=2**8,\n", + " min_sequence_exponent=1,\n", + " max_sequence_exponent=8,\n", + " chunk_count=1,\n", + " n_sequences_per_length=2,\n", + " signal_map=map_q0,\n", + " pulse_length=pulse_length,\n", + " readout_pulse=readout_pulse,\n", + " integration_kernel=integration_kernel,\n", + " reset_delay=qubit_parameters[\"relax\"],\n", + " prng=None,\n", + "):\n", + " # construct the sweep over sequence length as powers of 2 of the sequence exponent\n", + " sequence_length_sweep = SweepParameter(\n", + " values=np.array(\n", + " [2**it for it in range(min_sequence_exponent, max_sequence_exponent + 1)]\n", + " )\n", + " )\n", + "\n", + " # we are using fixed timing, where the maximum duration is determined by the maximum sequence length\n", + " max_seq_duration = 2**max_sequence_exponent * 3 * pulse_length\n", + "\n", + " prng = np.random.default_rng(seed=42) if prng is None else prng\n", + "\n", + " exp_rb = Experiment(\n", + " uid=\"RandomizedBenchmark\",\n", + " signals=[\n", + " ExperimentSignal(\"drive\"),\n", + " ExperimentSignal(\"measure\"),\n", + " ExperimentSignal(\"acquire\"),\n", + " ],\n", + " )\n", + "\n", + " # outer loop - real-time, cyclic averaging in discrimination mode\n", + " with exp_rb.acquire_loop_rt(\n", + " uid=\"rb_shots\",\n", + " count=num_average,\n", + " averaging_mode=AveragingMode.CYCLIC,\n", + " acquisition_type=AcquisitionType.DISCRIMINATION,\n", + " ):\n", + " # inner loop - sweep over sequence lengths\n", + " with exp_rb.sweep(\n", + " parameter=sequence_length_sweep,\n", + " chunk_count=chunk_count,\n", + " ) as sequence_length:\n", + " # innermost loop - different random sequences for each length\n", + " ## KNOWN ISSUE: using a sweep instead of the for loop here will lead to unchanged sequences\n", + " for num in range(n_sequences_per_length):\n", + " # with exp_rb.sweep(parameter=iteration_sweep):\n", + " with exp_rb.section(\n", + " uid=f\"drive_{num}\",\n", + " length=max_seq_duration,\n", + " alignment=SectionAlignment.RIGHT,\n", + " ):\n", + " sweep_rb_pulses(\n", + " sequence_length,\n", + " exp_rb,\n", + " \"drive\",\n", + " clifford_parametrized,\n", + " gate_map,\n", + " prng,\n", + " )\n", + " # readout and data acquisition\n", + " with exp_rb.section(uid=f\"measure_{num}\", play_after=f\"drive_{num}\"):\n", + " exp_rb.measure(\n", + " measure_pulse=readout_pulse,\n", + " measure_signal=\"measure\",\n", + " acquire_signal=\"acquire\",\n", + " handle=\"rb_results\",\n", + " integration_kernel=integration_kernel,\n", + " reset_delay=reset_delay,\n", + " )\n", + "\n", + " exp_rb.set_signal_map(signal_map)\n", + "\n", + " return exp_rb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e46ebe4e", + "metadata": {}, + "outputs": [], + "source": [ + "# Initialise PRNG\n", + "my_prng = np.random.default_rng(42)\n", + "\n", + "exp_rb = define_rb_experiment(max_sequence_exponent=3, chunk_count=1)\n", "\n", "# compile the experiment\n", - "compiler_settings = {\"SHFSG_MIN_PLAYWAVE_HINT\": 256}\n", 
- "compiled_exp_rb = my_session.compile(exp_rb, compiler_settings=compiler_settings)" + "compiled_exp_rb = my_session.compile(exp_rb)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0272f193", + "metadata": {}, + "outputs": [], + "source": [ + "# KNOWN ISSUE - pulse sheet viewer not working for this experiment\n", + "# show_pulse_sheet('rb_experiment', compiled_exp_rb)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6fa75fd6", + "metadata": {}, + "outputs": [], + "source": [ + "## KNOWN ISSUE - output simulation is not yet supported with piplined experiments, if chunk_count>1\n", + "plot_simulation(\n", + " compiled_exp_rb,\n", + " start_time=0,\n", + " length=10e-6,\n", + " plot_width=15,\n", + " plot_height=4,\n", + " signals=[\"drive\", \"measure\"],\n", + ")" ] }, { @@ -523,29 +556,37 @@ { "cell_type": "code", "execution_count": null, - "id": "ce89b1df", + "id": "7a3de381", "metadata": {}, "outputs": [], "source": [ - "my_results.get_data(\"acq_2\")\n", - "avg_meas = []\n", - "for seq_length in seq_lengths:\n", - " avg_meas.append(np.mean(my_results.get_data(f\"acq_{seq_length}\")))" + "rb_axis = my_results.get_axis(\"rb_results\")\n", + "\n", + "rb_results = my_results.get_data(\"rb_results\")\n", + "rb_results" ] }, { "cell_type": "code", "execution_count": null, - "id": "9ccaaa24", + "id": "47f99d6f", "metadata": {}, "outputs": [], "source": [ - "plt.figure()\n", - "plt.plot(seq_lengths, 1 - np.real(avg_meas))\n", + "# plt.figure()\n", + "plt.plot(rb_axis[0], np.mean(rb_results, axis=1))\n", "plt.xlabel(\"Sequence Length\")\n", "plt.ylabel(\"Average Fidelity\")\n", "plt.show()" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "65a42441", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -553,7 +594,7 @@ "kernelspec": { "display_name": "develop", "language": "python", - "name": "develop" + "name": "python3" }, "language_info": { "codemirror_mode": { @@ -565,7 +606,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.0" + "version": "3.12.1" }, "toc": { "base_numbering": 1, @@ -613,11 +654,6 @@ "_Feature" ], "window_display": false - }, - "vscode": { - "interpreter": { - "hash": "1b9cd87bacaca95307316567b90635eca490931021bc2a81dadc6410ac7e1912" - } } }, "nbformat": 4, diff --git a/examples/06_qasm/02_RandomizedBenchmarking_from_Qiskit.ipynb b/examples/06_qasm/02_RandomizedBenchmarking_from_Qiskit.ipynb new file mode 100644 index 0000000..3e836f1 --- /dev/null +++ b/examples/06_qasm/02_RandomizedBenchmarking_from_Qiskit.ipynb @@ -0,0 +1,873 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# One- and Two-Qubit Randomized Benchmarking in LabOne Q with Qiskit" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this notebook, we'll use the [Qiskit Experiment Library](https://qiskit.org/ecosystem/experiments/apidocs/library.html) to generate one and two qubit randomized benchmarking experiments. \n", + "We'll then export the generated experiment to [OpenQASM](https://openqasm.com/), import these OpenQASM experiments into LabOne Q, compile, and simulate the output signals.\n", + "\n", + "When generating randomized benchmarking experiments in Qiskit, it will return a list of quantum circuits with the specified parameters. \n", + "We show here how to efficiently import, compile and execute such a list into LabOne Q, resulting in a single, large experiment." 
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 0. Python Imports"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# additional imports\n",
+    "from math import pi\n",
+    "\n",
+    "# qiskit\n",
+    "from qiskit import qasm3, transpile\n",
+    "from qiskit_experiments.library import randomized_benchmarking\n",
+    "\n",
+    "# device setup and descriptor\n",
+    "from laboneq._utils import id_generator\n",
+    "from laboneq.contrib.example_helpers.generate_example_datastore import (\n",
+    "    generate_example_datastore,\n",
+    ")\n",
+    "\n",
+    "# plotting functionality\n",
+    "from laboneq.contrib.example_helpers.plotting.plot_helpers import plot_simulation\n",
+    "\n",
+    "# core LabOne Q functionality\n",
+    "from laboneq.simple import *"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 1. LabOne Q startup"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 1.1 Import a pre-calibrated setup (qubits and setup configuration) and set up the LabOne Q session"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Build an in-memory data store with device setup and qubit parameters for the\n",
+    "# example notebooks\n",
+    "setup_db = generate_example_datastore(path=\"\", filename=\":memory:\")\n",
+    "\n",
+    "# load a calibrated device setup from the dummy database\n",
+    "all_data = setup_db.find(\n",
+    "    metadata={\"name\": \"24_tuneable_qubit_setup_shfqc_hdawg_pqsc_calibrated\"}\n",
+    ")\n",
+    "device_setup = setup_db.get(next(all_data))\n",
+    "\n",
+    "all_transmons = setup_db.find(\n",
+    "    condition=lambda metadata: \"tuneable_transmon_\" in metadata[\"name\"]\n",
+    ")\n",
+    "[q0, q1] = [setup_db.get(next(all_transmons)) for _ in range(2)]\n",
+    "\n",
+    "# use emulation mode - no connection to instruments\n",
+    "use_emulation = True\n",
+    "\n",
+    "my_session = Session(device_setup=device_setup)\n",
+    "my_session.connect(do_emulation=use_emulation)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 2. 
Defining a LabOne Q Backend" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here, we add Gate and Pulse Definitions for Transpilation Support from QASM into LabOne Q" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def drive_pulse(qubit: Qubit, label: str, amplitude_scale=1.0):\n", + " \"\"\"Return a drive pulse for the given qubit.\n", + "\n", + " Pulse parameters are taken from the qubit parameters.\n", + " \"\"\"\n", + " return pulse_library.drag(\n", + " uid=f\"{qubit.uid}_{label}\",\n", + " length=qubit.parameters.user_defined[\"pulse_length\"],\n", + " amplitude=amplitude_scale * qubit.parameters.user_defined[\"amplitude_pi\"],\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def rz(qubit: Qubit):\n", + " \"\"\"Return a parameterized rotation (virtual z) gate for the specified qubit.\n", + "\n", + " The gate is a function that takes the angle to rotate and\n", + " returns a LabOne Q section that performs the rotation.\n", + " \"\"\"\n", + "\n", + " def rz_gate(angle: float):\n", + " \"\"\"Rz(theta).\n", + "\n", + " Theta is in radians - implements a virtual z-gate\n", + " \"\"\"\n", + " gate = Section(uid=id_generator(f\"p_{qubit.uid}_rz_{int(180 * angle / pi)}\"))\n", + " gate.play(\n", + " signal=qubit.signals[\"drive\"],\n", + " pulse=None,\n", + " increment_oscillator_phase=angle,\n", + " )\n", + " return gate\n", + "\n", + " return rz_gate" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def measurement(qubit: Qubit):\n", + " \"\"\"Return a measurement operation of the specified qubit.\n", + "\n", + " The operation is a function that takes the measurement handle (a string)\n", + " and returns a LabOne Q section that performs the measurement.\n", + " \"\"\"\n", + "\n", + " def measurement_gate(handle: str):\n", + " \"\"\"Perform a measurement.\n", + "\n", + " Handle is the name of where to store the measurement result. E.g. 
\"meas[0]\".\n", + " \"\"\"\n", + " measure_pulse = pulse_library.gaussian_square(\n", + " uid=f\"{qubit.uid}_readout_pulse\",\n", + " length=qubit.parameters.user_defined[\"readout_length\"],\n", + " amplitude=qubit.parameters.user_defined[\"readout_amplitude\"],\n", + " zero_boundaries=True,\n", + " )\n", + " integration_kernel = pulse_library.const(\n", + " uid=f\"{qubit.uid}_integration_kernel\",\n", + " length=qubit.parameters.user_defined[\"readout_length\"],\n", + " )\n", + "\n", + " gate = Section(uid=id_generator(f\"meas_{qubit.uid}_{handle}\"))\n", + " gate.reserve(signal=qubit.signals[\"drive\"])\n", + " gate.play(signal=qubit.signals[\"measure\"], pulse=measure_pulse)\n", + " gate.acquire(\n", + " signal=qubit.signals[\"acquire\"],\n", + " handle=handle,\n", + " kernel=integration_kernel,\n", + " )\n", + " return gate\n", + "\n", + " return measurement_gate" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def reset(qubit: Qubit, reset_pulse):\n", + " \"\"\"Reset the specified qubit to the ground state with the supplied reset pulse.\n", + "\n", + " The reset gate function takes no arguments and returns a LabOne Q section that performs\n", + " the reset.\n", + " \"\"\"\n", + "\n", + " def reset_gate():\n", + " sig = qubit.signals\n", + " # Reset Section\n", + " reset = Section(uid=f\"{qubit.uid}_reset\")\n", + " # qubit state readout\n", + " readout = measurement(qubit)(f\"{qubit.uid}_qubit_state\")\n", + " # delay after measurement\n", + " readout.delay(\n", + " signal=sig[\"acquire\"],\n", + " time=qubit.parameters.user_defined[\"reset_delay_length\"],\n", + " )\n", + " # real-time feedback, fetching the measurement data identified by handle locally from the QA unit of the SHFQC\n", + " match_case = Match(\n", + " uid=f\"{qubit.uid}_feedback\",\n", + " handle=f\"{qubit.uid}_qubit_state\",\n", + " play_after=readout,\n", + " )\n", + " # measurement result 0 - ground state\n", + " case_0 = Case(uid=f\"{qubit.uid}_0_Case\", state=0)\n", + " case_0.play(signal=sig[\"drive\"], pulse=reset_pulse, amplitude=0.01)\n", + " # measurement result 1 - excited state\n", + " case_1 = Case(uid=f\"{qubit.uid}_1_Case\", state=1)\n", + " # play x180 pulse\n", + " case_1.play(signal=sig[\"drive\"], pulse=reset_pulse)\n", + " match_case.add(case_0)\n", + " match_case.add(case_1)\n", + "\n", + " reset.add(readout)\n", + " reset.add(match_case)\n", + " return reset\n", + "\n", + " return reset_gate" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def cx(control: Qubit, target: Qubit):\n", + " \"\"\"Return a controlled X gate for the specified control and target qubits.\n", + "\n", + " The CX gate function takes the control and target qubit and returns a LabOne Q section that performs\n", + " a controlled X gate between these two qubits using a cross-resonance scheme.\n", + " \"\"\"\n", + "\n", + " def cx_gate():\n", + " cx_id = f\"cx_{control.uid}_{target.uid}\"\n", + "\n", + " gate = Section(uid=id_generator(cx_id))\n", + "\n", + " # define X pulses for target and control\n", + " x180_pulse_control = drive_pulse(control, label=\"x180\")\n", + " x180_pulse_target = drive_pulse(target, label=\"x180\")\n", + "\n", + " # define cancellation pulses for target and control\n", + " cancellation_control_n = pulse_library.gaussian_square(uid=\"CR-\")\n", + " cancellation_control_p = pulse_library.gaussian_square(uid=\"CR+\")\n", + " cancellation_target_p = 
pulse_library.gaussian_square(uid=\"q1+\")\n", + " cancellation_target_n = pulse_library.gaussian_square(uid=\"q1-\")\n", + "\n", + " # play X pulses on both target and control\n", + " x180_both = Section(uid=id_generator(f\"{cx_id}_x_both\"))\n", + " x180_both.play(signal=control.signals[\"drive\"], pulse=x180_pulse_control)\n", + " x180_both.play(signal=target.signals[\"drive\"], pulse=x180_pulse_target)\n", + " gate.add(x180_both)\n", + "\n", + " # First cross-resonance component\n", + " cancellation_p = Section(\n", + " uid=id_generator(f\"{cx_id}_canc_p\"), play_after=x180_both.uid\n", + " )\n", + " cancellation_p.play(signal=target.signals[\"drive\"], pulse=cancellation_target_p)\n", + " cancellation_p.play(\n", + " signal=control.signals[\"flux\"], pulse=cancellation_control_n\n", + " )\n", + " gate.add(cancellation_p)\n", + "\n", + " # play X pulse on control\n", + " x180_control = Section(\n", + " uid=id_generator(f\"{cx_id}_x_q0\"), play_after=cancellation_p.uid\n", + " )\n", + " x180_control.play(signal=control.signals[\"drive\"], pulse=x180_pulse_control)\n", + " gate.add(x180_control)\n", + "\n", + " # Second cross-resonance component\n", + " cancellation_n = Section(\n", + " uid=id_generator(f\"cx_{cx_id}_canc_n\"), play_after=x180_control.uid\n", + " )\n", + " cancellation_n.play(signal=target.signals[\"drive\"], pulse=cancellation_target_n)\n", + " cancellation_n.play(\n", + " signal=control.signals[\"flux\"], pulse=cancellation_control_p\n", + " )\n", + " gate.add(cancellation_n)\n", + "\n", + " return gate\n", + "\n", + " return cx_gate" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Randomized benchmarking circuits from Qiskit" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You'll start by creating Standard RB experiments from the Qiskit Experiment Library [here](https://qiskit.org/ecosystem/experiments/stubs/qiskit_experiments.library.randomized_benchmarking.StandardRB.html#qiskit_experiments.library.randomized_benchmarking.StandardRB). \n", + "We do this for one and two qubits for a few different sequence lengths.\n", + "\n", + "Note that most circuits that can be generated in Qiskit and converted to OpenQASM could be adapted to be run in a similar way in LabOne Q! " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Use Qiskit Experiment Library to Generate RB\n", + "rb1_qiskit_circuits = randomized_benchmarking.StandardRB(\n", + " physical_qubits=[0],\n", + " lengths=[4, 8, 16, 1024, 2048],\n", + " num_samples=1,\n", + ").circuits()\n", + "\n", + "rb2_qiskit_circuits = randomized_benchmarking.StandardRB(\n", + " physical_qubits=[0, 1],\n", + " lengths=[4, 8, 16, 1024],\n", + " num_samples=1,\n", + ").circuits()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "When efficiently importing and executing a list of quantum circuits, there are currently strong limitations on how the measurements can be scheduled in these experiments. \n", + "We therefore strip the measurements from the Qiskit circuits here, and re-add them to the LabOne Q experiment separately when doing the import. 
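As a quick sanity check, you can verify that no measurement operations survive the stripping. This is a sketch, assuming a recent Qiskit version in which `QuantumCircuit.data` yields `CircuitInstruction` objects:

```python
from qiskit_experiments.library import randomized_benchmarking

# Generate a small RB batch and strip the final measurements
circuits = randomized_benchmarking.StandardRB(
    physical_qubits=[0], lengths=[4], num_samples=1
).circuits()
for c in circuits:
    c.remove_final_measurements()

# After stripping, no "measure" instruction should remain; the LabOne Q
# importer re-adds the readout per program during the import.
assert all(
    instruction.operation.name != "measure"
    for c in circuits
    for instruction in c.data
)
```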
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for circuit in rb1_qiskit_circuits:\n", + " circuit.remove_final_measurements()\n", + "\n", + "for circuit in rb2_qiskit_circuits:\n", + " circuit.remove_final_measurements()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rb1_qiskit_circuits[2].draw()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rb2_qiskit_circuits[2].draw()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can then use the Qiskit `transpile` function to obtain a representation of the circuits in your favorite set of basis gates." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Choose basis gates\n", + "rb1_transpiled_circuits = transpile(\n", + " rb1_qiskit_circuits, basis_gates=[\"id\", \"sx\", \"x\", \"rz\", \"cx\"]\n", + ")\n", + "\n", + "rb2_transpiled_circuits = transpile(\n", + " rb2_qiskit_circuits, basis_gates=[\"id\", \"sx\", \"x\", \"rz\", \"cx\"]\n", + ")\n", + "\n", + "rb1_transpiled_circuits[2].draw()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rb1_program_list = []\n", + "for circuit in rb1_transpiled_circuits:\n", + " rb1_program_list.append(qasm3.dumps(circuit))\n", + "\n", + "rb2_program_list = []\n", + "for circuit in rb2_transpiled_circuits:\n", + " rb2_program_list.append(qasm3.dumps(circuit))\n", + "\n", + "\n", + "print(rb1_program_list[2])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4. Execute one Qubit RB" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Define Gates, Load QASM 3 Program, and Go!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, you'll map your OpenQASM gates to signals produced on the instruments using `register_gate` and `register_gate_section` functions. \n", + "\n", + "Once you've done that, you can compile your experiment and plot the output using the LabOne Q simulator." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rb1_gate_store = GateStore()\n", + "\n", + "# Note: the below may need to be updated to match the\n", + "# names of your qubits from your QASM circuit!\n", + "rb1_qubit_map = {\"q[0]\": q0}\n", + "\n", + "# Single qubit gates:\n", + "for oq3_qubit, l1q_qubit in rb1_qubit_map.items():\n", + " rb1_gate_store.register_gate(\n", + " \"sx\",\n", + " oq3_qubit,\n", + " drive_pulse(l1q_qubit, label=\"sx\", amplitude_scale=0.5),\n", + " signal=l1q_qubit.signals[\"drive\"],\n", + " )\n", + " rb1_gate_store.register_gate(\n", + " \"x\",\n", + " oq3_qubit,\n", + " drive_pulse(l1q_qubit, label=\"x\"),\n", + " signal=l1q_qubit.signals[\"drive\"],\n", + " )\n", + " rb1_gate_store.register_gate_section(\"rz\", (oq3_qubit,), rz(l1q_qubit))\n", + " rb1_gate_store.register_gate_section(\n", + " \"measure\", (oq3_qubit,), measurement(l1q_qubit)\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4.1 Compile and execute a single QASM program" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rb1_exp = exp_from_qasm(\n", + " rb1_program_list[2], qubits=rb1_qubit_map, gate_store=rb1_gate_store\n", + ")\n", + "rb1_compiled_exp = my_session.compile(rb1_exp)\n", + "\n", + "# _ = my_session.run(rb1_compiled_exp)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# KNOWN ISSUE - plot simulation not working if no measurement is defined\n", + "plot_simulation(\n", + " rb1_compiled_exp,\n", + " length=1.6e-6,\n", + " plot_width=12,\n", + " plot_height=3,\n", + " signals=[\n", + " \"/logical_signal_groups/q0/drive_line\",\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Draw the circuit from above" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rb1_transpiled_circuits[2].draw()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Look at the pulse sheet" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_pulse_sheet(name=\"1-qubit RB\", compiled_experiment=rb1_compiled_exp)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4.2 Compile and execute a list of QASM programs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "exp = exp_from_qasm_list(\n", + " rb1_program_list,\n", + " qubits=rb1_qubit_map,\n", + " gate_store=rb1_gate_store,\n", + " repetition_time=20e-5,\n", + " batch_execution_mode=\"pipeline\",\n", + " do_reset=False,\n", + " count=1,\n", + ")\n", + "compiled_exp = my_session.compile(exp)\n", + "\n", + "# _ = my_session.run(compiled_exp)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "## KNOWN ISSUE - pulse sheet viewer and output simulation are not available" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4.3 Compile and execute a list of QASM programs - including active qubit reset" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# add reset operation to the gate store\n", + "for oq3_qubit, l1q_qubit in rb1_qubit_map.items():\n", + " 
rb1_gate_store.register_gate_section(\n", + " \"reset\", (oq3_qubit,), reset(l1q_qubit, drive_pulse(l1q_qubit, \"reset\"))\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "exp = exp_from_qasm_list(\n", + " rb1_program_list,\n", + " qubits=rb1_qubit_map,\n", + " gate_store=rb1_gate_store,\n", + " repetition_time=20e-5,\n", + " batch_execution_mode=\"pipeline\",\n", + " do_reset=True,\n", + " count=1,\n", + ")\n", + "compiled_exp = my_session.compile(exp)\n", + "\n", + "# _ = my_session.run(compiled_exp)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 5. Execute two Qubit RB" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Define Gates, Load QASM 3 Program, and Go!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, you'll map your OpenQASM gates to signals produced on the instruments using `register_gate` and `register_gate_section` functions. \n", + "\n", + "Once you've done that, you can compile your experiment and plot the output using the LabOne Q simulator." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rb2_gate_store = GateStore()\n", + "\n", + "# Note: the below may need to be updated to match the\n", + "# names of your qubits from your QASM circuit!\n", + "rb2_qubit_map = {\"q[0]\": q0, \"q[1]\": q1}\n", + "\n", + "# Single qubit gates:\n", + "for oq3_qubit, l1q_qubit in rb2_qubit_map.items():\n", + " rb2_gate_store.register_gate(\n", + " \"sx\",\n", + " oq3_qubit,\n", + " drive_pulse(l1q_qubit, label=\"sx\", amplitude_scale=0.5),\n", + " signal=l1q_qubit.signals[\"drive\"],\n", + " )\n", + " rb2_gate_store.register_gate(\n", + " \"x\",\n", + " oq3_qubit,\n", + " drive_pulse(l1q_qubit, label=\"x\"),\n", + " signal=l1q_qubit.signals[\"drive\"],\n", + " )\n", + " rb2_gate_store.register_gate_section(\"rz\", (oq3_qubit,), rz(l1q_qubit))\n", + " rb2_gate_store.register_gate_section(\n", + " \"measure\", (oq3_qubit,), measurement(l1q_qubit)\n", + " )\n", + "\n", + "# Two qubit gates:\n", + "rb2_gate_store.register_gate_section(\"cx\", (\"q[0]\", \"q[1]\"), cx(q0, q1))\n", + "rb2_gate_store.register_gate_section(\"cx\", (\"q[1]\", \"q[0]\"), cx(q1, q0))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 5.1 Compile and execute a single QASM program" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rb2_exp = exp_from_qasm(\n", + " rb2_program_list[2], qubits=rb2_qubit_map, gate_store=rb2_gate_store\n", + ")\n", + "rb2_compiled_exp = my_session.compile(rb2_exp)\n", + "\n", + "# _ = my_session.run(rb2_compiled_exp)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_simulation(\n", + " rb2_compiled_exp,\n", + " length=20e-6,\n", + " plot_width=12,\n", + " plot_height=3,\n", + " signals=[\n", + " \"/logical_signal_groups/q0/flux_line\",\n", + " \"/logical_signal_groups/q1/flux_line\",\n", + " \"/logical_signal_groups/q0/drive_line\",\n", + " \"/logical_signal_groups/q1/drive_line\",\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Draw the circuit from above" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rb2_transpiled_circuits[2].draw()" + ] + }, + { + "cell_type": 
"markdown", + "metadata": {}, + "source": [ + "#### Look at the pulse sheet" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_pulse_sheet(\n", + " name=\"2-qubit RB\", compiled_experiment=rb2_compiled_exp, max_events_to_publish=10e4\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 5.2 Compile and execute a list of QASM programs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "exp = exp_from_qasm_list(\n", + " rb2_program_list,\n", + " qubits=rb2_qubit_map,\n", + " gate_store=rb2_gate_store,\n", + " repetition_time=100e-5,\n", + " batch_execution_mode=\"pipeline\",\n", + " do_reset=False,\n", + " count=1,\n", + ")\n", + "compiled_exp = my_session.compile(exp)\n", + "\n", + "# _ = my_session.run(rb2_compiled_exp)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "## KNOWN ISSUE - pulse sheet viewer and output simulation are not available" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 5.3 Compile and execute a list of QASM programs - including active qubit reset" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# add reset operation to the gate store\n", + "for oq3_qubit, l1q_qubit in rb2_qubit_map.items():\n", + " rb2_gate_store.register_gate_section(\n", + " \"reset\", (oq3_qubit,), reset(l1q_qubit, drive_pulse(l1q_qubit, \"reset\"))\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "exp = exp_from_qasm_list(\n", + " rb2_program_list,\n", + " qubits=rb2_qubit_map,\n", + " gate_store=rb2_gate_store,\n", + " repetition_time=100e-5,\n", + " batch_execution_mode=\"pipeline\",\n", + " do_reset=True,\n", + " count=1,\n", + ")\n", + "compiled_exp = my_session.compile(exp)\n", + "\n", + "# _ = my_session.run(rb2_compiled_exp)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "zi-py310", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.1" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/06_qasm/02_Two_Qubit_RB_Qiskit.ipynb b/examples/06_qasm/02_Two_Qubit_RB_Qiskit.ipynb deleted file mode 100644 index a2284cf..0000000 --- a/examples/06_qasm/02_Two_Qubit_RB_Qiskit.ipynb +++ /dev/null @@ -1,621 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Two Qubit Randomized Benchmarking in LabOne Q with Qiskit" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In this notebook, you'll use the [Qiskit Experiment Library](https://qiskit.org/ecosystem/experiments/apidocs/library.html) to generate a two qubit randomized benchmarking experiment. You'll then export the generated experiment to [OpenQASM](https://openqasm.com/), import your OpenQASM experiment into LabOne Q, compile, and simulate the output signals." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Python Imports" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# LabOne Q:\n", - "from laboneq.simple import *\n", - "\n", - "# plotting functionality\n", - "from laboneq.contrib.example_helpers.plotting.plot_helpers import *\n", - "\n", - "# device setup and descriptor\n", - "from laboneq._utils import id_generator\n", - "from laboneq.contrib.example_helpers.generate_descriptor import generate_descriptor\n", - "\n", - "# open qasm importer\n", - "from laboneq.openqasm3.gate_store import GateStore\n", - "\n", - "# qiskit\n", - "from qiskit import qasm3, transpile\n", - "from qiskit_experiments.library import randomized_benchmarking\n", - "\n", - "# additional imports\n", - "from math import pi" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Set up Qiskit-generated RB" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You'll start by creating a Standard RB experiment from the Qiskit Experiment Library [here](https://qiskit.org/ecosystem/experiments/stubs/qiskit_experiments.library.randomized_benchmarking.StandardRB.html#qiskit_experiments.library.randomized_benchmarking.StandardRB). For two qubits for a few different clifford lengths.\n", - "\n", - "Note that most circuits that can be generated in Qiskit and converted to OpenQASM could be adapted to be run in a similar way in LabOne Q! " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Use Qiskit Experiment Library to Generate RB\n", - "qiskit_experiment = randomized_benchmarking.StandardRB(\n", - " physical_qubits=[0, 1], lengths=[4, 8, 12]\n", - ").circuits()\n", - "\n", - "qiskit_experiment[2].draw()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can then use the Qiskit `transpile` function to obtain your favorite set of basis gates." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Choose basis gates\n", - "transpiled_circuit = transpile(\n", - " qiskit_experiment, basis_gates=[\"id\", \"sx\", \"x\", \"rz\", \"cx\"]\n", - ")\n", - "\n", - "transpiled_circuit[0].draw()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "program_list = []\n", - "for circuit in transpiled_circuit:\n", - " program_list.append(qasm3.dumps(circuit))\n", - "print(program_list[0])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## LabOne Q Experiment" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Setup, Calibration & Configuration" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You'll define your device setup and calibration below, as well as a function to generate a LabOne Q experiment using the built-in `OpenQasm3Importer`." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "generate_descriptor(\n", - " pqsc=[\"DEV10056\"],\n", - " shfqc_6=[\"DEV12108\"],\n", - " hdawg_8=[\"DEV8138\"],\n", - " number_data_qubits=3,\n", - " number_flux_lines=3,\n", - " multiplex=True,\n", - " number_multiplex=3,\n", - " save=True,\n", - " filename=\"SeaCucumber_SHF_HD_PQSC\",\n", - " include_cr_lines=True,\n", - ")\n", - "\n", - "device_setup = DeviceSetup.from_yaml(\n", - " filepath=\"./Descriptors/SeaCucumber_SHF_HD_PQSC.yaml\",\n", - " server_host=\"ip_address\",\n", - " server_port=\"8004\",\n", - " setup_name=\"my_setup_name\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "q0 = Transmon.from_logical_signal_group(\n", - " \"q0\",\n", - " lsg=device_setup.logical_signal_groups[\"q0\"],\n", - " parameters=TransmonParameters(\n", - " resonance_frequency_ge=6.15e9,\n", - " resonance_frequency_ef=5.85e9,\n", - " drive_lo_frequency=6.1e9,\n", - " readout_resonator_frequency=6.4e9,\n", - " readout_lo_frequency=6.3e9,\n", - " user_defined={\n", - " \"cross_resonance_frequency\": 200e6,\n", - " \"amplitude_pi\": 0.5,\n", - " \"pulse_length\": 50e-9,\n", - " \"readout_len\": 5e-7,\n", - " \"readout_amp\": 0.2,\n", - " \"reset_length\": 200e-9,\n", - " },\n", - " ),\n", - ")\n", - "\n", - "q1 = Transmon.from_logical_signal_group(\n", - " \"q1\",\n", - " lsg=device_setup.logical_signal_groups[\"q1\"],\n", - " parameters=TransmonParameters(\n", - " resonance_frequency_ge=6.25e9,\n", - " resonance_frequency_ef=5.95e9,\n", - " drive_lo_frequency=6.1e9,\n", - " readout_resonator_frequency=6.4e9,\n", - " readout_lo_frequency=6.3e9,\n", - " user_defined={\n", - " \"cross_resonance_frequency\": -200e6,\n", - " \"amplitude_pi\": 0.6,\n", - " \"pulse_length\": 50e-9,\n", - " \"readout_len\": 5e-7,\n", - " \"readout_amp\": 0.2,\n", - " \"reset_length\": 200e-9,\n", - " },\n", - " ),\n", - ")\n", - "\n", - "qubits = [q0, q1]\n", - "for qubit in qubits:\n", - " device_setup.set_calibration(qubit.calibration())\n", - " # set calibration of cross resonance signal lines - not currently included in TransmonQubit calibration method\n", - " device_setup.logical_signal_groups[qubit.uid].logical_signals[\n", - " \"drive_line_cr\"\n", - " ].calibration = SignalCalibration(\n", - " oscillator=Oscillator(\n", - " frequency=qubit.parameters.user_defined[\"cross_resonance_frequency\"],\n", - " modulation_type=ModulationType.HARDWARE,\n", - " )\n", - " )" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Transpilation Support (Gate Definitions)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You'll now define functions to generate pulses and gates from the OpenQASM program text." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def drive_pulse(qubit: Qubit, label, length=50e-9, amplitude=0.6):\n", - " \"\"\"Return a drive pulse for the given qubit.\n", - "\n", - " In practice different drive pulses would be specified for each qubit and operation.\n", - " \"\"\"\n", - " return pulse_library.drag(\n", - " uid=f\"{qubit.uid}_{label}\",\n", - " length=qubit.parameters.user_defined[\"pulse_length\"],\n", - " amplitude=qubit.parameters.user_defined[\"amplitude_pi\"],\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def drive_pulse_root(qubit: Qubit, label, length=50e-9, amplitude=0.6):\n", - " \"\"\"Return a root drive pulse for the given qubit.\n", - "\n", - " In practice different drive pulses would be specified for each qubit and operation.\n", - " \"\"\"\n", - " return pulse_library.drag(\n", - " uid=f\"{qubit.uid}_{label}\",\n", - " length=qubit.parameters.user_defined[\"pulse_length\"],\n", - " amplitude=(qubit.parameters.user_defined[\"amplitude_pi\"]) / 2,\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def rz(qubit: Qubit):\n", - " \"\"\"Return a parameterized Rz gate for the specified qubit.\n", - "\n", - " The gate is a function that takes the angle to rotate and\n", - " returns a LabOne Q section that performs the rotation.\n", - " \"\"\"\n", - "\n", - " def rz_gate(angle: float):\n", - " \"\"\"Rz(theta).\n", - "\n", - " Theta is in radians - implements a virtual z-gate\n", - " \"\"\"\n", - " gate = Section(uid=id_generator(f\"p_{qubit.uid}_rz_{int(180 * angle / pi)}\"))\n", - " gate.play(\n", - " signal=qubit.signals[\"drive\"],\n", - " pulse=None,\n", - " increment_oscillator_phase=angle,\n", - " )\n", - " return gate\n", - "\n", - " return rz_gate" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def measurement(qubit: Qubit):\n", - " \"\"\"Return a measurement operation of the specified qubit.\n", - "\n", - " The operation is a function that takes the measurement handle (a string)\n", - " and returns a LabOne Q section that performs the measurement.\n", - " \"\"\"\n", - "\n", - " def measurement_gate(handle: str):\n", - " \"\"\"Perform a measurement.\n", - "\n", - " Handle is the name of where to store the measurement result. E.g. 
\"meas[0]\".\n", - " \"\"\"\n", - " measure_pulse = pulse_library.gaussian_square(\n", - " uid=f\"{qubit.uid}_readout_pulse\",\n", - " length=qubit.parameters.user_defined[\"readout_len\"],\n", - " amplitude=qubit.parameters.user_defined[\"readout_amp\"],\n", - " )\n", - " integration_kernel = pulse_library.const(\n", - " uid=f\"{qubit.uid}_integration_kernel\",\n", - " length=qubit.parameters.user_defined[\"readout_len\"],\n", - " )\n", - "\n", - " gate = Section(uid=id_generator(f\"meas_{qubit.uid}_{handle}\"))\n", - " gate.reserve(signal=qubit.signals[\"drive\"])\n", - " gate.play(signal=qubit.signals[\"measure\"], pulse=measure_pulse)\n", - " gate.acquire(\n", - " signal=qubit.signals[\"acquire\"],\n", - " handle=handle,\n", - " kernel=integration_kernel,\n", - " )\n", - " return gate\n", - "\n", - " return measurement_gate" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def cx(control: Qubit, target: Qubit):\n", - " \"\"\"Return a controlled X gate for the specified control and target qubits.\n", - "\n", - " The CX gate function takes no arguments and returns a LabOne Q section that performs\n", - " the controllex X gate.\n", - " \"\"\"\n", - "\n", - " def cx_gate():\n", - " cx_id = f\"cx_{control.uid}_{target.uid}\"\n", - "\n", - " gate = Section(uid=id_generator(cx_id))\n", - "\n", - " # define X pulses for target and control\n", - " x180_pulse_control = drive_pulse(control, label=\"x180\")\n", - " x180_pulse_target = drive_pulse(target, label=\"x180\")\n", - "\n", - " # define cancellation pulses for target and control\n", - " cancellation_control_n = pulse_library.gaussian_square(uid=\"CR-\")\n", - " cancellation_control_p = pulse_library.gaussian_square(uid=\"CR+\")\n", - " cancellation_target_p = pulse_library.gaussian_square(uid=\"q1+\")\n", - " cancellation_target_n = pulse_library.gaussian_square(uid=\"q1-\")\n", - "\n", - " # play X pulses on both target and control\n", - " x180_both = Section(uid=id_generator(f\"{cx_id}_x_both\"))\n", - " x180_both.play(signal=control.signals[\"drive\"], pulse=x180_pulse_control)\n", - " x180_both.play(signal=target.signals[\"drive\"], pulse=x180_pulse_target)\n", - " gate.add(x180_both)\n", - "\n", - " # First cross-resonance component\n", - " cancellation_p = Section(\n", - " uid=id_generator(f\"{cx_id}_canc_p\"), play_after=x180_both.uid\n", - " )\n", - " cancellation_p.play(signal=target.signals[\"drive\"], pulse=cancellation_target_p)\n", - " cancellation_p.play(\n", - " signal=control.signals[\"flux\"], pulse=cancellation_control_n\n", - " )\n", - " gate.add(cancellation_p)\n", - "\n", - " # play X pulse on control\n", - " x180_control = Section(\n", - " uid=id_generator(f\"{cx_id}_x_q0\"), play_after=cancellation_p.uid\n", - " )\n", - " x180_control.play(signal=control.signals[\"drive\"], pulse=x180_pulse_control)\n", - " gate.add(x180_control)\n", - "\n", - " # Second cross-resonance component\n", - " cancellation_n = Section(\n", - " uid=id_generator(f\"cx_{cx_id}_canc_n\"), play_after=x180_control.uid\n", - " )\n", - " cancellation_n.play(signal=target.signals[\"drive\"], pulse=cancellation_target_n)\n", - " cancellation_n.play(\n", - " signal=control.signals[\"flux\"], pulse=cancellation_control_p\n", - " )\n", - " gate.add(cancellation_n)\n", - "\n", - " return gate\n", - "\n", - " return cx_gate" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Two Qubit RB" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - 
"You're almost ready to run your experiment!" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Connect to Session" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You'll need to start a LabOne Q session. Here, you'll run the session in emulation mode. If you've modified the descriptor to run on your own devices above, you could connect to them here instead." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "my_session = Session(device_setup=device_setup)\n", - "my_session.connect(do_emulation=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Define Gates, Load QASM 3 Program, and Go!" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, you'll map your OpenQASM gates to signals produced on the instruments using `register_gate` and `register_gate_section` functions. \n", - "\n", - "Once you've done that, you can compile your experiment and plot the output using the LabOne Q simulator." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "gate_store = GateStore()\n", - "\n", - "# Note: the below may need to be updated to match the\n", - "# names of your qubits from your QASM circuit!\n", - "qubit_map = {\"q[0]\": q0, \"q[1]\": q1}\n", - "\n", - "# Single qubit gates:\n", - "\n", - "for oq3_qubit, l1q_qubit in qubit_map.items():\n", - " gate_store.register_gate(\n", - " \"sx\",\n", - " oq3_qubit,\n", - " drive_pulse_root(l1q_qubit, label=\"sx\"),\n", - " signal=l1q_qubit.signals[\"drive\"],\n", - " )\n", - " gate_store.register_gate(\n", - " \"x\",\n", - " oq3_qubit,\n", - " drive_pulse(l1q_qubit, label=\"x\"),\n", - " signal=l1q_qubit.signals[\"drive\"],\n", - " )\n", - " gate_store.register_gate_section(\"rz\", (oq3_qubit,), rz(l1q_qubit))\n", - " gate_store.register_gate_section(\"measure\", (oq3_qubit,), measurement(l1q_qubit))\n", - "\n", - "# Two qubit gates:\n", - "gate_store.register_gate_section(\"cx\", (\"q[0]\", \"q[1]\"), cx(q0, q1))\n", - "gate_store.register_gate_section(\"cx\", (\"q[1]\", \"q[0]\"), cx(q1, q0))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "exp = exp_from_qasm(program_list[0], qubits=qubit_map, gate_store=gate_store)\n", - "compiled_exp = my_session.compile(exp)\n", - "\n", - "plot_simulation(compiled_exp, length=100e-6)\n", - "\n", - "my_results = my_session.run(compiled_exp)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Draw the circuit from above" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can also draw the circuit corresponding to the simulated signals you just produced!" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "transpiled_circuit[0].draw()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Compile and draw more circuits in the list" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can do this for any circuit you've generated in the list." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "exp_1 = exp_from_qasm(program_list[1], qubits=qubit_map, gate_store=gate_store)\n", - "compiled_exp_1 = my_session.compile(exp_1)\n", - "\n", - "plot_simulation(compiled_exp_1, length=100e-6)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "transpiled_circuit[1].draw()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "exp_2 = exp_from_qasm(program_list[2], qubits=qubit_map, gate_store=gate_store)\n", - "compiled_exp_2 = my_session.compile(exp_2)\n", - "\n", - "plot_simulation(compiled_exp_2, length=100e-6)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plot_simulation(compiled_exp_2, length=1000e-6)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "transpiled_circuit[2].draw()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "zi-py310", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/laboneq/VERSION.txt b/laboneq/VERSION.txt index da26612..f8f895c 100644 --- a/laboneq/VERSION.txt +++ b/laboneq/VERSION.txt @@ -1 +1 @@ -2.23.0 \ No newline at end of file +2.24.0 \ No newline at end of file diff --git a/laboneq/compiler/code_generator/analyze_playback.py b/laboneq/compiler/code_generator/analyze_playback.py index ff7da62..d931358 100644 --- a/laboneq/compiler/code_generator/analyze_playback.py +++ b/laboneq/compiler/code_generator/analyze_playback.py @@ -875,6 +875,8 @@ def analyze_play_wave_times( "signal_id": signal_id, }, ) + if len(signal_ids) > 1: + interval_event.params["multiplexed_signal_ids"] = signal_ids interval_events.add(start, interval_event) # A value of 'None' indicates that we do not know the current value (e.g. after a diff --git a/laboneq/compiler/code_generator/code_generator.py b/laboneq/compiler/code_generator/code_generator.py index efad278..068d30a 100644 --- a/laboneq/compiler/code_generator/code_generator.py +++ b/laboneq/compiler/code_generator/code_generator.py @@ -716,13 +716,10 @@ def gen_seq_c(self, events: List[Any], pulse_defs: Dict[str, PulseDef]): ): self._gen_seq_c_per_awg(awg, events, pulse_defs) - for ( - awg, - target_fb_register, - ) in self._feedback_register_allocator.target_feedback_registers.items(): - self._feedback_register_config[ - awg - ].target_feedback_register = target_fb_register + tgt_feedback_regs = self._feedback_register_allocator.target_feedback_registers + for awg, target_fb_register in tgt_feedback_regs.items(): + feedback_reg_config = self._feedback_register_config[awg] + feedback_reg_config.target_feedback_register = target_fb_register @staticmethod def _calc_global_awg_params(awg: AWGInfo) -> Tuple[float, float]: @@ -1399,9 +1396,10 @@ def _gen_seq_c_per_awg( ) raise LabOneQException(f"Compiler error. 
{msg}") from error - self._feedback_register_config[ - awg.key - ].command_table_offset = handler.command_table_match_offset + fb_register_config = self._feedback_register_config[awg.key] + fb_register_config.command_table_offset = handler.command_table_match_offset + if handler.use_zsync_feedback is False: + fb_register_config.source_feedback_register = "local" _logger.debug( "*** Finished event processing, loop_stack_generators: %s", @@ -1834,7 +1832,7 @@ def post_process_sampled_events( start = None end = None acquisition_types = set() - feedback_registers = list() + feedback_register: None | int = None play: List[AWGEvent] = [] acquire: List[AWGEvent] = [] for x in sampled_event_list: @@ -1846,9 +1844,16 @@ def post_process_sampled_events( start = x.start if "acquisition_type" in x.params: acquisition_types.update(x.params["acquisition_type"]) - feedback_register = x.params.get("feedback_register") - if feedback_register is not None: - feedback_registers.append(feedback_register) + this_feedback_register = x.params.get("feedback_register") + if this_feedback_register is not None: + if ( + feedback_register is not None + and feedback_register != this_feedback_register + ): + raise LabOneQException( + "Conflicting feedback register allocation detected, please contact development." + ) + feedback_register = this_feedback_register acquire.append(x) end = x.end if end is None else max(end, x.end) if len(play) > 0 and len(acquire) == 0 and has_acquire: @@ -1856,10 +1861,6 @@ def post_process_sampled_events( for log_event in sampled_event_list: _logger.warning(" %s", log_event) raise Exception("Play and acquire must happen at the same time") - if len(feedback_registers) > 1: - _logger.warning( - "Conflicting feedback register allocation detected, please contact development." - ) if len(play) > 0 or len(acquire) > 0: end = ( round(end / DeviceType.SHFQA.sample_multiple) @@ -1876,9 +1877,7 @@ def post_process_sampled_events( "acquire_handles": [ a.params["acquire_handles"][0] for a in acquire ], - "feedback_register": None - if len(feedback_registers) == 0 - else feedback_registers[0], + "feedback_register": feedback_register, }, ) diff --git a/laboneq/compiler/code_generator/sampled_event_handler.py b/laboneq/compiler/code_generator/sampled_event_handler.py index a3e21d1..be6fe7e 100644 --- a/laboneq/compiler/code_generator/sampled_event_handler.py +++ b/laboneq/compiler/code_generator/sampled_event_handler.py @@ -142,6 +142,11 @@ def __init__( self.last_event: Optional[AWGEvent] = None self.match_parent_event: Optional[AWGEvent] = None self.command_table_match_offset = None + + # If true, this AWG sources feedback data from Zsync. If False, it sources data + # from the local bus. None means neither source is used. Using both is illegal. 
+ self.use_zsync_feedback: bool | None = None + self.match_command_table_entries: dict[ int, tuple ] = {} # For feedback or prng match @@ -330,7 +335,6 @@ def handle_playwave_on_feedback( assert self.use_command_table assert self.match_parent_event is not None state = signature.state - signal_id = sampled_event.params["signal_id"] if state in self.match_command_table_entries: if self.match_command_table_entries[state] != ( @@ -348,9 +352,15 @@ def handle_playwave_on_feedback( wave_index, sampled_event.start - self.match_parent_event.start, ) + + if "multiplexed_signal_ids" in sampled_event.params: + drive_signal_ids = sampled_event.params["multiplexed_signal_ids"] + else: + drive_signal_ids = (sampled_event.params["signal_id"],) + self.feedback_connections.setdefault( self.match_parent_event.params["handle"], FeedbackConnection(None) - ).drive.add(signal_id) + ).drive.update(drive_signal_ids) def handle_playwave_on_user_register( self, @@ -960,6 +970,12 @@ def close_event_list_for_handle(self): + (f"+ {latency}" if latency >= 0 else f"- {-latency}"), comment="Match handle " + handle, ) + use_zsync: bool = not ev.params["local"] + if self.use_zsync_feedback is not None and self.use_zsync_feedback != use_zsync: + raise LabOneQException( + "Mixed feedback paths (global and local) are illegal" + ) + self.use_zsync_feedback = use_zsync self.seqc_tracker.add_timing_comment(ev.end) self.seqc_tracker.flush_deferred_function_calls() self.seqc_tracker.current_time = self.match_parent_event.end diff --git a/laboneq/compiler/experiment_access/experiment_dao.py b/laboneq/compiler/experiment_access/experiment_dao.py index 4f8b416..93f5858 100644 --- a/laboneq/compiler/experiment_access/experiment_dao.py +++ b/laboneq/compiler/experiment_access/experiment_dao.py @@ -241,12 +241,10 @@ def section_signals(self, section_id): @cached_method() def section_signals_with_children(self, section_id): - retval = set() - section_with_children = self.all_section_children(section_id) - section_with_children.add(section_id) - for child in section_with_children: - retval |= self.section_signals(child) - return retval + signals = set(self.section_signals(section_id)) + for child in self.all_section_children(section_id): + signals |= self.section_signals(child) + return signals def pulses(self) -> list[str]: return list(self._data["pulses"].keys()) diff --git a/laboneq/compiler/feedback_router/feedback_router.py b/laboneq/compiler/feedback_router/feedback_router.py index 6ca3b34..a3a0895 100644 --- a/laboneq/compiler/feedback_router/feedback_router.py +++ b/laboneq/compiler/feedback_router/feedback_router.py @@ -8,9 +8,7 @@ from typing import Literal, Union from laboneq._utils import cached_method -from laboneq.compiler.common.awg_info import AWGInfo, AwgKey -from laboneq.compiler.common.awg_signal_type import AWGSignalType -from laboneq.compiler.common.device_type import DeviceType +from laboneq.compiler.common.awg_info import AwgKey from laboneq.compiler.common.feedback_connection import FeedbackConnection from laboneq.compiler.common.feedback_register_config import FeedbackRegisterConfig from laboneq.compiler.common.signal_obj import SignalObj @@ -60,11 +58,14 @@ def calculate_feedback_routing(self): for awg in self._awgs.values(): if (tx_qa := self._transmitter_qa_for_awg(awg.key)) is None: continue + feedback_register_config = feedback_register_configs[awg.key] qa_awg, qa_signal = tx_qa register = feedback_register_configs[qa_awg].target_feedback_register assert register is not None - use_local_feedback = 
self._local_feedback_allowed(awg, self._awgs[qa_awg]) + use_local_feedback = ( + feedback_register_config.source_feedback_register == "local" + ) register_bitshift, width, mask = self._register_bitshift( register, @@ -87,7 +88,6 @@ def calculate_feedback_routing(self): "Measurement result must not span across indices" ) - feedback_register_config = feedback_register_configs[awg.key] feedback_register_config.source_feedback_register = register feedback_register_config.codeword_bitshift = codeword_bitshift feedback_register_config.register_index_select = register_index_select @@ -97,11 +97,7 @@ def calculate_feedback_routing(self): def _transmitter_qa_for_awg(self, awg_key: AwgKey) -> tuple[AwgKey, str] | None: """Find the QA core that is transmitting feedback data to this AWG.""" awg = self._awgs[AwgKey(awg_key.device_id, awg_key.awg_number)] - signal_type = awg.signal_type - if signal_type == AWGSignalType.DOUBLE: - awg_signals = {f"{awg.signal_channels[0][0]}_{awg.signal_channels[1][0]}"} - else: - awg_signals = {c for c, _ in awg.signal_channels} + awg_signals = {c for c, _ in awg.signal_channels} qa_signal_ids = { h.acquire for h in self._feedback_connections.values() @@ -178,16 +174,6 @@ def _register_bitshift( raise AssertionError(f"Signal {qa_signal} not found in register {register}") return register_bitshift, width, mask - @staticmethod - def _local_feedback_allowed(sg_awg: AWGInfo, qa_awg: AWGInfo): - # todo: this check for QC is quite brittle - - return ( - sg_awg.device_type == DeviceType.SHFSG - and qa_awg.device_type == DeviceType.SHFQA - and sg_awg.device_id == f"{qa_awg.device_id}_sg" - ) - @singledispatch def _do_compute_feedback_routing( diff --git a/laboneq/compiler/scheduler/match_schedule.py b/laboneq/compiler/scheduler/match_schedule.py index c99ceaa..eda0553 100644 --- a/laboneq/compiler/scheduler/match_schedule.py +++ b/laboneq/compiler/scheduler/match_schedule.py @@ -8,7 +8,7 @@ from typing import TYPE_CHECKING, Iterable, List, Tuple from attrs import define -from zhinst.utils.feedback_model import ( +from zhinst.timing_models import ( FeedbackPath, PQSCMode, QAType, @@ -179,6 +179,10 @@ def _compute_start_with_latency( time_of_arrival_at_register + EXECUTETABLEENTRY_LATENCY ) + # Extra slack to avoid issues with marginal model. 
+ # See HBAR-1934 + time_of_pulse_played += 5 + sg_seq_rate = schedule_data.sampling_rate_tracker.sequencer_rate_for_device( sg_signal_obj.awg.device_id ) diff --git a/laboneq/compiler/scheduler/preorder_map.py b/laboneq/compiler/scheduler/preorder_map.py index ccdf7b5..c486546 100644 --- a/laboneq/compiler/scheduler/preorder_map.py +++ b/laboneq/compiler/scheduler/preorder_map.py @@ -1,29 +1,51 @@ # Copyright 2022 Zurich Instruments AG # SPDX-License-Identifier: Apache-2.0 -from typing import Dict +from __future__ import annotations import intervaltree from laboneq.compiler.scheduler.interval_schedule import IntervalSchedule +from laboneq.compiler.scheduler.loop_iteration_schedule import LoopIterationSchedule from laboneq.compiler.scheduler.loop_schedule import LoopSchedule from laboneq.compiler.scheduler.section_schedule import SectionSchedule def calculate_preorder_map( - schedule: IntervalSchedule, preorder_map: Dict, current_depth=0 + schedule: IntervalSchedule, + preorder_map: dict[str, int], + section_children: dict[str, set[str]], + current_depth=0, ) -> int: - max_depth = current_depth - intervals = intervaltree.IntervalTree() if not isinstance(schedule, SectionSchedule): return current_depth + max_depth = current_depth + intervals = intervaltree.IntervalTree() if isinstance(schedule, LoopSchedule): - # In the PSV, we do not consider the loop and the loop iteration separately - schedule = schedule.children[0] + # Normally we only need to look at the first loop iteration to find all the + # sections. When there are statically resolved branches however, not every + # iteration may contain all the subsections. + for child in schedule.children: + assert isinstance(child, LoopIterationSchedule) + # In the PSV, we do not consider the loop and the loop iteration separately, so + # we immediately pass to the children without incrementing the depth. + max_depth = max( + max_depth, + calculate_preorder_map( + child, preorder_map, section_children, current_depth + ), + ) + if section_children[schedule.section].issubset(preorder_map.keys()): + break + else: + # When we sweep a parameter in near-time (or the pipeliner), a section can + # legitimately be absent from the schedule we just generated. This is not + # an error. 
+ pass + return max_depth if isinstance(schedule, SectionSchedule): # Draw the section on this row - assert schedule.section not in preorder_map preorder_map[schedule.section] = current_depth current_depth += 1 @@ -37,13 +59,18 @@ def calculate_preorder_map( if not intervals.overlap(c_start, c_end): # Place child in this row max_depth = max( - max_depth, calculate_preorder_map(c, preorder_map, current_depth) + max_depth, + calculate_preorder_map( + c, preorder_map, section_children, current_depth + ), ) else: # Place child in next free row max_depth = max( max_depth, - calculate_preorder_map(c, preorder_map, max_depth + 1), + calculate_preorder_map( + c, preorder_map, section_children, max_depth + 1 + ), ) if c_start != c_end: intervals.addi(c_start, c_end, data=c.section) diff --git a/laboneq/compiler/scheduler/schedule_data.py b/laboneq/compiler/scheduler/schedule_data.py index 3fb1bfa..452f2fb 100644 --- a/laboneq/compiler/scheduler/schedule_data.py +++ b/laboneq/compiler/scheduler/schedule_data.py @@ -26,3 +26,8 @@ class ScheduleData: def __post_init__(self): self.TINYSAMPLE = self.settings.TINYSAMPLE + + def reset(self): + """`ScheduleData` is persistent between scheduler runs, so we must clear the + cache of acquire pulses that are included in the schedule.""" + self.acquire_pulses = {} diff --git a/laboneq/compiler/scheduler/scheduler.py b/laboneq/compiler/scheduler/scheduler.py index 1e5de61..1e1c90b 100644 --- a/laboneq/compiler/scheduler/scheduler.py +++ b/laboneq/compiler/scheduler/scheduler.py @@ -157,6 +157,7 @@ def __init__( def run(self, nt_parameters: Optional[ParameterStore] = None): if nt_parameters is None: nt_parameters = ParameterStore() + self._schedule_data.reset() self._root_schedule = self._schedule_root(nt_parameters) _logger.info("Schedule completed") for ( @@ -295,10 +296,9 @@ def _schedule_section( section_info.match_handle is not None or section_info.match_user_register is not None or section_info.match_prng_sample is not None + or section_info.match_sweep_parameter is not None ): - schedule = self._schedule_match( - section_id, section_info, current_parameters - ) + schedule = self._schedule_match(section_info, current_parameters) else: # regular section children_schedules = self._collect_children_schedules( section_id, current_parameters @@ -873,22 +873,26 @@ def _schedule_acquire_group( def _schedule_match( self, - section_id: str, section_info: SectionInfo, current_parameters: ParameterStore[str, float], - ) -> MatchSchedule: + ) -> SectionSchedule: assert ( section_info.match_handle is not None or section_info.match_user_register is not None or section_info.match_prng_sample is not None + or section_info.match_sweep_parameter is not None ) handle: str | None = section_info.match_handle user_register: Optional[int] = section_info.match_user_register prng_sample = section_info.match_prng_sample + match_sweep_parameter = section_info.match_sweep_parameter local: Optional[bool] = section_info.local + if match_sweep_parameter is not None: + return self._schedule_static_branch(section_info, current_parameters) + dao = self._schedule_data.experiment_dao - section_children = dao.direct_section_children(section_id) + section_children = dao.direct_section_children(section_info.uid) if len(section_children) == 0: raise LabOneQException("Must provide at least one branch option") children_schedules = [ @@ -954,7 +958,7 @@ def _schedule_match( signals=signals, children=children_schedules, right_aligned=False, - section=section_id, + section=section_info.uid, 
play_after=play_after, handle=handle, user_register=user_register, @@ -963,6 +967,22 @@ def _schedule_match( compressed_loop_grid=compressed_loop_grid, ) + def _schedule_static_branch( + self, + section_info: SectionInfo, + current_parameters: ParameterStore, + ) -> SectionSchedule: + match_sweep_parameter = section_info.match_sweep_parameter + val = current_parameters[match_sweep_parameter.uid] + + for case in section_info.children: + if case.state == val: + case_schedule = self._schedule_section(case.uid, current_parameters) + match_schedule = self._schedule_children( + section_info.uid, section_info, [case_schedule] + ) + return match_schedule + def _schedule_case( self, section_id: str, current_parameters: ParameterStore ) -> CaseSchedule: @@ -993,6 +1013,8 @@ def _schedule_case( raise LabOneQException( "Only pulses, not sections, are allowed inside a case" ) + if cs.is_acquire: + raise LabOneQException("No acquisitions can happen in a case block") if cs.increment_oscillator_phase or cs.set_oscillator_phase: for s in cs.signals: s = self._experiment_dao.signal_info(s) @@ -1227,6 +1249,12 @@ def search_lowest_level_loop(section): def preorder_map(self): preorder_map = {} assert self._root_schedule is not None + + section_children = { + s: self._experiment_dao.all_section_children(s) + for s in self._experiment_dao.sections() + } + for s in self._root_schedule.children: - calculate_preorder_map(s, preorder_map) + calculate_preorder_map(s, preorder_map, section_children) return preorder_map diff --git a/laboneq/compiler/workflow/compiler.py b/laboneq/compiler/workflow/compiler.py index 2e19d02..466fbe2 100644 --- a/laboneq/compiler/workflow/compiler.py +++ b/laboneq/compiler/workflow/compiler.py @@ -49,6 +49,7 @@ from laboneq.compiler.workflow.realtime_compiler import RealtimeCompiler from laboneq.compiler.workflow.recipe_generator import RecipeGenerator from laboneq.compiler.workflow.rt_linker import CombinedRTCompilerOutputContainer +import laboneq.compiler.workflow.reporter # noqa: F401 from laboneq.core.exceptions import LabOneQException from laboneq.core.types.compiled_experiment import CompiledExperiment from laboneq.core.types.enums.acquisition_type import AcquisitionType, is_spectroscopy @@ -169,8 +170,7 @@ def _analyze_setup(self): used_devices == {"hdawg"} or used_devices == {"shfsg"} or used_devices == {"shfqa"} - or used_devices == {"shfqa", "shfsg"} - or standalone_qc + or (used_devices == {"shfqa", "shfsg"} and standalone_qc) or used_devices == {"hdawg", "uhfqa"} or (used_devices == {"uhfqa"} and has_hdawg) # No signal on leader ) @@ -270,7 +270,7 @@ def _process_experiment(self): self._signal_objects, self._settings, ) - executor = NtCompilerExecutor(rt_compiler) + executor = NtCompilerExecutor(rt_compiler, self._settings) executor.run(self._execution) self._combined_compiler_output = executor.combined_compiler_output() if self._combined_compiler_output is None: @@ -282,6 +282,7 @@ def _process_experiment(self): self._combined_compiler_output = rt_linker.from_single_run( rt_compiler_output, [0] ) + executor.finalize() compute_feedback_routing( signal_objs=self._signal_objects, @@ -289,9 +290,6 @@ def _process_experiment(self): combined_compiler_output=self._combined_compiler_output, ) - if self._settings.LOG_REPORT: - executor.report() - @staticmethod def _get_total_rounded_delay(delay, signal_id, device_type, sampling_rate): if delay < 0: diff --git a/laboneq/compiler/workflow/neartime_execution.py b/laboneq/compiler/workflow/neartime_execution.py index d5005f6..6e1e3f9 
100644 --- a/laboneq/compiler/workflow/neartime_execution.py +++ b/laboneq/compiler/workflow/neartime_execution.py @@ -3,17 +3,18 @@ from __future__ import annotations +import abc from builtins import frozenset from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Set from numpy.typing import ArrayLike +from laboneq.compiler import CompilerSettings from laboneq.compiler.scheduler.parameter_store import ParameterStore from laboneq.compiler.workflow import rt_linker from laboneq.compiler.workflow.compiler_output import RTCompilerOutputContainer from laboneq.compiler.workflow.realtime_compiler import RealtimeCompiler -from laboneq.compiler.workflow.reporter import CompilationReportGenerator from laboneq.compiler.workflow.rt_linker import CombinedRTCompilerOutputContainer from laboneq.executor.executor import ( ExecRT, @@ -66,10 +67,27 @@ def legacy_execution_program(): ) +class NtCompilerExecutorDelegate(abc.ABC): + @abc.abstractmethod + def __init__(self, settings: CompilerSettings): + raise NotImplementedError + + @abc.abstractmethod + def after_compilation_run(self, new: RTCompilerOutputContainer, indices: list[int]): + raise NotImplementedError + + @abc.abstractmethod + def after_final_run(self, combined: CombinedRTCompilerOutputContainer): + raise NotImplementedError + + class NtCompilerExecutor(ExecutorBase): - def __init__(self, rt_compiler: RealtimeCompiler): + _delegates_types: list[type[NtCompilerExecutorDelegate]] = [] + + def __init__(self, rt_compiler: RealtimeCompiler, settings: CompilerSettings): super().__init__(looping_mode=LoopingMode.NEAR_TIME_ONLY) self._rt_compiler = rt_compiler + self._settings = settings self._iteration_stack = IterationStack() self._compiler_output_by_param_values: Dict[ @@ -80,7 +98,14 @@ def __init__(self, rt_compiler: RealtimeCompiler): self._combined_compiler_output: Optional[ CombinedRTCompilerOutputContainer ] = None - self._compiler_report_generator = CompilationReportGenerator() + + self._delegates = [ + Delegate(self._settings) for Delegate in self._delegates_types + ] + + @classmethod + def register_hook(cls, delegate_class: type[NtCompilerExecutorDelegate]): + cls._delegates_types.append(delegate_class) def set_sw_param_handler( self, @@ -154,7 +179,10 @@ def rt_entry_handler( nt_step_indices, ) self._combined_compiler_output.add_total_execution_time(new_compiler_output) - self._compiler_report_generator.update(new_compiler_output, nt_step_indices) + + for delegate in self._delegates: + delegate.after_compilation_run(new_compiler_output, nt_step_indices) + self._last_compiler_output = new_compiler_output def _frozen_required_parameters(self): @@ -167,9 +195,7 @@ def _frozen_required_parameters(self): def combined_compiler_output(self): return self._combined_compiler_output - def report(self): + def finalize(self): if self._combined_compiler_output is not None: - self._compiler_report_generator.calculate_total( - self._combined_compiler_output - ) - return self._compiler_report_generator.log_report() + for delegate in self._delegates: + delegate.after_final_run(self._combined_compiler_output) diff --git a/laboneq/compiler/workflow/realtime_compiler.py b/laboneq/compiler/workflow/realtime_compiler.py index ec7c5c6..f500b32 100644 --- a/laboneq/compiler/workflow/realtime_compiler.py +++ b/laboneq/compiler/workflow/realtime_compiler.py @@ -143,10 +143,10 @@ def _lower_ir_to_pulse_sheet(self, ir: IR): section_info_out = {} section_signals_with_children = {} - for section_info in [ + for section_info in ( 
ir.root_section, *ir.root_section_children, - ]: + ): section_display_name = section_info.uid section_signals_with_children[section_info.uid] = list( ir.section_signals_with_chidlren_ids[section_info.uid] diff --git a/laboneq/compiler/workflow/recipe_generator.py b/laboneq/compiler/workflow/recipe_generator.py index 8117742..493e359 100644 --- a/laboneq/compiler/workflow/recipe_generator.py +++ b/laboneq/compiler/workflow/recipe_generator.py @@ -23,7 +23,7 @@ ) from laboneq.core.exceptions import LabOneQException from laboneq.core.types.enums.acquisition_type import is_spectroscopy -from laboneq.data.calibration import PortMode +from laboneq.data.calibration import PortMode, CancellationSource from laboneq.data.compilation_job import DeviceInfo, DeviceInfoType, ParameterInfo from laboneq.data.recipe import ( AWG, @@ -169,17 +169,25 @@ def add_connectivity_from_experiment( amplifier_pump = experiment_dao.amplifier_pump(signal) if amplifier_pump is None: continue - amplifier_pump_dict: dict[str, str | float | bool | int] = { - "cancellation": amplifier_pump.cancellation, - "alc_engaged": amplifier_pump.alc_engaged, - "use_probe": amplifier_pump.use_probe, + amplifier_pump_dict: dict[ + str, str | float | bool | int | CancellationSource | None + ] = { + "pump_on": amplifier_pump.pump_on, + "cancellation_on": amplifier_pump.cancellation_on, + "cancellation_source": amplifier_pump.cancellation_source, + "cancellation_source_frequency": amplifier_pump.cancellation_source_frequency, + "alc_on": amplifier_pump.alc_on, + "pump_filter_on": amplifier_pump.pump_filter_on, + "probe_on": amplifier_pump.probe_on, "channel": amplifier_pump.channel, } for field in [ - "pump_freq", + "pump_frequency", "pump_power", "probe_frequency", "probe_power", + "cancellation_phase", + "cancellation_attenuation", ]: val = getattr(amplifier_pump, field) if val is None: diff --git a/laboneq/compiler/workflow/reporter.py b/laboneq/compiler/workflow/reporter.py index b1de0e9..aee7701 100644 --- a/laboneq/compiler/workflow/reporter.py +++ b/laboneq/compiler/workflow/reporter.py @@ -2,9 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 from __future__ import annotations -from functools import singledispatch -import logging from dataclasses import dataclass from io import StringIO from typing import Optional @@ -13,17 +11,23 @@ from rich.console import Console from rich.table import Table +from laboneq.compiler import CompilerSettings from laboneq.compiler.common.awg_info import AwgKey from laboneq.compiler.workflow.compiler_output import ( SeqCGenOutput, CombinedRTCompilerOutputContainer, CombinedRTOutputSeqC, - CombinedRTOutputPrettyPrinter, - PrettyPrinterOutput, RTCompilerOutputContainer, ) +from laboneq.laboneq_logging import get_logger -_logger = logging.getLogger(__name__) +_logger = get_logger(__name__) + + +from laboneq.compiler.workflow.neartime_execution import ( + NtCompilerExecutorDelegate, + NtCompilerExecutor, +) def _count_samples(waves, wave_index): @@ -43,6 +47,8 @@ def _count_samples(waves, wave_index): multiplier = 2 # two samples per clock cycle elif wave_type == "single": waveform_name = f"{wave_name}.wave" + else: + raise ValueError("invalid wave type") waveform = waves[waveform_name] return len(waveform["samples"]) * multiplier @@ -58,124 +64,87 @@ class ReportEntry: waveform_samples: int -@singledispatch -def _do_calculate_total(compiler_output): - raise NotImplementedError() - - -@singledispatch -def _do_get_report(compiler_output, step_indices): - raise NotImplementedError() - - 
-@_do_calculate_total.register -def _(compiler_output: CombinedRTOutputSeqC) -> ReportEntry: - total_seqc = sum(len(s["text"].splitlines()) for s in compiler_output.src) - total_ct = sum(len(ct["ct"]) for ct in compiler_output.command_tables) - total_wave_idx = sum(len(wi) for wi in compiler_output.wave_indices) - total_samples = sum( - _count_samples(compiler_output.waves, wi) - for wil in compiler_output.wave_indices - for wi in wil["value"].items() - ) - - return ReportEntry( - nt_step_indices=(), - awg=None, - seqc_loc=total_seqc, - command_table_entries=total_ct, - wave_indices=total_wave_idx, - waveform_samples=total_samples, - ) - - -@_do_calculate_total.register -def _(compiler_output: CombinedRTOutputPrettyPrinter) -> ReportEntry: - total_src = sum(len(s) for s in compiler_output.src.items()) - - return ReportEntry( - nt_step_indices=(), - awg=None, - seqc_loc=total_src, - command_table_entries=0, - wave_indices=0, - waveform_samples=0, - ) - - -@_do_get_report.register -def _( - compiler_output: PrettyPrinterOutput, step_indices: list[int] -) -> list[ReportEntry]: - return [ - ReportEntry( - awg=AwgKey("pp", 0), - nt_step_indices=tuple(step_indices), - seqc_loc=1, - command_table_entries=0, - wave_indices=0, - waveform_samples=0, - ) - ] - - -@_do_get_report.register -def _(compiler_output: SeqCGenOutput, step_indices: list[int]) -> list[ReportEntry]: - report = [] - for awg, awg_src in compiler_output.src.items(): - seqc_loc = len(awg_src["text"].splitlines()) - ct = compiler_output.command_tables.get(awg, {"ct": []})["ct"] - ct_len = len(ct) - wave_indices = compiler_output.wave_indices[awg]["value"] - wave_indices_count = len(wave_indices) - sample_count = 0 - for wave_index in wave_indices.items(): - sample_count += _count_samples(compiler_output.waves, wave_index) - report.append( - ReportEntry( - awg=awg, - nt_step_indices=tuple(step_indices), - seqc_loc=seqc_loc, - command_table_entries=ct_len, - wave_indices=wave_indices_count, - waveform_samples=sample_count, - ) - ) - return report - - -class CompilationReportGenerator: - def __init__(self): +class CompilationReportGenerator(NtCompilerExecutorDelegate): + def __init__(self, settings: CompilerSettings): + self._settings = settings self._data: list[ReportEntry] = [] self._total: ReportEntry | None = None + self._pulse_waveform_count = {} + self._pulse_map = {} + + def after_compilation_run(self, new: RTCompilerOutputContainer, indices: list[int]): + self.update(new, indices) + + def after_final_run(self, combined: CombinedRTCompilerOutputContainer): + for co in combined.combined_output.values(): + if isinstance(co, CombinedRTOutputSeqC): + compiler_output = co + break + else: + return + if self._settings.LOG_REPORT: + self.compute_pulse_map_statistics(compiler_output) + self.calculate_total(compiler_output) + self.log_report() + def update( self, rt_compiler_output: RTCompilerOutputContainer, step_indices: list[int] ): - for output in rt_compiler_output.codegen_output.values(): - report = _do_get_report(output, step_indices=step_indices) - self._data += report - - def calculate_total(self, compiler_output: CombinedRTCompilerOutputContainer): - totals = [ - _do_calculate_total(tot) for tot in compiler_output.combined_output.values() - ] - tot = ReportEntry( - (), - None, - seqc_loc=0, - command_table_entries=0, - wave_indices=0, - waveform_samples=0, + for co in rt_compiler_output.codegen_output.values(): + if isinstance(co, SeqCGenOutput): + compiler_output = co + break + else: + return + report = [] + for awg, awg_src in 
compiler_output.src.items(): + seqc_loc = len(awg_src["text"].splitlines()) + ct = compiler_output.command_tables.get(awg, {"ct": []})["ct"] + ct_len = len(ct) + wave_indices = compiler_output.wave_indices[awg]["value"] + wave_indices_count = len(wave_indices) + sample_count = 0 + for wave_index in wave_indices.items(): + sample_count += _count_samples(compiler_output.waves, wave_index) + report.append( + ReportEntry( + awg=awg, + nt_step_indices=tuple(step_indices), + seqc_loc=seqc_loc, + command_table_entries=ct_len, + wave_indices=wave_indices_count, + waveform_samples=sample_count, + ) + ) + self._data += report + + def calculate_total(self, compiler_output: CombinedRTOutputSeqC): + total_seqc = sum(len(s["text"].splitlines()) for s in compiler_output.src) + total_ct = sum(len(ct["ct"]) for ct in compiler_output.command_tables) + total_wave_idx = sum(len(wi) for wi in compiler_output.wave_indices) + total_samples = sum( + _count_samples(compiler_output.waves, wi) + for wil in compiler_output.wave_indices + for wi in wil["value"].items() ) - for t in totals: - tot.seqc_loc += t.seqc_loc - tot.command_table_entries += t.command_table_entries - tot.wave_indices += t.wave_indices - tot.waveform_samples += t.waveform_samples - self._total = tot - - def create_table(self) -> Table: + self._total = ReportEntry( + nt_step_indices=(), + awg=None, + seqc_loc=total_seqc, + command_table_entries=total_ct, + wave_indices=total_wave_idx, + waveform_samples=total_samples, + ) + + def compute_pulse_map_statistics(self, compiler_output: CombinedRTOutputSeqC): + for pulse_id, pulse_map in compiler_output.pulse_map.items(): + self._pulse_map[pulse_id] = ( + len(pulse_map.waveforms), + [i for wf in pulse_map.waveforms.values() for i in wf.instances], + ) + + def create_resource_usage_table(self) -> Table: entries = sorted(self._data) all_nt_steps = set(e.nt_step_indices for e in entries) include_nt_step = len(all_nt_steps) > 1 @@ -242,13 +211,54 @@ def create_table(self) -> Table: return table - def __str__(self): - table = self.create_table() + def resource_table_as_str(self): + table = self.create_resource_usage_table() + + with StringIO() as buffer: + console = Console(file=buffer, force_jupyter=False) + console.print(table) + return buffer.getvalue() + + def create_pulse_map_table(self): + table = Table(box=box.HORIZONTALS) + table.add_column("Pulse UID") + table.add_column("Waveforms", justify="right") + table.add_section() + table.add_column("Offsets", justify="right") + table.add_column("Amplitudes", justify="right") + table.add_column("Phases", justify="right") + table.add_column("Lengths", justify="right") + + for pulse_id, (waveform_count, instances) in self._pulse_map.items(): + offsets_count = len({inst.offset_samples for inst in instances}) + amplitudes_count = len({inst.amplitude for inst in instances}) + phases_count = len({inst.modulation_phase for inst in instances}) + lengths_count = len({inst.length for inst in instances}) + fields = [ + str(n) if n > 1 else "" + for n in [offsets_count, amplitudes_count, phases_count, lengths_count] + ] + + table.add_row(pulse_id, str(waveform_count), *fields) + + return table + + def pulse_map_table_as_str(self): + table = self.create_pulse_map_table() + with StringIO() as buffer: console = Console(file=buffer, force_jupyter=False) console.print(table) return buffer.getvalue() def log_report(self): - for line in str(self).splitlines(): + for line in self.resource_table_as_str().splitlines(): _logger.info(line) + + _logger.diagnostic("") + 
_logger.diagnostic(" Waveform usage across pulses:") + for line in self.pulse_map_table_as_str().splitlines(): + _logger.diagnostic(line) + + +NtCompilerExecutor.register_hook(CompilationReportGenerator) diff --git a/laboneq/compiler/workflow/rt_linker.py b/laboneq/compiler/workflow/rt_linker.py index d99b94f..11fe17a 100644 --- a/laboneq/compiler/workflow/rt_linker.py +++ b/laboneq/compiler/workflow/rt_linker.py @@ -195,6 +195,12 @@ def _( ) } + for pulse_id, entry in new.pulse_map.items(): + if pulse_id not in this.pulse_map: + this.pulse_map[pulse_id] = entry + else: + this.pulse_map[pulse_id].waveforms.update(entry.waveforms) + if ( previous_src == awg_src and previous_ct == new_ct diff --git a/laboneq/contrib/example_helpers/plotting/plot_helpers.py b/laboneq/contrib/example_helpers/plotting/plot_helpers.py index 7127fa9..2082672 100644 --- a/laboneq/contrib/example_helpers/plotting/plot_helpers.py +++ b/laboneq/contrib/example_helpers/plotting/plot_helpers.py @@ -58,8 +58,12 @@ def _integration_weights_by_signal( if iw["filename"] not in kernel_indices_ref: continue - # discard all but the first kernel in case of MSD - kernel_name_by_signal.update({k: v[0] for k, v in iw["signals"].items()}) + if iw["signals"]: + # discard all but the first kernel in case of MSD + for k, v in iw["signals"].items(): + # ensure no failure if no integration kernel is defined + if v: + kernel_name_by_signal.update({k: v[0]}) kernel_samples_by_signal: dict[str, np.ndarray] = {} for signal, kernel in kernel_name_by_signal.items(): diff --git a/laboneq/controller/__init__.py b/laboneq/controller/__init__.py index 7f128c8..d5cc30e 100644 --- a/laboneq/controller/__init__.py +++ b/laboneq/controller/__init__.py @@ -2,6 +2,5 @@ # SPDX-License-Identifier: Apache-2.0 from .controller import Controller, ControllerRunParameters, _stop_controller -from .laboneq_logging import initialize_logging from .toolkit_adapter import MockedToolkit, ToolkitDevices from .util import LabOneQControllerException diff --git a/laboneq/controller/attribute_value_tracker.py b/laboneq/controller/attribute_value_tracker.py index a8a958f..188e524 100644 --- a/laboneq/controller/attribute_value_tracker.py +++ b/laboneq/controller/attribute_value_tracker.py @@ -14,7 +14,9 @@ class AttributeName(Enum): OUTPUT_PORT_DELAY = auto() INPUT_SCHEDULER_PORT_DELAY = auto() INPUT_PORT_DELAY = auto() - PPC_PUMP_FREQ = auto() + PPC_CANCELLATION_PHASE = auto() + PPC_CANCELLATION_ATTENUATION = auto() + PPC_PUMP_FREQUENCY = auto() PPC_PUMP_POWER = auto() PPC_PROBE_FREQUENCY = auto() PPC_PROBE_POWER = auto() diff --git a/laboneq/controller/communication.py b/laboneq/controller/communication.py index 2b07328..e9fc167 100644 --- a/laboneq/controller/communication.py +++ b/laboneq/controller/communication.py @@ -14,9 +14,9 @@ import numpy as np import zhinst.core from zhinst.toolkit import Session as TKSession -from laboneq.controller.devices.device_utils import calc_dev_type +from laboneq.controller.devices.device_utils import zhinst_core_version -from laboneq.controller.devices.zi_emulator import ziDAQServerEmulator +from laboneq.controller.devices.zi_emulator import EmulatorState, ziDAQServerEmulator from laboneq.controller.devices.zi_node_monitor import NodeMonitor from .cache import Cache @@ -24,7 +24,7 @@ from .versioning import LabOneVersion if TYPE_CHECKING: - from laboneq.controller.devices.device_zi import DeviceQualifier + pass _logger = logging.getLogger(__name__) @@ -204,8 +204,7 @@ def clear_cache(self): @dataclass class ServerQualifier: - 
dry_run: bool = True - host: str = None + host: str | None = None port: int = 8004 ignore_version_mismatch: bool = False @@ -218,31 +217,21 @@ def __init__(self, name, server_qualifier: ServerQualifier): self._server_qualifier = server_qualifier self._dataserver_version = LabOneVersion.LATEST self._vector_counter = 0 - self.node_monitor = None - - ZiApiClass = ( - ziDAQServerEmulator if server_qualifier.dry_run else zhinst.core.ziDAQServer - ) + self._zi_api_object = self._make_zi_api() + self.node_monitor = NodeMonitor(self._zi_api_object) + def _make_zi_api(self): try: - self._zi_api_object = ZiApiClass( + return zhinst.core.ziDAQServer( self.server_qualifier.host, self.server_qualifier.port, self._API_LEVEL, ) - self.node_monitor = NodeMonitor(self._zi_api_object) except RuntimeError as exp: raise LabOneQControllerException(str(exp)) from None async def validate_connection(self): - [major, minor] = zhinst.core.__version__.split(".")[0:2] - zhinst_core_version_str = f"{major}.{minor}" - - if self._server_qualifier.dry_run: - # Ensure emulated data server version matches installed zhinst.core - self._zi_api_object.set_option( - "ZI", "about/version", zhinst_core_version_str - ) + zhinst_core_version_str = zhinst_core_version() path = "/zi/about/version" result = await self.batch_get([DaqNodeGetAction(self, path)]) @@ -397,18 +386,26 @@ async def batch_get(self, daq_actions): class DaqWrapperDryRun(DaqWrapper): - def __init__(self, name, server_qualifier: ServerQualifier = None): + def __init__( + self, + name, + server_qualifier: ServerQualifier | None = None, + emulator_state: EmulatorState | None = None, + ): if server_qualifier is None: server_qualifier = ServerQualifier() - assert server_qualifier.dry_run is True + if emulator_state is None: + emulator_state = EmulatorState() + self._emulator_state = emulator_state super().__init__(name, server_qualifier) - -def map_device_type(daq: Any, device_qualifier: DeviceQualifier): - assert isinstance(daq, ziDAQServerEmulator) - daq.map_device_type( - device_qualifier.options.serial, calc_dev_type(device_qualifier) - ) + def _make_zi_api(self): + return ziDAQServerEmulator( + self.server_qualifier.host, + self.server_qualifier.port, + self._API_LEVEL, + self._emulator_state, + ) async def batch_set(all_actions: List[DaqNodeSetAction]): diff --git a/laboneq/controller/controller.py b/laboneq/controller/controller.py index 3b63e14..0986b7d 100644 --- a/laboneq/controller/controller.py +++ b/laboneq/controller/controller.py @@ -18,14 +18,13 @@ from laboneq._observability import tracing from laboneq.controller.communication import ( DaqNodeSetAction, - DaqWrapper, batch_set, batch_set_multiple, ) from laboneq.controller.devices.async_support import gather_and_apply from laboneq.controller.devices.device_collection import DeviceCollection from laboneq.controller.devices.device_zi import DeviceZI -from laboneq.controller.devices.zi_node_monitor import ResponseWaiter +from laboneq.controller.devices.zi_node_monitor import NodeMonitorBase, ResponseWaiter from laboneq.controller.near_time_runner import NearTimeRunner from laboneq.controller.recipe_processor import ( RecipeData, @@ -34,7 +33,7 @@ ) from laboneq.controller.results import build_partial_result, make_acquired_result from laboneq.controller.util import LabOneQControllerException, SweepParamsTracker -from laboneq.controller.versioning import LabOneVersion +from laboneq.controller.versioning import LabOneVersion, SetupCaps from laboneq.core.exceptions import AbortExecution from 
laboneq.core.types.enums.acquisition_type import AcquisitionType from laboneq.core.types.enums.averaging_mode import AveragingMode @@ -80,11 +79,11 @@ def _stop_controller(controller: "Controller"): class Controller: def __init__( self, - run_parameters: ControllerRunParameters | None = None, - target_setup: TargetSetup | None = None, + run_parameters: ControllerRunParameters, + target_setup: TargetSetup, neartime_callbacks: dict[str, Callable] | None = None, ): - self._run_parameters = run_parameters or ControllerRunParameters() + self._run_parameters = run_parameters self._devices = DeviceCollection( target_setup=target_setup, dry_run=self._run_parameters.dry_run, @@ -92,12 +91,15 @@ def __init__( ) self._dataserver_version: LabOneVersion | None = None + self._setup_caps = SetupCaps(None) - self._last_connect_check_ts: float = None + self._last_connect_check_ts: float | None = None # Waves which are uploaded to the devices via pulse replacements self._current_waves = [] - self._neartime_callbacks: dict[str, Callable] = neartime_callbacks + self._neartime_callbacks: dict[str, Callable] = ( + {} if neartime_callbacks is None else neartime_callbacks + ) self._nodes_from_neartime_callbacks: list[DaqNodeSetAction] = [] self._recipe_data: RecipeData = None self._session: Any = None @@ -150,7 +152,7 @@ async def _perform_awg_upload( ] ], ): - elf_upload_conditions: dict[DaqWrapper, dict[str, Any]] = defaultdict(dict) + elf_upload_conditions: dict[NodeMonitorBase, dict[str, Any]] = defaultdict(dict) elf_node_settings: list[DaqNodeSetAction] = [] wf_node_settings: list[DaqNodeSetAction] = [] @@ -158,12 +160,14 @@ async def _perform_awg_upload( elf_node_settings.extend(elf_nodes) wf_node_settings.extend(wf_nodes) if len(upload_ready_conditions) > 0: - elf_upload_conditions[device.daq].update(upload_ready_conditions) + elf_upload_conditions[device.node_monitor].update( + upload_ready_conditions + ) # Upload AWG programs, waveforms, and command tables: if len(elf_upload_conditions) > 0: - for daq in elf_upload_conditions.keys(): - daq.node_monitor.flush() + for node_monitor in elf_upload_conditions.keys(): + await node_monitor.flush() _logger.debug("Started upload of AWG programs...") with tracing.get_tracer().start_span("upload-awg-programs") as _: @@ -172,13 +176,13 @@ async def _perform_awg_upload( if len(elf_upload_conditions) > 0: _logger.debug("Waiting for devices...") response_waiter = ResponseWaiter() - for daq, conditions in elf_upload_conditions.items(): + for node_monitor, conditions in elf_upload_conditions.items(): response_waiter.add( - target=daq.node_monitor, + target=node_monitor, conditions=conditions, ) timeout_s = 10 - if not response_waiter.wait_all(timeout=timeout_s): + if not await response_waiter.wait_all(timeout=timeout_s): raise LabOneQControllerException( f"AWGs not in ready state within timeout ({timeout_s} s). " f"Not fulfilled:\n{response_waiter.remaining_str()}" @@ -286,12 +290,12 @@ async def _execute_one_step_followers(self, with_pipeliner: bool): response_waiter = ResponseWaiter() for _, device in self._devices.followers: response_waiter.add( - target=device.daq.node_monitor, + target=device.node_monitor, conditions=await device.conditions_for_execution_ready( with_pipeliner=with_pipeliner ), ) - if not response_waiter.wait_all(timeout=2): + if not await response_waiter.wait_all(timeout=2): _logger.warning( "Conditions to start RT on followers still not fulfilled after 2" " seconds, nonetheless trying to continue..." 
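The controller hunks above and below replace the blocking `ResponseWaiter.wait_all()` with an awaitable variant that targets per-device node monitors instead of per-DAQ ones. A minimal, self-contained sketch of that wait-for-conditions pattern follows; it assumes nothing from the LabOne Q codebase, and every name in it is invented for illustration:

```python
import asyncio


class MiniResponseWaiter:
    """Toy stand-in for the awaitable response waiter: collects expected
    node values and waits until every one of them has been observed."""

    def __init__(self) -> None:
        self._conditions: dict[str, object] = {}

    def add(self, conditions: dict[str, object]) -> None:
        # Register path -> expected value pairs, one batch per monitor.
        self._conditions.update(conditions)

    def satisfy(self, path: str, value: object) -> None:
        # Called when a monitored node reports a new value.
        if self._conditions.get(path) == value:
            del self._conditions[path]

    async def wait_all(self, timeout: float) -> bool:
        async def _poll() -> None:
            while self._conditions:
                await asyncio.sleep(0.01)  # yield to the event loop between polls

        try:
            await asyncio.wait_for(_poll(), timeout)
            return True
        except asyncio.TimeoutError:
            return False  # caller logs what is still missing and decides how to proceed


async def main() -> None:
    waiter = MiniResponseWaiter()
    waiter.add({"/dev1234/awgs/0/ready": 1})
    waiter.satisfy("/dev1234/awgs/0/ready", 1)  # would come from a node monitor
    assert await waiter.wait_all(timeout=2.0)


asyncio.run(main())
```

Making the wait awaitable lets several monitors make progress concurrently instead of serializing on blocking polls, which is why the surrounding hunks also thread `async`/`await` through `start_monitor`, `flush_monitor`, and friends.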
@@ -316,9 +320,14 @@ async def _execute_one_step_leaders(self, with_pipeliner: bool): await batch_set(nodes_to_execute) async def _wait_execution_to_stop( - self, acquisition_type: AcquisitionType, with_pipeliner: bool + self, acquisition_type: AcquisitionType, rt_execution_info: RtExecutionInfo ): min_wait_time = self._recipe_data.recipe.max_step_execution_time + if rt_execution_info.with_pipeliner: + pipeliner_reload_worst_case = 1500e-6 + min_wait_time = ( + min_wait_time + pipeliner_reload_worst_case + ) * rt_execution_info.pipeliner_jobs if min_wait_time > 5: # Only inform about RT executions taking longer than 5s _logger.info("Estimated RT execution time: %.2f s.", min_wait_time) guarded_wait_time = round( @@ -328,12 +337,12 @@ async def _wait_execution_to_stop( response_waiter = ResponseWaiter() for _, device in self._devices.followers: response_waiter.add( - target=device.daq.node_monitor, + target=device.node_monitor, conditions=await device.conditions_for_execution_done( - acquisition_type, with_pipeliner=with_pipeliner + acquisition_type, with_pipeliner=rt_execution_info.with_pipeliner ), ) - if not response_waiter.wait_all(timeout=guarded_wait_time): + if not await response_waiter.wait_all(timeout=guarded_wait_time): _logger.warning( ( "Stop conditions still not fulfilled after %f s, estimated" @@ -374,24 +383,31 @@ async def _execute_one_step( ): _logger.debug("Step executing") - self._devices.flush_monitor() + await self._devices.flush_monitor() rt_execution_info = self._recipe_data.rt_execution_infos.get(rt_section_uid) - with_pipeliner = rt_execution_info.pipeliner_chunk_count is not None - await self._setup_one_step_execution(with_pipeliner=with_pipeliner) + await self._setup_one_step_execution( + with_pipeliner=rt_execution_info.with_pipeliner + ) # Can't batch everything together, because PQSC needs to be executed after HDs # otherwise it can finish before AWGs are started, and the trigger is lost - await self._execute_one_step_followers(with_pipeliner=with_pipeliner) - await self._execute_one_step_leaders(with_pipeliner=with_pipeliner) + await self._execute_one_step_followers( + with_pipeliner=rt_execution_info.with_pipeliner + ) + await self._execute_one_step_leaders( + with_pipeliner=rt_execution_info.with_pipeliner + ) _logger.debug("Execution started") await self._wait_execution_to_stop( - acquisition_type, with_pipeliner=with_pipeliner + acquisition_type, rt_execution_info=rt_execution_info + ) + await self._teardown_one_step_execution( + with_pipeliner=rt_execution_info.with_pipeliner ) - await self._teardown_one_step_execution(with_pipeliner=with_pipeliner) _logger.debug("Execution stopped") @@ -407,14 +423,14 @@ async def _connect_async(self, reset_devices: bool = False): await self._devices.connect(reset_devices=reset_devices) try: - self._dataserver_version = next(self._devices.leaders)[ - 1 - ].daq._dataserver_version + self._dataserver_version = next(self._devices.all)[1].daq.dataserver_version except StopIteration: # It may happen in emulation mode, mainly for tests # We use LATEST in emulation mode, keeping the consistency here. 
self._dataserver_version = LabOneVersion.LATEST + self._setup_caps = SetupCaps(self._dataserver_version) + self._last_connect_check_ts = now def disable_outputs( @@ -448,7 +464,7 @@ def disconnect(self): async def _disconnect_async(self): _logger.info("Disconnecting from all devices and servers...") - self._devices.disconnect() + await self._devices.disconnect() self._last_connect_check_ts = None _logger.info("Successfully disconnected from all devices and servers.") @@ -473,6 +489,7 @@ async def _execute_compiled_legacy_async( compiled_experiment.scheduled_experiment, self._devices, execution, + self._setup_caps, ) self._session = session @@ -492,6 +509,7 @@ async def _execute_compiled_async(self, job: ExecutionPayload): job.scheduled_experiment, self._devices, job.scheduled_experiment.execution, + self._setup_caps, ) self._session = None await self._execute_compiled_impl() @@ -653,6 +671,7 @@ async def _prepare_rt_execution( effective_averages, effective_averaging_mode, rt_execution_info.acquisition_type, + rt_execution_info.with_pipeliner, ) ) return nodes_to_prepare_rt @@ -765,7 +784,7 @@ async def _read_one_step_results(self, nt_step: NtStepKey, rt_section_uid: str): ) raw_results = await device.get_measurement_data( awg_key.awg_index, - rt_execution_info.acquisition_type, + rt_execution_info, result_indices, awg_config.result_length, effective_averages, diff --git a/laboneq/controller/devices/async_support.py b/laboneq/controller/devices/async_support.py index e3ae3f7..4bcb4c5 100644 --- a/laboneq/controller/devices/async_support.py +++ b/laboneq/controller/devices/async_support.py @@ -6,6 +6,8 @@ from contextlib import asynccontextmanager from typing import TYPE_CHECKING, Any, Callable, Coroutine, TypeVar +from laboneq.controller.devices.zi_emulator import EmulatorState + if TYPE_CHECKING: from laboneq.controller.communication import ServerQualifier from laboneq.controller.devices.device_zi import DeviceQualifier @@ -15,8 +17,11 @@ async def create_device_kernel_session( - *, server_qualifier: ServerQualifier, device_qualifier: DeviceQualifier -): + *, + server_qualifier: ServerQualifier, + device_qualifier: DeviceQualifier, + emulator_state: EmulatorState | None, +) -> None: return None # TODO(2K): stub, will return the real async api kernel session @@ -34,3 +39,11 @@ async def gather_and_apply(func: Callable[[list[U]], Coroutine[Any, Any, None]]) awaitables: list[Coroutine[Any, Any, U]] = [] yield awaitables await func(await _gather(*awaitables)) + + +async def set_parallel(api: Any, *node_sets): + return # TODO(2K): stub + + +async def get_raw(api: Any, path: str) -> dict[str, Any]: + return {} # TODO(2K): stub diff --git a/laboneq/controller/devices/device_collection.py b/laboneq/controller/devices/device_collection.py index b06b986..68b7ec1 100644 --- a/laboneq/controller/devices/device_collection.py +++ b/laboneq/controller/devices/device_collection.py @@ -21,10 +21,13 @@ ) from laboneq.controller.devices.device_factory import DeviceFactory from laboneq.controller.devices.device_setup_dao import DeviceSetupDAO +from laboneq.controller.devices.device_utils import prepare_emulator_state from laboneq.controller.devices.device_zi import DeviceZI +from laboneq.controller.devices.zi_emulator import EmulatorState from laboneq.controller.devices.zi_node_monitor import ( ConditionsChecker, NodeControlBase, + NodeMonitorBase, ResponseWaiter, filter_commands, filter_settings, @@ -51,15 +54,21 @@ def __init__( ): self._ds = DeviceSetupDAO( target_setup=target_setup, - dry_run=dry_run, 
ignore_version_mismatch=ignore_version_mismatch, ) self._dry_run = dry_run + self._emulator_state: EmulatorState | None = None self._ignore_version_mismatch = ignore_version_mismatch self._daqs: dict[str, DaqWrapper] = {} self._devices: dict[str, DeviceZI] = {} self._monitor_started = False + @property + def emulator_state(self) -> EmulatorState | None: + if self._emulator_state is None and self._dry_run: + self._emulator_state = prepare_emulator_state(self._ds) + return self._emulator_state + @property def all(self) -> Iterator[tuple[str, DeviceZI]]: for uid, device in self._devices.items(): @@ -77,6 +86,13 @@ def followers(self) -> Iterator[tuple[str, DeviceZI]]: if device.is_follower(): yield uid, device + @property + def node_monitors(self) -> set[NodeMonitorBase]: + all_monitors: set[NodeMonitorBase] = set() + for device in self._devices.values(): + all_monitors.add(device.node_monitor) + return all_monitors + def find_by_uid(self, device_uid) -> DeviceZI: device = self._devices.get(device_uid) if device is None: @@ -102,8 +118,8 @@ async def connect(self, reset_devices: bool = False): self._validate_dataserver_device_fw_compatibility() # TODO(2K): Uses zhinst utils -> async api version? self._prepare_devices() for _, device in self.all: - await device.connect() - self.start_monitor() + await device.connect(self.emulator_state) + await self.start_monitor() await self.configure_device_setup(reset_devices) async def _configure_parallel( @@ -136,14 +152,14 @@ def _add_set_nodes(daq, nodes: list[NodeControlBase]): # 1a. Unconditional command _add_set_nodes(device.daq, filter_commands(dev_nodes)) response_waiter.add( - target=device.daq.node_monitor, + target=device.node_monitor, conditions={n.path: n.value for n in filter_responses(dev_nodes)}, ) else: # 1b. Verify if device is already configured as desired dev_conditions_checker = ConditionsChecker() dev_conditions_checker.add( - target=device.daq.node_monitor, + target=device.node_monitor, conditions={n.path: n.value for n in filter_conditions(dev_nodes)}, ) conditions_checker.add_from(dev_conditions_checker) @@ -154,7 +170,7 @@ def _add_set_nodes(daq, nodes: list[NodeControlBase]): failed_paths = [path for path, _ in failed] failed_nodes = [n for n in dev_nodes if n.path in failed_paths] response_waiter.add( - target=device.daq.node_monitor, + target=device.node_monitor, conditions={ n.path: n.value for n in filter_wait_conditions(failed_nodes) }, @@ -162,7 +178,7 @@ def _add_set_nodes(daq, nodes: list[NodeControlBase]): _add_set_nodes(device.daq, filter_settings(dev_nodes)) response_waiter.add( - target=device.daq.node_monitor, + target=device.node_monitor, conditions={n.path: n.value for n in filter_responses(dev_nodes)}, ) @@ -176,7 +192,7 @@ def _add_set_nodes(daq, nodes: list[NodeControlBase]): return timeout = 10 - if not response_waiter.wait_all(timeout=timeout): + if not await response_waiter.wait_all(timeout=timeout): raise LabOneQControllerException( f"Internal error: {config_name} for devices " f"{[d.dev_repr for d in devices]} is not complete within {timeout}s. 
" @@ -195,7 +211,7 @@ def _add_set_nodes(daq, nodes: list[NodeControlBase]): async def configure_device_setup(self, reset_devices: bool): _logger.info("Configuring the device setup") - self.flush_monitor() # Ensure status is up-to-date + await self.flush_monitor() # Ensure status is up-to-date if reset_devices: await self._configure_parallel( @@ -203,7 +219,9 @@ async def configure_device_setup(self, reset_devices: bool): lambda d: cast(DeviceZI, d).load_factory_preset_control_nodes(), "Reset to factory defaults", ) - self.flush_monitor() # Consume any updates resulted from the above reset + await ( + self.flush_monitor() + ) # Consume any updates resulted from the above reset # TODO(2K): Error check for daq in self._daqs.values(): daq.clear_cache() @@ -247,12 +265,13 @@ async def configure_device_setup(self, reset_devices: bool): targets = children _logger.info("The device setup is configured") - def disconnect(self): - self.reset_monitor() + async def disconnect(self): + await self.reset_monitor() for device in self._devices.values(): device.disconnect() self._devices = {} self._daqs = {} + self._emulator_state = None async def disable_outputs( self, @@ -306,19 +325,19 @@ async def on_experiment_end(self): all_actions.extend(await device.maybe_async(device.on_experiment_end())) await batch_set(all_actions) - def start_monitor(self): + async def start_monitor(self): if self._monitor_started: return response_waiter = ResponseWaiter() - for daq in self._daqs.values(): - daq.node_monitor.start() + for node_monitor in self.node_monitors: + await node_monitor.start() response_waiter.add( - target=daq.node_monitor, - conditions={path: None for path in daq.node_monitor._nodes}, + target=node_monitor, + conditions={path: None for path in node_monitor._nodes}, ) - if not response_waiter.wait_all(timeout=2): + if not await response_waiter.wait_all(timeout=2): raise LabOneQControllerException( f"Internal error: Didn't get all the status node values within 2s. 
" f"Missing:\n{response_waiter.remaining_str()}" @@ -326,13 +345,13 @@ def start_monitor(self): self._monitor_started = True - def flush_monitor(self): - for daq in self._daqs.values(): - daq.node_monitor.flush() + async def flush_monitor(self): + for node_monitor in self.node_monitors: + await node_monitor.flush() - def reset_monitor(self): - for daq in self._daqs.values(): - daq.node_monitor.reset() + async def reset_monitor(self): + for node_monitor in self.node_monitors: + await node_monitor.reset() self._monitor_started = False def _validate_dataserver_device_fw_compatibility(self): @@ -410,8 +429,11 @@ async def _prepare_daqs(self): server_qualifier.host, server_qualifier.port, ) - if server_qualifier.dry_run: - daq = DaqWrapperDryRun(server_uid, server_qualifier) + daq: DaqWrapper + if self._dry_run: + daq = DaqWrapperDryRun( + server_uid, server_qualifier, self.emulator_state + ) else: daq = DaqWrapper(server_uid, server_qualifier) await daq.validate_connection() @@ -432,13 +454,13 @@ async def check_errors(self, raise_on_error: bool = True) -> str | None: return msg async def update_warning_nodes(self): - for daq in self._daqs.values(): - daq.node_monitor.poll() + for node_monitor in self.node_monitors: + await node_monitor.poll() for _, device in self.all: device.update_warning_nodes( { - node: daq.node_monitor.get_last(node) + node: device.node_monitor.get_last(node) for node in device.collect_warning_nodes() } ) diff --git a/laboneq/controller/devices/device_hdawg.py b/laboneq/controller/devices/device_hdawg.py index 4b42bb8..09e155f 100644 --- a/laboneq/controller/devices/device_hdawg.py +++ b/laboneq/controller/devices/device_hdawg.py @@ -492,7 +492,7 @@ def _collect_dio_configuration_nodes( nc.add("dios/0/mode", 3) nc.add("dios/0/drive", 0xC) - # Loop over at least AWG instance to cover the case that the instrument is only used + # Loop over at least one AWG instance to cover the case that the instrument is only used # as a communication proxy. Some of the nodes on the AWG branch are needed to get # proper communication between HDAWG and UHFQA. 
for awg_index in ( diff --git a/laboneq/controller/devices/device_pqsc.py b/laboneq/controller/devices/device_pqsc.py index 24ac2bf..eae99b6 100644 --- a/laboneq/controller/devices/device_pqsc.py +++ b/laboneq/controller/devices/device_pqsc.py @@ -182,18 +182,3 @@ async def collect_reset_nodes(self) -> list[DaqNodeSetAction]: reset_nodes = await super().collect_reset_nodes() reset_nodes.extend(await self.maybe_async(nc)) return reset_nodes - - def _prepare_emulator(self): - super()._prepare_emulator() - - # Make emulated PQSC aware of the down-stream devices - if self.dry_run: - enabled_zsyncs = {} - for port, _, to_dev in self.downlinks(): - if enabled_zsyncs.get(port.lower()) == to_dev.serial: - continue - enabled_zsyncs[port.lower()] = to_dev.serial - self._set_emulation_option( - option=f"{port.lower()}/connection/serial", - value=to_dev.serial[3:], - ) diff --git a/laboneq/controller/devices/device_pretty_printer.py b/laboneq/controller/devices/device_pretty_printer.py index 8274166..c36aca4 100644 --- a/laboneq/controller/devices/device_pretty_printer.py +++ b/laboneq/controller/devices/device_pretty_printer.py @@ -24,9 +24,10 @@ class DevicePRETTYPRINTER(DeviceZI): def __init__(self, device_qualifier: DeviceQualifier, daq: DaqWrapper): super().__init__(device_qualifier=device_qualifier, daq=daq) + self._node_monitor = self.daq.node_monitor self._device_class = 0x1 - async def connect(self): + async def connect(self, emulator_state: Any): _logger.info( "%s: Connected to %s", self.dev_repr, diff --git a/laboneq/controller/devices/device_setup_dao.py b/laboneq/controller/devices/device_setup_dao.py index 97c1dfd..97bf176 100644 --- a/laboneq/controller/devices/device_setup_dao.py +++ b/laboneq/controller/devices/device_setup_dao.py @@ -6,7 +6,7 @@ import copy import logging import math -from typing import TYPE_CHECKING, Iterator +from typing import TYPE_CHECKING, ItemsView, Iterator from laboneq.controller.communication import ServerQualifier from laboneq.controller.devices.device_zi import DeviceOptions, DeviceQualifier @@ -22,11 +22,8 @@ _logger = logging.getLogger(__name__) -def _make_server_qualifier( - server: TargetServer, dry_run: bool, ignore_version_mismatch: bool -): +def _make_server_qualifier(server: TargetServer, ignore_version_mismatch: bool): return ServerQualifier( - dry_run=dry_run, host=server.host, port=server.port, ignore_version_mismatch=ignore_version_mismatch, @@ -34,7 +31,7 @@ def _make_server_qualifier( def _make_device_qualifier( - target_device: TargetDevice, dry_run: bool, has_shf: bool + target_device: TargetDevice, has_shf: bool ) -> DeviceQualifier: driver = target_device.device_type.name options = DeviceOptions( @@ -58,7 +55,6 @@ def _make_device_qualifier( server_uid=target_device.server.uid, driver=driver, options=options, - dry_run=dry_run, ) @@ -66,14 +62,12 @@ class DeviceSetupDAO: def __init__( self, target_setup: TargetSetup, - dry_run: bool = True, ignore_version_mismatch: bool = False, ): self._target_setup = target_setup self._servers: dict[str, ServerQualifier] = { server.uid: _make_server_qualifier( server=server, - dry_run=dry_run, ignore_version_mismatch=ignore_version_mismatch, ) for server in target_setup.servers @@ -89,9 +83,7 @@ def __init__( break self._devices: list[DeviceQualifier] = [ - _make_device_qualifier( - target_device=device, dry_run=dry_run, has_shf=has_shf - ) + _make_device_qualifier(target_device=device, has_shf=has_shf) for device in target_setup.devices ] self._used_outputs: dict[str, dict[str, list[int]]] = { @@ 
-107,17 +99,17 @@ def __init__( } @property - def servers(self) -> Iterator[tuple[str, ServerQualifier]]: + def servers(self) -> ItemsView[str, ServerQualifier]: return self._servers.items() @property def instruments(self) -> Iterator[DeviceQualifier]: return iter(self._devices) - def downlinks_by_device_uid(self, device_uid: str) -> list[str]: + def downlinks_by_device_uid(self, device_uid: str) -> list[tuple[str, str]]: return self._downlinks[device_uid] - def resolve_ls_path_outputs(self, ls_path: str) -> tuple[str, set[int]]: + def resolve_ls_path_outputs(self, ls_path: str) -> tuple[str | None, set[int]]: for device_uid, used_outputs in self._used_outputs.items(): outputs = used_outputs.get(ls_path) if outputs: diff --git a/laboneq/controller/devices/device_shfppc.py b/laboneq/controller/devices/device_shfppc.py index f5e5420..cabd075 100644 --- a/laboneq/controller/devices/device_shfppc.py +++ b/laboneq/controller/devices/device_shfppc.py @@ -3,8 +3,10 @@ from __future__ import annotations +import math from typing import Iterator +from laboneq.controller.util import LabOneQControllerException from laboneq.controller.attribute_value_tracker import ( AttributeName, DeviceAttribute, @@ -13,12 +15,15 @@ from laboneq.controller.communication import DaqNodeSetAction from laboneq.controller.devices.device_zi import DeviceZI, NodeCollector from laboneq.controller.recipe_processor import DeviceRecipeData, RecipeData +from laboneq.data.calibration import CancellationSource from laboneq.data.recipe import Initialization class DeviceSHFPPC(DeviceZI): attribute_keys = { - "pump_freq": AttributeName.PPC_PUMP_FREQ, + "cancellation_phase": AttributeName.PPC_CANCELLATION_PHASE, + "cancellation_attenuation": AttributeName.PPC_CANCELLATION_ATTENUATION, + "pump_frequency": AttributeName.PPC_PUMP_FREQUENCY, "pump_power": AttributeName.PPC_PUMP_POWER, "probe_frequency": AttributeName.PPC_PROBE_FREQUENCY, "probe_power": AttributeName.PPC_PROBE_POWER, @@ -33,12 +38,17 @@ def __init__(self, *args, **kwargs): def _key_to_path(self, key: str, ch: int): keys_to_paths = { - "_on": f"/{self.serial}/ppchannels/{ch}/synthesizer/pump/on", - "pump_freq": f"/{self.serial}/ppchannels/{ch}/synthesizer/pump/freq", + "pump_on": f"/{self.serial}/ppchannels/{ch}/synthesizer/pump/on", + "pump_frequency": f"/{self.serial}/ppchannels/{ch}/synthesizer/pump/freq", "pump_power": f"/{self.serial}/ppchannels/{ch}/synthesizer/pump/power", - "cancellation": f"/{self.serial}/ppchannels/{ch}/cancellation/on", - "alc_engaged": f"/{self.serial}/ppchannels/{ch}/synthesizer/pump/alc", - "use_probe": f"/{self.serial}/ppchannels/{ch}/synthesizer/probe/on", + "pump_filter_on": f"/{self.serial}/ppchannels/{ch}/synthesizer/pump/filter", + "cancellation_on": f"/{self.serial}/ppchannels/{ch}/cancellation/on", + "cancellation_source": f"/{self.serial}/ppchannels/{ch}/cancellation/source", + "cancellation_source_frequency": f"/{self.serial}/ppchannels/{ch}/cancellation/sourcefreq", + "cancellation_phase": f"/{self.serial}/ppchannels/{ch}/cancellation/phaseshift", + "cancellation_attenuation": f"/{self.serial}/ppchannels/{ch}/cancellation/attenuation", + "alc_on": f"/{self.serial}/ppchannels/{ch}/synthesizer/pump/alc", + "probe_on": f"/{self.serial}/ppchannels/{ch}/synthesizer/probe/on", "probe_frequency": f"/{self.serial}/ppchannels/{ch}/synthesizer/probe/freq", "probe_power": f"/{self.serial}/ppchannels/{ch}/synthesizer/probe/power", } @@ -71,18 +81,44 @@ async def collect_initialization_nodes( recipe_data: RecipeData, ) -> list[DaqNodeSetAction]: 
nc = NodeCollector() - ppchannels = initialization.ppchannels or [] + ppchannels = { + settings["channel"]: settings + for settings in initialization.ppchannels or [] + } def _convert(value): if isinstance(value, bool): return 1 if value else 0 return value - for settings in ppchannels: - ch = settings["channel"] - nc.add(self._key_to_path("_on", ch), 1) + # each channel uses the neighboring channel's synthesizer for generating the pump tone + probe_synth_channel = [1, 0, 3, 2] + + for ch, settings in ppchannels.items(): for key, value in settings.items(): - if value is None or key in [*DeviceSHFPPC.attribute_keys, "channel"]: + if key == "channel": + continue + if key == "probe_on" and value: + probe_channel = ppchannels.get(probe_synth_channel[ch]) + if probe_channel is not None and probe_channel["pump_on"]: + raise LabOneQControllerException( + f"{self.dev_repr}: cannot use probe tone on" + f" channel {ch} while the pump tone generation is also" + f" enabled on channel {probe_synth_channel[ch]}" + ) + elif key == "cancellation_source": + if value == CancellationSource.INTERNAL: + value = 0 + else: + assert value == CancellationSource.EXTERNAL + value = 1 + if settings.get("cancellation_source_frequency") is None: + raise LabOneQControllerException( + f"{self.dev_repr}: Using the external" + f" cancellation source requires specifying the" + f" cancellation frequency" + ) + if value is None or key in DeviceSHFPPC.attribute_keys: # Skip not set values, or values that are bound to sweep params and will # be set during the NT execution. continue @@ -97,6 +133,8 @@ def collect_prepare_nt_step_nodes( for ch in range(self._channels): for key, attr_name in DeviceSHFPPC.attribute_keys.items(): [value], updated = attributes.resolve(keys=[(attr_name, ch)]) + if value is not None and key == "cancellation_phase": + value *= 180 / math.pi if updated: path = self._key_to_path(key, ch) nc.add(path, value) diff --git a/laboneq/controller/devices/device_shfqa.py b/laboneq/controller/devices/device_shfqa.py index 51d0aa1..f58e496 100644 --- a/laboneq/controller/devices/device_shfqa.py +++ b/laboneq/controller/devices/device_shfqa.py @@ -2,6 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 from __future__ import annotations +import asyncio import itertools import logging @@ -110,6 +111,7 @@ def __init__(self, *args, **kwargs): self.dev_type = "SHFQA4" self.dev_opts = [] self._channels = 4 + self._integrators = 16 self._wait_for_awgs = True self._emit_trigger = False self.pipeliner_set_node_base(f"/{self.serial}/qachannels") @@ -135,6 +137,10 @@ def _process_dev_opts(self): self.dev_type, ) self._channels = 4 + if "16W" in self.dev_opts or self.dev_type == "SHFQA4": + self._integrators = 16 + else: + self._integrators = 8 def _get_sequencer_type(self) -> str: return "qa" @@ -217,9 +223,14 @@ def _nodes_to_monitor_impl(self) -> list[str]: f"/{self.serial}/qachannels/{awg}/generator/ready", f"/{self.serial}/qachannels/{awg}/spectroscopy/psd/enable", f"/{self.serial}/qachannels/{awg}/spectroscopy/result/enable", + f"/{self.serial}/qachannels/{awg}/spectroscopy/result/data/wave", f"/{self.serial}/qachannels/{awg}/readout/result/enable", ] ) + for result_index in range(self._integrators): + nodes.append( + f"/{self.serial}/qachannels/{awg}/readout/result/data/{result_index}/wave", + ) nodes.extend(self.pipeliner_control_nodes(awg)) return nodes @@ -231,28 +242,30 @@ async def configure_acquisition( averages: int, averaging_mode: AveragingMode, acquisition_type: AcquisitionType, + with_pipeliner: bool, ) -> 
list[DaqNodeSetAction]: nc = NodeCollector() - average_mode = 0 if averaging_mode == AveragingMode.CYCLIC else 1 - nc.extend( - self._configure_readout( - acquisition_type, - awg_key, - awg_config, - integrator_allocations, - averages, - average_mode, + if not with_pipeliner: + average_mode = 0 if averaging_mode == AveragingMode.CYCLIC else 1 + nc.extend( + self._configure_readout( + acquisition_type, + awg_key, + awg_config, + integrator_allocations, + averages, + average_mode, + ) ) - ) - nc.extend( - self._configure_spectroscopy( - acquisition_type, - awg_key.awg_index, - awg_config.result_length, - averages, - average_mode, + nc.extend( + self._configure_spectroscopy( + acquisition_type, + awg_key.awg_index, + awg_config.result_length, + averages, + average_mode, + ) ) - ) nc.extend( self._configure_scope( enable=acquisition_type == AcquisitionType.RAW, @@ -447,19 +460,6 @@ async def conditions_for_execution_done( for awg_index in self._allocated_awgs } ) - - for awg_index in self._allocated_awgs: - if is_spectroscopy(acquisition_type): - conditions[ - f"/{self.serial}/qachannels/{awg_index}/spectroscopy/result/enable" - ] = 0 - elif acquisition_type in [ - AcquisitionType.INTEGRATION, - AcquisitionType.DISCRIMINATION, - ]: - conditions[ - f"/{self.serial}/qachannels/{awg_index}/readout/result/enable" - ] = 0 return await self.maybe_async_wait(conditions) def _validate_initialization(self, initialization: Initialization): @@ -761,6 +761,11 @@ def prepare_upload_all_binary_waves( ) ) else: + nc.add( + f"/{self.serial}/qachannels/{awg_index}/generator/clearwave", + 1, + cache=False, + ) max_len = MAX_WAVEFORM_LENGTH_INTEGRATION for wave in waves: wave_len = len(wave.samples) @@ -803,6 +808,56 @@ def prepare_upload_all_integration_weights( return nc + def prepare_pipeliner_job_nodes( + self, + recipe_data: RecipeData, + rt_section_uid: str, + awg_key: AwgKey, + pipeliner_job: int, + ) -> NodeCollector: + nc = NodeCollector() + + rt_execution_info = recipe_data.rt_execution_infos[rt_section_uid] + + if not rt_execution_info.with_pipeliner: + return nc + + if not rt_execution_info.result_logger_pipelined and pipeliner_job > 0: + return nc + + awg_config = recipe_data.awg_configs[awg_key] + + # TODO(2K): code duplication with Controller._prepare_rt_execution + if rt_execution_info.averaging_mode == AveragingMode.SINGLE_SHOT: + effective_averages = 1 + effective_averaging_mode = AveragingMode.CYCLIC + # TODO(2K): handle sequential + else: + effective_averages = rt_execution_info.averages + effective_averaging_mode = rt_execution_info.averaging_mode + + average_mode = 0 if effective_averaging_mode == AveragingMode.CYCLIC else 1 + nc.extend( + self._configure_readout( + rt_execution_info.acquisition_type, + awg_key, + awg_config, + recipe_data.recipe.integrator_allocations, + effective_averages, + average_mode, + ) + ) + nc.extend( + self._configure_spectroscopy( + rt_execution_info.acquisition_type, + awg_key.awg_index, + awg_config.result_length, + effective_averages, + average_mode, + ) + ) + return nc + def _integrator_has_consistent_msd_num_state( self, integrator_allocation: IntegratorAllocation.Data ): @@ -1013,7 +1068,7 @@ async def collect_trigger_configuration_nodes( async def get_measurement_data( self, channel: int, - acquisition_type: AcquisitionType, + rt_execution_info: RtExecutionInfo, result_indices: list[int], num_results: int, hw_averages: int, @@ -1021,31 +1076,92 @@ async def get_measurement_data( assert len(result_indices) == 1 result_path = 
f"/{self.serial}/qachannels/{channel}/" + ( "spectroscopy/result/data/wave" - if is_spectroscopy(acquisition_type) + if is_spectroscopy(rt_execution_info.acquisition_type) else f"readout/result/data/{result_indices[0]}/wave" ) - attempts = 3 # Hotfix HBAR-949 - while attempts > 0: - attempts -= 1 - # @TODO(andreyk): replace the raw daq reply parsing on site here and hide it - # inside Communication class - data_node_query = await self.get_raw(result_path) - actual_num_measurement_points = len( - data_node_query[result_path][0]["vector"] - ) - if actual_num_measurement_points < num_results: - time.sleep(0.1) - continue - break - assert actual_num_measurement_points == num_results, ( - f"number of measurement points {actual_num_measurement_points} returned by daq " - f"from device '{self.dev_repr}' does not match length of recipe " - f"measurement_map which is {num_results}" + ch_repr = ( + f"{self.dev_repr}:ch{channel}:spectroscopy" + if is_spectroscopy(rt_execution_info.acquisition_type) + else f"{self.dev_repr}:ch{channel}:readout{result_indices[0]}" ) - result: npt.ArrayLike = data_node_query[result_path][0]["vector"] - if acquisition_type == AcquisitionType.DISCRIMINATION: - return result.real - return result + + pipeliner_jobs = ( + rt_execution_info.pipeliner_jobs + if rt_execution_info.result_logger_pipelined + else 1 + ) + + rt_result: npt.ArrayLike = np.empty( + pipeliner_jobs * num_results, dtype=np.complex128 + ) + rt_result[:] = np.nan + jobs_processed: set[int] = set() + + expected_job_id = 0 # TODO(2K): For compatibility with 23.10 + + read_result_timeout_s = 5 + last_result_received = None + while True: + job_result = self.node_monitor.pop(result_path) + if job_result is None: + if len(jobs_processed) == pipeliner_jobs: + break + now = time.monotonic() + if last_result_received is None: + last_result_received = now + if now - last_result_received > read_result_timeout_s: + _logger.error( + f"{ch_repr}: Failed to receive all results within {read_result_timeout_s} s, timing out." + ) + break + await asyncio.sleep(0.1) + await self.node_monitor.poll() + continue + else: + last_result_received = None + + job_id = job_result["properties"].get("jobid", expected_job_id) + expected_job_id += 1 + if job_id in jobs_processed: + _logger.error( + f"{ch_repr}: Ignoring duplicate job id {job_id} in the results." + ) + continue + if job_id >= pipeliner_jobs: + _logger.error( + f"{ch_repr}: Ignoring job id {job_id} in the results, as it " + f"falls outside the defined range of {pipeliner_jobs} jobs." + ) + continue + jobs_processed.add(job_id) + + num_samples = job_result["properties"].get("numsamples", num_results) + + if num_samples != num_results: + _logger.error( + f"{ch_repr}: The number of measurements acquired ({num_samples}) " + f"does not match the number of measurements defined ({num_results}). " + "Possibly the time between measurements within a loop is too short, " + "or the measurement was not started." + ) + + valid_samples = min(num_results, num_samples) + np.put( + rt_result, + range(job_id * num_results, job_id * num_results + valid_samples), + job_result["vector"][:valid_samples], + mode="clip", + ) + + missing_jobs = set(range(pipeliner_jobs)) - jobs_processed + if len(missing_jobs) > 0: + _logger.error( + f"{ch_repr}: Results for job id(s) {missing_jobs} are missing." 
+ ) + + if rt_execution_info.acquisition_type == AcquisitionType.DISCRIMINATION: + return rt_result.real + return rt_result async def get_input_monitor_data(self, channel: int, num_results: int): result_path_ch = f"/{self.serial}/scopes/0/channels/{channel}/wave" @@ -1053,24 +1169,6 @@ async def get_input_monitor_data(self, channel: int, num_results: int): data = node_data[result_path_ch][0]["vector"][0:num_results] return data - async def check_results_acquired_status( - self, channel, acquisition_type: AcquisitionType, result_length, hw_averages - ): - unit = "spectroscopy" if is_spectroscopy(acquisition_type) else "readout" - results_acquired_path = ( - f"/{self.serial}/qachannels/{channel}/{unit}/result/acquired" - ) - batch_get_results = await self.get_raw_values(results_acquired_path) - actual_results = batch_get_results[results_acquired_path] - expected_results = result_length * hw_averages - if actual_results != expected_results: - raise LabOneQControllerException( - f"The number of measurements ({actual_results}) executed for device {self.serial} " - f"on channel {channel} does not match the number of measurements " - f"defined ({expected_results}). Probably the time between measurements or within " - f"a loop is too short. Please contact Zurich Instruments." - ) - async def collect_reset_nodes(self) -> list[DaqNodeSetAction]: nc = NodeCollector(base=f"/{self.serial}/") # Reset pipeliner first, attempt to set generator enable leads to FW error if pipeliner was enabled. diff --git a/laboneq/controller/devices/device_uhfqa.py b/laboneq/controller/devices/device_uhfqa.py index a5de5d3..a82b299 100644 --- a/laboneq/controller/devices/device_uhfqa.py +++ b/laboneq/controller/devices/device_uhfqa.py @@ -142,6 +142,7 @@ async def configure_acquisition( averages: int, averaging_mode: AveragingMode, acquisition_type: AcquisitionType, + with_pipeliner: bool, ) -> list[DaqNodeSetAction]: nc = NodeCollector() nc.extend( @@ -585,22 +586,23 @@ async def _get_integrator_measurement_data( # Communication class data_node_query = await self.get_raw(result_path) assert len(data_node_query[result_path][0]["vector"]) == num_results, ( - "number of measurement points returned by daq from device " - "'{self.uid}' does not match length of recipe" - " measurement_map" + f"{self.dev_repr}: number of measurement points returned" + " does not match length of recipe measurement_map" ) return data_node_query[result_path][0]["vector"] / averages_divider async def get_measurement_data( self, channel: int, - acquisition_type: AcquisitionType, + rt_execution_info: RtExecutionInfo, result_indices: list[int], num_results: int, hw_averages: int, ): averages_divider = ( - 1 if acquisition_type == AcquisitionType.DISCRIMINATION else hw_averages + 1 + if rt_execution_info.acquisition_type == AcquisitionType.DISCRIMINATION + else hw_averages ) assert len(result_indices) <= 2 if len(result_indices) == 1: diff --git a/laboneq/controller/devices/device_utils.py b/laboneq/controller/devices/device_utils.py index c1c07cc..61f257e 100644 --- a/laboneq/controller/devices/device_utils.py +++ b/laboneq/controller/devices/device_utils.py @@ -2,10 +2,15 @@ # SPDX-License-Identifier: Apache-2.0 from __future__ import annotations -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING + +import zhinst.core + +from laboneq.controller.devices.zi_emulator import EmulatorState if TYPE_CHECKING: - from laboneq.controller.devices.device_zi import DeviceQualifier, DeviceZI + from laboneq.controller.devices.device_setup_dao 
import DeviceSetupDAO + from laboneq.controller.devices.device_zi import DeviceQualifier def calc_dev_type(device_qualifier: DeviceQualifier) -> str: @@ -15,6 +20,53 @@ def calc_dev_type(device_qualifier: DeviceQualifier) -> str: return device_qualifier.driver -def dev_api(device: DeviceZI) -> tuple[Any, str]: - """Temporary helper to unify emulation interface for the async API.""" - return (device._api or device._daq._zi_api_object, device.serial) +def zhinst_core_version() -> str: + [major, minor] = zhinst.core.__version__.split(".")[0:2] + return f"{major}.{minor}" + + +def prepare_emulator_state(ds: DeviceSetupDAO) -> EmulatorState: + emulator_state = EmulatorState() + + # Ensure emulated data server version matches installed zhinst.core + emulator_state.set_option("ZI", "about/version", zhinst_core_version()) + + for device_qualifier in ds.instruments: + options = device_qualifier.options + dev_type = calc_dev_type(device_qualifier) + emulator_state.map_device_type(options.serial, dev_type) + emulator_state.set_option(options.serial, "dev_type", options.dev_type) + if options.expected_installed_options is not None: + exp_opts = options.expected_installed_options.upper().split("/") + if len(exp_opts) > 0 and exp_opts[0] == "": + exp_opts.pop(0) + if len(exp_opts) > 0: + emulator_state.set_option( + options.serial, "features/devtype", exp_opts.pop(0) + ) + if len(exp_opts) > 0: + emulator_state.set_option( + options.serial, "features/options", "\n".join(exp_opts) + ) + + if dev_type == "PQSC": + enabled_zsyncs: dict[str, str] = {} + for from_port, to_dev_uid in ds.downlinks_by_device_uid( + device_qualifier.uid + ): + to_dev_qualifier = next( + (i for i in ds.instruments if i.uid == to_dev_uid), None + ) + if to_dev_qualifier is None: + continue + to_dev_serial = to_dev_qualifier.options.serial.lower() + if enabled_zsyncs.get(from_port.lower()) == to_dev_serial: + continue + enabled_zsyncs[from_port.lower()] = to_dev_serial + emulator_state.set_option( + options.serial, + option=f"{from_port.lower()}/connection/serial", + value=to_dev_serial[3:], + ) + + return emulator_state diff --git a/laboneq/controller/devices/device_zi.py b/laboneq/controller/devices/device_zi.py index b1350c2..eda0f69 100644 --- a/laboneq/controller/devices/device_zi.py +++ b/laboneq/controller/devices/device_zi.py @@ -28,11 +28,17 @@ CachingStrategy, DaqNodeSetAction, DaqWrapper, - map_device_type, ) -from laboneq.controller.devices.device_utils import dev_api -from laboneq.controller.devices.zi_emulator import set_emulation_option -from laboneq.controller.devices.zi_node_monitor import NodeControlBase +from laboneq.controller.devices.async_support import ( + create_device_kernel_session, + get_raw, + set_parallel, +) +from laboneq.controller.devices.zi_emulator import EmulatorState +from laboneq.controller.devices.zi_node_monitor import ( + NodeControlBase, + NodeMonitorBase, +) from laboneq.controller.pipeliner_reload_tracker import PipelinerReloadTracker from laboneq.controller.recipe_processor import ( AwgConfig, @@ -95,7 +101,6 @@ class DeviceQualifier: server_uid: str driver: str options: DeviceOptions - dry_run: bool = True @dataclass @@ -200,6 +205,7 @@ def __init__(self, device_qualifier: DeviceQualifier, daq: DaqWrapper): self._daq = daq self._api = None # TODO(2K): Add type labone.Instrument + self._node_monitor: NodeMonitorBase | None = None self.dev_type: str | None = None self.dev_opts: list[str] = [] self._connected = False @@ -229,10 +235,6 @@ def __init__(self, device_qualifier: DeviceQualifier, 
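# A minimal usage sketch of the two helpers above, assuming a DeviceSetupDAO `ds`
# whose PQSC (hypothetical serial "DEV10001") downlinks port "ZSYNCS/0" to an
# HDAWG with hypothetical serial "DEV8001":
emulator_state = prepare_emulator_state(ds)
emulator_state.get_device_type("dev8001")  # -> "HDAWG" (serial lookup is case-insensitive)
emulator_state.get_options("DEV10001")["zsyncs/0/connection/serial"]  # -> "8001"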
daq: DaqWrapper): def device_qualifier(self): return self._device_qualifier - @property - def dry_run(self): - return self._device_qualifier.dry_run - @property def dev_repr(self) -> str: return f"{self._device_qualifier.driver.upper()}:{self.serial}" @@ -261,6 +263,11 @@ def interface(self): def daq(self): return self._daq + @property + def node_monitor(self) -> NodeMonitorBase: + assert self._node_monitor is not None + return self._node_monitor + @property def is_secondary(self) -> bool: return False @@ -268,7 +275,7 @@ def is_secondary(self) -> bool: def to_daq_actions(self, nodes: NodeCollector) -> list[DaqNodeSetAction]: return [ DaqNodeSetAction( - self._daq, + self.daq, node.path, node.value, caching_strategy=CachingStrategy.CACHE @@ -281,9 +288,7 @@ def to_daq_actions(self, nodes: NodeCollector) -> list[DaqNodeSetAction]: async def maybe_async(self, nodes: NodeCollector) -> list[DaqNodeSetAction]: if self._api is not None: - # await set_parallel( - # self._api, *[(node.path, node.value) for node in nodes()] - # ) + await set_parallel(self._api, *[(node.path, node.value) for node in nodes]) return [] return self.to_daq_actions(nodes) @@ -376,7 +381,14 @@ def is_follower(self): return len(self._uplinks) > 0 or self.is_standalone() def is_standalone(self): - return len(self._uplinks) == 0 and len(self._downlinks) == 0 + def is_ppc(dev): + return ( + getattr(getattr(dev, "device_qualifier", None), "driver", None) + == "SHFPPC" + ) + + no_ppc_uplinks = [u for u in self._uplinks if u() and not is_ppc(u())] + return len(no_ppc_uplinks) == 0 and len(self._downlinks) == 0 def _validate_initialization(self, initialization: Initialization): pass @@ -425,34 +437,20 @@ async def collect_trigger_configuration_nodes( ) -> list[DaqNodeSetAction]: return [] - def _set_emulation_option(self, option: str, value: Any): - set_emulation_option(*dev_api(self), option, value) - - def _prepare_emulator(self): - if not self.dry_run: - return - - if self._api is None: - map_device_type(self._daq._zi_api_object, self.device_qualifier) - - self._set_emulation_option("dev_type", self.options.dev_type) - if self.options.expected_installed_options is not None: - exp_opts = self.options.expected_installed_options.upper().split("/") - if len(exp_opts) > 0 and exp_opts[0] == "": - exp_opts.pop(0) - if len(exp_opts) > 0: - self._set_emulation_option("features/devtype", exp_opts.pop(0)) - if len(exp_opts) > 0: - self._set_emulation_option("features/options", "\n".join(exp_opts)) - - async def _connect_to_data_server(self): + async def _connect_to_data_server(self, emulator_state: EmulatorState | None): if self._connected: return _logger.debug("%s: Connecting to %s interface.", self.dev_repr, self.interface) try: - self._prepare_emulator() - self._daq.connectDevice(self.serial, self.interface) + self._api = await create_device_kernel_session( + device_qualifier=self._device_qualifier, + server_qualifier=self.daq._server_qualifier, + emulator_state=emulator_state, + ) + if self._api is None: + self.daq.connectDevice(self.serial, self.interface) + self._node_monitor = self.daq.node_monitor except RuntimeError as exc: raise LabOneQControllerException( f"{self.dev_repr}: Connecting failed" @@ -473,15 +471,18 @@ async def _connect_to_data_server(self): self._connected = True - async def connect(self): - await self._connect_to_data_server() - self._daq.node_monitor.add_nodes(self.nodes_to_monitor()) + async def connect(self, emulator_state: EmulatorState | None): + await self._connect_to_data_server(emulator_state) + 
self.node_monitor.add_nodes(self.nodes_to_monitor()) def disconnect(self): if not self._connected: return - self._daq.disconnectDevice(self.serial) + if self._api is None: + self.daq.disconnectDevice(self.serial) + else: + self._api = None # TODO(2K): Proper disconnect? self._connected = False async def disable_outputs( @@ -603,11 +604,14 @@ async def configure_acquisition( averages: int, averaging_mode: AveragingMode, acquisition_type: AcquisitionType, + with_pipeliner: bool, ) -> list[DaqNodeSetAction]: return [] async def get_raw(self, path: str) -> dict[str, Any]: - return self._daq.get_raw(path) + if self._api is not None: + return await get_raw(self._api, path) + return self.daq.get_raw(path) async def get_raw_values(self, path: str) -> dict[str, Any]: return {p: v["value"][-1] for p, v in (await self.get_raw(path)).items()} @@ -615,7 +619,7 @@ async def get_raw_values(self, path: str) -> dict[str, Any]: async def get_measurement_data( self, channel: int, - acquisition_type: AcquisitionType, + rt_execution_info: RtExecutionInfo, result_indices: list[int], num_results: int, hw_averages: int, @@ -688,9 +692,8 @@ async def prepare_artifacts( ]: artifacts = recipe_data.scheduled_experiment.artifacts rt_execution_info = recipe_data.rt_execution_infos[rt_section_uid] - with_pipeliner = rt_execution_info.pipeliner_chunk_count is not None - if with_pipeliner and not self.has_pipeliner: + if rt_execution_info.with_pipeliner and not self.has_pipeliner: raise LabOneQControllerException( f"{self.dev_repr}: Pipeliner is not supported by the device." ) @@ -700,13 +703,13 @@ async def prepare_artifacts( wf_eff = self._choose_wf_collector(elf_nodes, wf_nodes) upload_ready_conditions: dict[str, Any] = {} - if with_pipeliner: + if rt_execution_info.with_pipeliner: elf_nodes.extend(self.pipeliner_prepare_for_upload(awg_index)) - for pipeline_chunk in range(rt_execution_info.pipeliner_chunk_count or 1): + for pipeliner_job in range(rt_execution_info.pipeliner_jobs): effective_nt_step = ( - NtStepKey(indices=tuple([*nt_step.indices, pipeline_chunk])) - if with_pipeliner + NtStepKey(indices=tuple([*nt_step.indices, pipeliner_job])) + if rt_execution_info.with_pipeliner else nt_step ) rt_exec_step = next( @@ -720,33 +723,16 @@ async def prepare_artifacts( None, ) - if with_pipeliner: + if rt_execution_info.with_pipeliner: rt_exec_step = self._pipeliner_reload_tracker[awg_index].calc_next_step( - pipeline_chunk=pipeline_chunk, + pipeliner_job=pipeliner_job, rt_exec_step=rt_exec_step, ) if rt_exec_step is None: continue - seqc_code = self.prepare_seqc( - artifacts, - rt_exec_step.seqc_ref, - ) - waves = self.prepare_waves( - artifacts, - rt_exec_step.wave_indices_ref, - ) - command_table = self.prepare_command_table( - artifacts, - rt_exec_step.wave_indices_ref, - ) - integration_weights = self.prepare_integration_weights( - artifacts, - recipe_data.recipe.integrator_allocations, - rt_exec_step.kernel_indices_ref, - ) - + seqc_code = self.prepare_seqc(artifacts, rt_exec_step.seqc_ref) if seqc_code is not None: seqc_item = SeqCCompileItem( dev_type=self.dev_type, @@ -765,6 +751,7 @@ async def prepare_artifacts( ) upload_ready_conditions.update(self._elf_upload_condition(awg_index)) + waves = self.prepare_waves(artifacts, rt_exec_step.wave_indices_ref) if waves is not None: acquisition_type = RtExecutionInfo.get_acquisition_type_def( rt_execution_info @@ -774,21 +761,41 @@ async def prepare_artifacts( awg_index, waves, acquisition_type ) ) + + command_table = self.prepare_command_table( + artifacts, 
rt_exec_step.wave_indices_ref + ) if command_table is not None: wf_eff.extend( self.prepare_upload_command_table(awg_index, command_table) ) + + integration_weights = self.prepare_integration_weights( + artifacts, + recipe_data.recipe.integrator_allocations, + rt_exec_step.kernel_indices_ref, + ) if integration_weights is not None: wf_eff.extend( self.prepare_upload_all_integration_weights( awg_index, integration_weights ) ) - if with_pipeliner: + + wf_eff.extend( + self.prepare_pipeliner_job_nodes( + recipe_data, + rt_section_uid, + AwgKey(initialization.device_uid, awg_index), + pipeliner_job, + ) + ) + + if rt_execution_info.with_pipeliner: # For devices with pipeliner, wf_eff == elf_nodes wf_eff.extend(self.pipeliner_commit(awg_index)) - if with_pipeliner: + if rt_execution_info.with_pipeliner: upload_ready_conditions.update(self.pipeliner_ready_conditions(awg_index)) elf_nodes_actions = await self.maybe_async(elf_nodes) @@ -1086,6 +1093,15 @@ def prepare_upload_all_integration_weights( ) -> NodeCollector: raise NotImplementedError + def prepare_pipeliner_job_nodes( + self, + recipe_data: RecipeData, + rt_section_uid: str, + awg_key: AwgKey, + pipeliner_job: int, + ) -> NodeCollector: + return NodeCollector() + def pipeliner_prepare_for_upload(self, index: int) -> NodeCollector: return [] @@ -1125,12 +1141,9 @@ async def collect_execution_nodes( nc = NodeCollector(base=f"/{self.serial}/") _logger.debug("%s: Executing AWGS...", self.dev_repr) - if self._daq is not None: - for awg_index in self._allocated_awgs: - _logger.debug( - "%s: Starting AWG #%d sequencer", self.dev_repr, awg_index - ) - nc.add(f"awgs/{awg_index}/enable", 1, cache=False) + for awg_index in self._allocated_awgs: + _logger.debug("%s: Starting AWG #%d sequencer", self.dev_repr, awg_index) + nc.add(f"awgs/{awg_index}/enable", 1, cache=False) return await self.maybe_async(nc) diff --git a/laboneq/controller/devices/zi_emulator.py b/laboneq/controller/devices/zi_emulator.py index 3f2899c..d05488c 100644 --- a/laboneq/controller/devices/zi_emulator.py +++ b/laboneq/controller/devices/zi_emulator.py @@ -2,6 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 from __future__ import annotations +from collections import defaultdict import functools import json @@ -62,28 +63,32 @@ class NodeStr(NodeBase): @dataclass class NodeVectorBase(NodeBase): def node_value(self) -> Any: - return [{"vector": self.value}] + return [{"vector": self.value[0], "properties": self.value[1]}] @dataclass class NodeVectorFloat(NodeVectorBase): - value: npt.ArrayLike = field(default_factory=lambda: np.array([], dtype=np.float64)) + value: tuple[npt.ArrayLike, dict[str, Any]] = field( + default_factory=lambda: (np.array([], dtype=np.float64), {}) + ) @dataclass class NodeVectorInt(NodeVectorBase): - value: npt.ArrayLike = field(default_factory=lambda: np.array([], dtype=np.int64)) + value: tuple[npt.ArrayLike, dict[str, Any]] = field( + default_factory=lambda: (np.array([], dtype=np.int64), {}) + ) @dataclass class NodeVectorStr(NodeVectorBase): - value: str = "" + value: tuple[str, dict[str, Any]] = ("", {}) @dataclass class NodeVectorComplex(NodeVectorBase): - value: npt.ArrayLike = field( - default_factory=lambda: np.array([], dtype=np.complex128) + value: tuple[npt.ArrayLike, dict[str, Any]] = field( + default_factory=lambda: (np.array([], dtype=np.complex128), {}) ) @@ -166,12 +171,10 @@ class PollEvent: class DevEmu(ABC): "Base class emulating a device, specialized per device type." 
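# A sketch of how a device subclass could use the new `prepare_pipeliner_job_nodes`
# hook added above (the base implementation returns an empty collector). The
# subclass name, node path and the `awg_index` attribute of AwgKey are hypothetical:
class MyQaDevice(DeviceZI):
    def prepare_pipeliner_job_nodes(
        self,
        recipe_data: RecipeData,
        rt_section_uid: str,
        awg_key: AwgKey,
        pipeliner_job: int,
    ) -> NodeCollector:
        nc = NodeCollector(base=f"/{self.serial}/")
        # e.g. re-arm a per-job result logger before the job is committed
        nc.add(f"qachannels/{awg_key.awg_index}/readout/result/enable", 1, cache=False)
        return nc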
- def __init__( - self, serial: str, scheduler: sched.scheduler, dev_opts: dict[str, Any] - ): + def __init__(self, serial: str, emulator_state: EmulatorState): self._serial = serial - self._scheduler = scheduler - self._dev_opts = dev_opts + self._emulator_state = emulator_state + self._dev_opts = emulator_state.get_options(serial) self._node_tree: dict[str, NodeBase] = {} self._poll_queue: list[PollEvent] = [] self._total_subscribed: int = 0 @@ -180,6 +183,11 @@ def __init__( def serial(self) -> str: return self._serial + def schedule(self, delay, action, argument=()): + self._emulator_state.scheduler.enter( + delay=delay, priority=0, action=action, argument=argument + ) + @abstractmethod def _node_def(self) -> dict[str, NodeInfo]: ... @@ -215,7 +223,7 @@ def _set_val(self, dev_path: str, value: Any) -> NodeBase: node.value = value if node.subscribed: self._poll_queue.append( - PollEvent(path=self._full_path(dev_path), value=value) + PollEvent(path=self._full_path(dev_path), value=node.node_value()) ) return node @@ -234,7 +242,7 @@ def unsubscribe(self, dev_path: str): def getAsEvent(self, dev_path: str): node = self._get_node(dev_path) self._poll_queue.append( - PollEvent(path=self._full_path(dev_path), value=node.value) + PollEvent(path=self._full_path(dev_path), value=node.node_value()) ) def poll(self) -> list[PollEvent]: @@ -253,6 +261,9 @@ def __init__( self._parent_ref = ref(parent) self._pipeliner_base = pipeliner_base self._pipeliner_stop_hook = pipeliner_stop_hook + self._staging_slot: dict[int, int] = defaultdict(lambda: 0) + # _pipelined[channel][slot][item] holds the node value staged for that pipeliner slot + self._pipelined: dict[int, list[dict[str, Any]]] = defaultdict(list) @property def _parent(self) -> DevEmu: @@ -260,21 +271,37 @@ def _parent(self) -> DevEmu: assert parent is not None return parent + def is_active(self, channel) -> bool: + mode: int = self._parent._get_node( + f"{self._pipeliner_base}/{channel}/pipeliner/mode" + ).value + return mode > 0 + + def _pipeline(self, node: NodeBase, item: str, channel: int): + if self.is_active(channel): + pipelined = self._pipelined[channel] + staging_slot = self._staging_slot[channel] + while len(pipelined) <= staging_slot: + pipelined.append({}) + pipelined[-1][item] = node.value + + def _pipeliner_mode(self, node: NodeBase, channel: int): + self._staging_slot[channel] = 0 + self._pipelined[channel].clear() + def _pipeliner_committed(self, channel: int): - avail_slots: int = self._parent._get_node( - f"{self._pipeliner_base}/{channel}/pipeliner/availableslots" + max_slots: int = self._parent._get_node( + f"{self._pipeliner_base}/{channel}/pipeliner/maxslots" ).value self._parent._set_val( f"{self._pipeliner_base}/{channel}/pipeliner/availableslots", - avail_slots - 1, + max_slots - self._staging_slot[channel], ) def _pipeliner_commit(self, node: NodeBase, channel: int): - self._parent._scheduler.enter( - delay=0.001, - priority=0, - action=self._pipeliner_committed, - argument=(channel,), + self._staging_slot[channel] += 1 + self._parent.schedule( + delay=0.001, action=self._pipeliner_committed, argument=(channel,) ) def _pipeliner_reset(self, node: NodeBase, channel: int): @@ -285,6 +312,8 @@ def _pipeliner_reset(self, node: NodeBase, channel: int): self._parent._set_val( f"{self._pipeliner_base}/{channel}/pipeliner/availableslots", max_slots ) + self._staging_slot[channel] = 0 + self._pipelined[channel].clear() def _pipeliner_stop(self, channel: int): # idle self._parent._set_val(f"{self._pipeliner_base}/{channel}/pipeliner/status", 0) @@ -295,16 +324,18 @@ def _pipeliner_enable(self, node: NodeBase, channel:
int): # exec self._parent._set_val(f"{self._pipeliner_base}/{channel}/pipeliner/status", 1) - self._parent._scheduler.enter( - delay=0.001, - priority=0, - action=self._pipeliner_stop, - argument=(channel,), + self._parent.schedule( + delay=0.001, action=self._pipeliner_stop, argument=(channel,) ) def _node_def_pipeliner(self) -> dict[str, NodeInfo]: nd = {} for channel in range(8): + nd[f"{self._pipeliner_base}/{channel}/pipeliner/mode"] = NodeInfo( + type=NodeType.INT, + default=0, + handler=partial(self._pipeliner_mode, channel=channel), + ) nd[f"{self._pipeliner_base}/{channel}/pipeliner/maxslots"] = NodeInfo( type=NodeType.INT, default=1024, @@ -379,8 +410,15 @@ def _preset_load(self, node: NodeBase): for p in self._node_tree.keys(): if p not in ["system/preset/load", "system/preset/busy"]: node_info = self._cached_node_def().get(p) - self._set_val(p, 0 if node_info is None else node_info.default) - self._scheduler.enter(delay=0.001, priority=0, action=self._preset_loaded) + self._set_val( + p, + 0 + if node_info is None + else node_info.type.value()._value + if node_info.default is None + else node_info.default, + ) + self.schedule(delay=0.001, action=self._preset_loaded) def _node_def_common(self) -> dict[str, NodeInfo]: return { @@ -434,7 +472,9 @@ def _node_def_common(self) -> dict[str, NodeInfo]: # ] # } "raw/error/json/errors": NodeInfo( - type=NodeType.VECTOR_STR, read_only=True, default='{"messages":[]}' + type=NodeType.VECTOR_STR, + read_only=True, + default=('{"messages":[]}', {}), ), "system/preset/load": NodeInfo( type=NodeType.INT, default=0, handler=self._preset_load @@ -460,20 +500,14 @@ def _awg_stop(self, awg_idx): self._set_val(f"awgs/{awg_idx}/enable", 0) def _awg_execute(self, node: NodeBase, awg_idx): - self._scheduler.enter( - delay=0.001, priority=0, action=self._awg_stop, argument=(awg_idx,) - ) + self.schedule(delay=0.001, action=self._awg_stop, argument=(awg_idx,)) def _sample_clock_switched(self): self._set_val("system/clocks/sampleclock/status", 0) def _sample_clock(self, node: NodeBase): self._set_val("system/clocks/sampleclock/status", 2) - self._scheduler.enter( - delay=0.001, - priority=0, - action=self._sample_clock_switched, - ) + self.schedule(delay=0.001, action=self._sample_clock_switched) def _ref_clock_switched(self, source): self._set_val("system/clocks/referenceclock/status", 0) @@ -486,9 +520,8 @@ def _ref_clock_switched(self, source): self._set_val("system/clocks/referenceclock/freq", target_freq) def _ref_clock(self, node: NodeBase): - self._scheduler.enter( + self.schedule( delay=0.001, - priority=0, action=self._ref_clock_switched, argument=(cast(NodeInt, node).value,), ) @@ -563,21 +596,23 @@ def _awg_stop(self): integrator = result_index // 2 res_c = (42 + integrator + 1j * np.arange(length)).view(float) res = res_c[result_index % 2 :: 2] - self._set_val(f"qas/0/result/data/{result_index}/wave", np.array(res)) + self._set_val( + f"qas/0/result/data/{result_index}/wave", (np.array(res), {}) + ) if monitor_enable != 0: length = self._get_node("qas/0/monitor/length").value - self._set_val("qas/0/monitor/inputs/0/wave", [52] * length) - self._set_val("qas/0/monitor/inputs/1/wave", [52] * length) + self._set_val("qas/0/monitor/inputs/0/wave", ([52] * length, {})) + self._set_val("qas/0/monitor/inputs/1/wave", ([52] * length, {})) def _awg_execute(self, node: NodeBase): - self._scheduler.enter(delay=0.001, priority=0, action=self._awg_stop) + self.schedule(delay=0.001, action=self._awg_stop) def _awg_ready(self): self._set_val("awgs/0/ready", 
1) def _elf_upload(self, node: NodeBase): self._set_val("awgs/0/ready", 0) - self._scheduler.enter(delay=0.001, priority=0, action=self._awg_ready) + self.schedule(delay=0.001, action=self._awg_ready) def _node_def(self) -> dict[str, NodeInfo]: nd = { @@ -594,7 +629,7 @@ def _node_def(self) -> dict[str, NodeInfo]: type=NodeType.INT, default=0, handler=self._awg_execute ), "awgs/0/elf/data": NodeInfo( - type=NodeType.VECTOR_INT, default=[], handler=self._elf_upload + type=NodeType.VECTOR_INT, default=([], {}), handler=self._elf_upload ), "awgs/0/ready": NodeInfo(type=NodeType.INT, default=0), "qas/0/monitor/inputs/0/wave": NodeInfo(type=NodeType.VECTOR_COMPLEX), @@ -618,9 +653,8 @@ def _ref_clock_switched(self, requested_source: int): self._set_val("system/clocks/referenceclock/in/freq", freq) def _ref_clock(self, node: NodeBase): - self._scheduler.enter( + self.schedule( delay=0.001, - priority=0, action=self._ref_clock_switched, argument=(cast(NodeInt, node).value,), ) @@ -648,7 +682,7 @@ def _trig_stop(self): self._set_val("execution/enable", 0) def _trig_execute(self, node: NodeBase): - self._scheduler.enter(delay=0.001, priority=0, action=self._trig_stop) + self.schedule(delay=0.001, action=self._trig_stop) def _node_def(self) -> dict[str, NodeInfo]: nd = { @@ -663,7 +697,10 @@ def _node_def(self) -> dict[str, NodeInfo]: ) nd[f"zsyncs/{zsync}/connection/serial"] = NodeInfo( type=NodeType.VECTOR_STR, - default=self._dev_opts.get(f"zsyncs/{zsync}/connection/serial", ""), + default=( + self._dev_opts.get(f"zsyncs/{zsync}/connection/serial", ""), + {}, + ), ) return nd @@ -674,9 +711,49 @@ def __init__(self, *args, **kwargs): self._qa_pipeliner = PipelinerEmu( parent=self, pipeliner_base="qachannels", - pipeliner_stop_hook=self._measurement_done, + pipeliner_stop_hook=self._pipeliner_done, ) + def _push_readout_result(self, channel: int, length: int, averages: int): + self._set_val( + f"qachannels/{channel}/readout/result/acquired", length * averages + ) + for integrator in range(16): + self._set_val( + f"qachannels/{channel}/readout/result/data/{integrator}/wave", + ((42 + integrator + 1j * np.arange(length)) / averages, {}), + ) + + def _push_spectroscopy_result(self, channel: int, length: int, averages: int): + self._set_val( + f"qachannels/{channel}/spectroscopy/result/acquired", length * averages + ) + self._set_val( + f"qachannels/{channel}/spectroscopy/result/data/wave", + (np.array([(42 + 42j)] * length), {}), + ) + + def _pipeliner_done(self, channel: int): + pipelined_nodes: dict[str, Any] = {} + for slot in self._qa_pipeliner._pipelined[channel]: + for path, value in slot.items(): + pipelined_nodes[path] = value + + readout_enable = pipelined_nodes.get("readout/result/enable", 0) + spectroscopy_enable = pipelined_nodes.get("spectroscopy/result/enable", 0) + pipelined_nodes["readout/result/enable"] = 0 + pipelined_nodes["spectroscopy/result/enable"] = 0 + + if readout_enable != 0: + length = pipelined_nodes.get("readout/result/length", 0) + averages = pipelined_nodes.get("readout/result/averages", 0) + self._push_readout_result(channel, length, averages) + + if spectroscopy_enable != 0: + length = pipelined_nodes.get("spectroscopy/result/length", 0) + averages = pipelined_nodes.get("spectroscopy/result/averages", 0) + self._push_spectroscopy_result(channel, length, averages) + def _measurement_done(self, channel: int): readout_enable = self._get_node( f"qachannels/{channel}/readout/result/enable" @@ -684,22 +761,19 @@ def _measurement_done(self, channel: int): spectroscopy_enable 
= self._get_node( f"qachannels/{channel}/spectroscopy/result/enable" ).value - scope_enable = self._get_node("scopes/0/enable").value self._set_val(f"qachannels/{channel}/readout/result/enable", 0) self._set_val(f"qachannels/{channel}/spectroscopy/result/enable", 0) + + if self._qa_pipeliner.is_active(channel): + return + if readout_enable != 0: length = self._get_node(f"qachannels/{channel}/readout/result/length").value averages = self._get_node( f"qachannels/{channel}/readout/result/averages" ).value - self._set_val( - f"qachannels/{channel}/readout/result/acquired", length * averages - ) - for integrator in range(16): - self._set_val( - f"qachannels/{channel}/readout/result/data/{integrator}/wave", - (42 + integrator + 1j * np.arange(length)) / averages, - ) + self._push_readout_result(channel, length, averages) + if spectroscopy_enable != 0: length = self._get_node( f"qachannels/{channel}/spectroscopy/result/length" @@ -707,14 +781,9 @@ def _measurement_done(self, channel: int): averages = self._get_node( f"qachannels/{channel}/spectroscopy/result/averages" ).value - self._set_val( - f"qachannels/{channel}/spectroscopy/result/acquired", length * averages - ) - self._set_val( - f"qachannels/{channel}/spectroscopy/result/data/wave", - np.array([(42 + 42j)] * length), - ) - if scope_enable != 0: + self._push_spectroscopy_result(channel, length, averages) + + if self._get_node("scopes/0/enable").value != 0: # Assuming here that the scope was triggered by AWG and channels configured to capture # QA channels 1:1. Not emulating various trigger, input source, etc. settings! scope_single = self._get_node("scopes/0/single").value @@ -724,7 +793,7 @@ def _measurement_done(self, channel: int): for scope_ch in range(4): self._set_val( f"scopes/0/channels/{scope_ch}/wave", - np.array([(52 + 52j)] * length), + (np.array([(52 + 52j)] * length), {}), ) def _awg_stop_qa(self, channel: int): @@ -732,11 +801,18 @@ def _awg_stop_qa(self, channel: int): self._measurement_done(channel) def _awg_execute_qa(self, node: NodeBase, channel: int): - self._scheduler.enter( - delay=0.001, priority=0, action=self._awg_stop_qa, argument=(channel,) - ) + if not self._qa_pipeliner.is_active(channel): + self.schedule(delay=0.001, action=self._awg_stop_qa, argument=(channel,)) def _node_def_qa(self) -> dict[str, NodeInfo]: + pipelineable_nodes: list[str] = [ + "readout/result/enable", + "readout/result/length", + "readout/result/averages", + "spectroscopy/result/enable", + "spectroscopy/result/length", + "spectroscopy/result/averages", + ] nd = self._qa_pipeliner._node_def_pipeliner() for channel in range(4): nd[f"qachannels/{channel}/generator/enable"] = NodeInfo( @@ -744,12 +820,14 @@ def _node_def_qa(self) -> dict[str, NodeInfo]: default=0, handler=partial(self._awg_execute_qa, channel=channel), ) - nd[f"qachannels/{channel}/readout/result/enable"] = NodeInfo( - type=NodeType.INT, default=0 - ) - nd[f"qachannels/{channel}/spectroscopy/result/enable"] = NodeInfo( - type=NodeType.INT, default=0 - ) + for path_part in pipelineable_nodes: + nd[f"qachannels/{channel}/{path_part}"] = NodeInfo( + type=NodeType.INT, + default=0, + handler=partial( + self._qa_pipeliner._pipeline, item=path_part, channel=channel + ), + ) for integrator in range(16): nd[ f"qachannels/{channel}/readout/result/data/{integrator}/wave" @@ -790,9 +868,7 @@ def _awg_stop_sg(self, channel: int): self._set_val(f"sgchannels/{channel}/awg/enable", 0) def _awg_execute_sg(self, node: NodeBase, channel: int): - self._scheduler.enter( - delay=0.001, 
priority=0, action=self._awg_stop_sg, argument=(channel,) - ) + self.schedule(delay=0.001, action=self._awg_stop_sg, argument=(channel,)) def _node_def_sg(self) -> dict[str, NodeInfo]: nd = self._sg_pipeliner._node_def_pipeliner() @@ -885,35 +961,58 @@ def _canonical_path_list(path: str | list[str]) -> list[str]: } +class EmulatorState: + def __init__(self): + self._dev_type_by_serial: dict[str, str] = {} + self._options: dict[str, dict[str, Any]] = defaultdict(dict) + self._scheduler = sched.scheduler() + + @property + def scheduler(self) -> sched.scheduler: + return self._scheduler + + def map_device_type(self, serial: str, type: str): + self._dev_type_by_serial[serial.upper()] = type.upper() + + def get_device_type(self, serial: str) -> str | None: + return self._dev_type_by_serial.get(serial.upper()) + + def set_option(self, serial: str, option: str, value: Any): + self._options[serial.upper()][option] = value + + def get_options(self, serial: str) -> dict[str, Any]: + return self._options[serial.upper()] + + class ziDAQServerEmulator: """A class replacing the 'zhinst.core.ziDAQServer', emulating its behavior to the extent required by LabOne Q SW without the real DataServer/HW. """ - def __init__(self, host: str, port: int, api_level: int): + def __init__( + self, + host: str, + port: int, + api_level: int, + emulator_state: EmulatorState | None = None, + ): + if emulator_state is None: + emulator_state = EmulatorState() + self._emulator_state = emulator_state + self._emulator_state.map_device_type("ZI", "ZI") + self._emulator_state.set_option("ZI", "emu_server", self) if api_level is None: api_level = 6 assert api_level == 6 assert isinstance(port, int) super().__init__() - self._scheduler = sched.scheduler() - self._dev_type_by_serial: dict[str, str] = {"ZI": "ZI"} self._devices: dict[str, DevEmu] = {} - self._options: dict[str, dict[str, Any]] = {"ZI": {"emu_server": self}} - - def map_device_type(self, serial: str, type: str): - self._dev_type_by_serial[serial.upper()] = type.upper() - - def set_option(self, serial: str, option: str, value: Any): - dev_opts = self._options.setdefault(serial.upper(), {}) - dev_opts[option] = value def _device_factory(self, serial: str) -> DevEmu: - dev_type = _dev_type_map.get(self._dev_type_by_serial.get(serial.upper())) + dev_type = _dev_type_map.get(self._emulator_state.get_device_type(serial)) if dev_type is None: dev_type = _serial_to_device_type(serial) - dev_opts = self._options.setdefault(serial.upper(), {}) - return dev_type(serial=serial, scheduler=self._scheduler, dev_opts=dev_opts) + return dev_type(serial=serial, emulator_state=self._emulator_state) def _device_lookup(self, serial: str, create: bool = True) -> DevEmu | None: serial = serial.upper() @@ -1069,8 +1168,12 @@ def poll( # TODO(2K): reshape results assert flat is True for event in events: - path_res = result.setdefault(event.path, {"value": []}) - path_res["value"].append(event.value) + if "value" in event.value: + path_res = result.setdefault(event.path, {"value": []}) + path_res["value"].extend(event.value["value"]) + else: + path_res = result.setdefault(event.path, []) + path_res.extend(event.value) return result def _progress_scheduler(self, wait_time: float = 0.0): @@ -1081,17 +1184,10 @@ def _delay(delay: float): start = time.perf_counter() while True: - delay_till_next_event = self._scheduler.run(blocking=False) + delay_till_next_event = self._emulator_state.scheduler.run(blocking=False) elapsed = time.perf_counter() - start remaining = wait_time - elapsed if 
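# A minimal construction sketch for stand-alone use of the emulator, based on the
# EmulatorState and ziDAQServerEmulator definitions above (serial, device type and
# option values are hypothetical):
state = EmulatorState()
state.map_device_type("dev12001", "SHFQA")
state.set_option("dev12001", "dev_type", "SHFQA4")
daq = ziDAQServerEmulator("localhost", 8004, 6, emulator_state=state)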
delay_till_next_event is None or delay_till_next_event > remaining: _delay(remaining) break _delay(delay_till_next_event) - - -def set_emulation_option(api: Any, serial: str, option: str, value: Any): - if isinstance(api, ziDAQServerEmulator): - api.set_option(serial, option, value) - else: - raise AssertionError("Unexpected emulation implementation") diff --git a/laboneq/controller/devices/zi_node_monitor.py b/laboneq/controller/devices/zi_node_monitor.py index e2e280a..99c3337 100644 --- a/laboneq/controller/devices/zi_node_monitor.py +++ b/laboneq/controller/devices/zi_node_monitor.py @@ -2,6 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 from __future__ import annotations +from abc import ABC, abstractmethod import logging import math @@ -36,9 +37,17 @@ def get_last(self) -> Any | None: def append(self, val: dict[str, Any] | list[dict[str, Any]]): if isinstance(val, dict): + # Scalar nodes, value is an array of consecutive updates self.values.extend(val["value"]) else: - self.values.append(val[0]["vector"]) + # Vector nodes + for v in val: + if isinstance(v["vector"], str): + # String value, ignore extra header + self.values.append(v["vector"]) + else: + # Other vector types, keep everything + self.values.append(v) self.last = self.values[-1] @@ -67,11 +76,22 @@ def _is_expected(val: Any, expected: Any | None | list[Any | None]) -> bool: return False -class NodeMonitor: - def __init__(self, daq): - self._daq = daq +class NodeMonitorBase(ABC): + def __init__(self): self._nodes: dict[str, Node] = {} + @abstractmethod + async def start(self): + ... + + @abstractmethod + async def stop(self): + ... + + @abstractmethod + async def poll(self): + ... + def _fail_on_missing_node(self, path: str): if path not in self._nodes: raise LabOneQControllerException( @@ -82,8 +102,8 @@ def _get_node(self, path: str) -> Node: self._fail_on_missing_node(path) return self._nodes[path] - def reset(self): - self.stop() + async def reset(self): + await self.stop() self._nodes.clear() def add_nodes(self, paths: list[str]): @@ -91,27 +111,8 @@ def add_nodes(self, paths: list[str]): if path not in self._nodes: self._nodes[path] = Node(path) - def start(self): - all_paths = [p for p in self._nodes.keys()] - if len(all_paths) > 0: - self._daq.subscribe(all_paths) - for path in all_paths: - self._daq.getAsEvent(path) - - def stop(self): - self._daq.unsubscribe("*") - self.flush() - - def poll(self): - while True: - data = self._daq.poll(1e-6, 100, flat=True) - if len(data) == 0: - break - for path, val in data.items(): - self._get_node(path).append(val) - - def flush(self): - self.poll() + async def flush(self): + await self.poll() for node in self._nodes.values(): node.flush() @@ -135,8 +136,10 @@ def check_last_for_conditions( failed.append((path, val)) return failed - def poll_and_check_conditions(self, conditions: dict[str, Any]) -> dict[str, Any]: - self.poll() + async def poll_and_check_conditions( + self, conditions: dict[str, Any] + ) -> dict[str, Any]: + await self.poll() remaining = {} for path, expected in conditions.items(): self._fail_on_missing_node(path) @@ -152,11 +155,36 @@ def poll_and_check_conditions(self, conditions: dict[str, Any]) -> dict[str, Any return remaining +class NodeMonitor(NodeMonitorBase): + def __init__(self, daq): + super().__init__() + self._daq = daq + + async def start(self): + all_paths = [p for p in self._nodes.keys()] + if len(all_paths) > 0: + self._daq.subscribe(all_paths) + for path in all_paths: + self._daq.getAsEvent(path) + + async def stop(self): + 
self._daq.unsubscribe("*") + await self.flush() + + async def poll(self): + while True: + data = self._daq.poll(1e-6, 100, flat=True) + if len(data) == 0: + break + for path, val in data.items(): + self._get_node(path).append(val) + + class MultiDeviceHandlerBase: def __init__(self): - self._conditions: dict[NodeMonitor, dict[str, Any]] = {} + self._conditions: dict[NodeMonitorBase, dict[str, Any]] = {} - def add(self, target: NodeMonitor, conditions: dict[str, Any]): + def add(self, target: NodeMonitorBase, conditions: dict[str, Any]): if conditions: daq_conditions: dict[str, Any] = self._conditions.setdefault(target, {}) daq_conditions.update(conditions) @@ -246,12 +274,14 @@ def __init__(self): self._timer = time.monotonic @trace("wait-for-all-nodes", disable_tracing_during=True) - def wait_all(self, timeout: float) -> bool: + async def wait_all(self, timeout: float) -> bool: start = self._timer() while True: remaining: dict[NodeMonitor, dict[str, Any]] = {} for node_monitor, daq_conditions in self._conditions.items(): - daq_remaining = node_monitor.poll_and_check_conditions(daq_conditions) + daq_remaining = await node_monitor.poll_and_check_conditions( + daq_conditions + ) if len(daq_remaining) > 0: remaining[node_monitor] = daq_remaining if len(remaining) == 0: diff --git a/laboneq/controller/near_time_runner.py b/laboneq/controller/near_time_runner.py index b644553..2010350 100644 --- a/laboneq/controller/near_time_runner.py +++ b/laboneq/controller/near_time_runner.py @@ -11,10 +11,10 @@ from numpy import typing as npt from laboneq.controller.communication import ( - CachingStrategy, DaqNodeSetAction, batch_set, ) +from laboneq.controller.devices.device_zi import NodeCollector from laboneq.controller.protected_session import ProtectedSession from laboneq.controller.util import LabOneQControllerException, SweepParamsTracker from laboneq.core.exceptions import AbortExecution @@ -33,21 +33,19 @@ class NearTimeRunner(AsyncExecutorBase): def __init__(self, controller: Controller): super().__init__(looping_mode=LoopingMode.NEAR_TIME_ONLY) self.controller = controller - self.user_set_nodes = [] + self.user_set_nodes: list[DaqNodeSetAction] = [] self.nt_loop_indices: list[int] = [] - self.pipeline_chunk: int = 0 + self.pipeliner_job: int = 0 self.sweep_params_tracker = SweepParamsTracker() def nt_step(self) -> NtStepKey: return NtStepKey(indices=tuple(self.nt_loop_indices)) async def set_handler(self, path: str, value): + nc = NodeCollector() + nc.add(path, value, cache=False) dev = self.controller._devices.find_by_node_path(path) - self.user_set_nodes.append( - DaqNodeSetAction( - dev._daq, path, value, caching_strategy=CachingStrategy.NO_CACHE - ) - ) + self.user_set_nodes.extend(await dev.maybe_async(nc)) async def nt_callback_handler(self, func_name: str, args: dict[str, Any]): func = self.controller._neartime_callbacks.get(func_name) @@ -93,7 +91,7 @@ async def for_loop_entry_handler( ): if loop_flags.is_pipeline: # Don't add the pipeliner loop index to NT indices - self.pipeline_chunk = index + self.pipeliner_job = index return self.nt_loop_indices.append(index) @@ -111,7 +109,7 @@ async def rt_entry_handler( averaging_mode: AveragingMode, acquisition_type: AcquisitionType, ): - if self.pipeline_chunk > 0: + if self.pipeliner_job > 0: # Skip the pipeliner loop iterations, except the first one - iterated by the pipeliner itself return diff --git a/laboneq/controller/pipeliner_reload_tracker.py b/laboneq/controller/pipeliner_reload_tracker.py index 764b7d1..1167faf 100644 --- 
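# Sketch of the now-async condition flow, assuming a started NodeMonitor
# `monitor` whose subscriptions include the (illustrative) path below:
conditions = {"/dev12001/system/clocks/referenceclock/status": 0}
remaining = await monitor.poll_and_check_conditions(conditions)
if not remaining:
    pass  # all monitored nodes already reported their expected values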
a/laboneq/controller/pipeliner_reload_tracker.py +++ b/laboneq/controller/pipeliner_reload_tracker.py @@ -25,10 +25,10 @@ def __init__(self): def calc_next_step( self, - pipeline_chunk: int, + pipeliner_job: int, rt_exec_step: RealtimeExecutionInit | None, ) -> RealtimeExecutionInit: - """Constructs the current RT chunk of a pipeline (PL) from recipe data + trace from previous NT steps + """Constructs the current RT job of a pipeliner (PLn) from recipe data + trace from previous NT steps Assuming similar sequence of pipeliner jobs for each near-time step, and that any potential differences between identical pipeliner jobs across near-time steps would likely be minor @@ -37,50 +37,49 @@ def calc_next_step( Legend for the table below: * * - Full data must be available from the recipe - * < - Inherit from the previous pipeliner chunk in the same NT step - * ^ - Inherit from the same pipeliner chunk of the previous NT step + * < - Inherit from the previous pipeliner job in the same NT step + * ^ - Inherit from the same pipeliner job of the previous NT step * <+, ^+ - Same as above, but also apply any updates from the recipe | NT step | PL0 | PL1 | PL2 | Comment | |--------:|:---:|:---:|:---:|:--------| - | 0 | * | < | < | Only 1st PL step data in recipe, subsequent steps inherit it - | 1 | ^ | ^ | ^ | No change since previous NT step, inherit previous PL entirely - | 2 | ^+ | < | < | Update from recipe for the 1st PL step, start filling PL again - | 3 | ^ | ^+ | ^ | Update from recipe for a PL step > 1 + | 0 | * | < | < | Only 1st pipeliner job data in recipe, subsequent jobs inherit it + | 1 | ^ | ^ | ^ | No change since previous NT step, inherit previous pipeliner entirely + | 2 | ^+ | < | < | Update from recipe for the 1st pipeliner job, start filling pipeliner again + | 3 | ^ | ^+ | ^ | Update from recipe for a pipeliner job > 1 """ - assert pipeline_chunk >= 0 + assert pipeliner_job >= 0 last_rt_exec_steps = self.last_rt_exec_steps if rt_exec_step is None: - # No update from recipe - if pipeline_chunk < len(last_rt_exec_steps): - # Reuse respective chunk from previous PL - rt_exec_step = last_rt_exec_steps[pipeline_chunk] + # No update from the recipe + if pipeliner_job < len(last_rt_exec_steps): + # Reuse respective job from previous NT step pipeliner + rt_exec_step = last_rt_exec_steps[pipeliner_job] elif ( - pipeline_chunk == len(last_rt_exec_steps) - and len(last_rt_exec_steps) > 0 + pipeliner_job == len(last_rt_exec_steps) and len(last_rt_exec_steps) > 0 ): - # Reuse previous PL chunk + # Reuse previous pipeliner job rt_exec_step = last_rt_exec_steps[-1] last_rt_exec_steps.append(rt_exec_step) else: - # Unknown previous pipeline chunk + # Unknown previous pipeliner job raise LabOneQControllerException( "Internal error: Could not determine the RT execution params." 
) else: # Update from recipe - if pipeline_chunk == 0: - # New pipeline and update recipe - construct fresh PL + if pipeliner_job == 0: + # New pipeline and update recipe - construct fresh pipeliner if len(last_rt_exec_steps) > 0: rt_exec_step = _merge(last_rt_exec_steps[0], rt_exec_step) last_rt_exec_steps.clear() last_rt_exec_steps.append(rt_exec_step) - elif pipeline_chunk < len(last_rt_exec_steps): - # Amend previous NT step pipeline chunk - rt_exec_step = _merge(last_rt_exec_steps[pipeline_chunk], rt_exec_step) - last_rt_exec_steps[pipeline_chunk] = rt_exec_step - elif pipeline_chunk == len(last_rt_exec_steps): - # Amend previous pipeline chunk + elif pipeliner_job < len(last_rt_exec_steps): + # Amend previous NT step pipeline job + rt_exec_step = _merge(last_rt_exec_steps[pipeliner_job], rt_exec_step) + last_rt_exec_steps[pipeliner_job] = rt_exec_step + elif pipeliner_job == len(last_rt_exec_steps): + # Amend previous pipeline job rt_exec_step = _merge(last_rt_exec_steps[-1], rt_exec_step) last_rt_exec_steps.append(rt_exec_step) diff --git a/laboneq/controller/recipe_processor.py b/laboneq/controller/recipe_processor.py index 736a5be..47f8300 100644 --- a/laboneq/controller/recipe_processor.py +++ b/laboneq/controller/recipe_processor.py @@ -17,6 +17,7 @@ DeviceAttribute, ) from laboneq.controller.util import LabOneQControllerException +from laboneq.controller.versioning import SetupCaps from laboneq.core.types.enums.acquisition_type import AcquisitionType from laboneq.core.types.enums.averaging_mode import AveragingMode from laboneq.data.recipe import IO, Initialization, Recipe, SignalType @@ -84,8 +85,9 @@ class RtExecutionInfo: averages: int averaging_mode: AveragingMode acquisition_type: AcquisitionType - pipeliner_chunk_count: int + pipeliner_job_count: int | None pipeliner_repetitions: int + result_logger_pipelined: bool # signal id -> set of section ids acquire_sections: dict[str, set[str]] = field(default_factory=dict) @@ -94,6 +96,14 @@ class RtExecutionInfo: # TODO(2K): to be replaced by event-based calculation in the compiler signal_result_map: dict[str, list[str]] = field(default_factory=dict) + @property + def with_pipeliner(self) -> bool: + return self.pipeliner_job_count is not None + + @property + def pipeliner_jobs(self) -> int: + return self.pipeliner_job_count or 1 + def add_acquire_section(self, signal_id: str, section_id: str): self.acquire_sections.setdefault(signal_id, set()).add(section_id) @@ -238,17 +248,19 @@ def axis(self) -> npt.ArrayLike | list[npt.ArrayLike]: class _LoopsPreprocessor(ExecutorBase): - def __init__(self): + def __init__(self, setup_caps: SetupCaps): super().__init__(looping_mode=LoopingMode.ONCE) + self._setup_caps = setup_caps + self.result_shapes: HandleResultShapes = {} self.rt_execution_infos: RtExecutionInfos = {} - self.pipeliner_chunk_count: int = None - self.pipeliner_repetitions: int = None + self.pipeliner_job_count: int | None = None + self.pipeliner_repetitions: int | None = None self._loop_stack: list[_LoopStackEntry] = [] - self._current_rt_uid: str = None - self._current_rt_info: RtExecutionInfo = None + self._current_rt_uid: str | None = None + self._current_rt_info: RtExecutionInfo | None = None def _single_shot_axis(self) -> npt.ArrayLike: return np.linspace( @@ -297,7 +309,7 @@ def set_sw_param_handler( def for_loop_entry_handler(self, count: int, index: int, loop_flags: LoopFlags): if loop_flags.is_pipeline: - self.pipeliner_chunk_count = count + self.pipeliner_job_count = count self.pipeliner_repetitions = math.prod( 
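# A worked example of the inheritance table above (the RealtimeExecutionInit
# instances `init_a` is a placeholder for recipe data):
tracker = PipelinerReloadTracker()
# NT step 0: recipe carries data only for the first pipeliner job ('*', '<')
s0 = tracker.calc_next_step(pipeliner_job=0, rt_exec_step=init_a)
s1 = tracker.calc_next_step(pipeliner_job=1, rt_exec_step=None)  # inherits s0
# NT step 1: no recipe update ('^') - the previous pipeliner job is reused
s0_again = tracker.calc_next_step(pipeliner_job=0, rt_exec_step=None)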
len(l.axis_points) for l in self._loop_stack ) @@ -316,7 +328,7 @@ def for_loop_entry_handler(self, count: int, index: int, loop_flags: LoopFlags): def for_loop_exit_handler(self, count: int, index: int, loop_flags: LoopFlags): if loop_flags.is_pipeline: - self.pipeliner_chunk_count = None + self.pipeliner_job_count = None self.pipeliner_repetitions = None return @@ -336,8 +348,9 @@ def rt_entry_handler( averages=count, averaging_mode=averaging_mode, acquisition_type=acquisition_type, - pipeliner_chunk_count=self.pipeliner_chunk_count, + pipeliner_job_count=self.pipeliner_job_count, pipeliner_repetitions=self.pipeliner_repetitions, + result_logger_pipelined=self._setup_caps.result_logger_pipelined, ), ) @@ -434,6 +447,11 @@ def awg_key_by_acquire_signal(signal_id: str) -> AwgKey: awg_config.result_length = ( len(any_awg_signal_result_map) * mapping_repeats ) + if ( + rt_execution_info.with_pipeliner + and not rt_execution_info.result_logger_pipelined + ): + awg_config.result_length *= rt_execution_info.pipeliner_jobs return awg_configs @@ -482,6 +500,7 @@ def pre_process_compiled( scheduled_experiment: ScheduledExperiment, devices: DeviceCollection, execution: Statement, + setup_caps: SetupCaps, ) -> RecipeData: recipe = scheduled_experiment.recipe @@ -492,7 +511,7 @@ def pre_process_compiled( initialization.device_uid ].iq_settings = _pre_process_iq_settings_hdawg(initialization) - lp = _LoopsPreprocessor() + lp = _LoopsPreprocessor(setup_caps) lp.run(execution) rt_execution_infos = lp.rt_execution_infos diff --git a/laboneq/controller/versioning.py b/laboneq/controller/versioning.py index eee7036..c677b76 100644 --- a/laboneq/controller/versioning.py +++ b/laboneq/controller/versioning.py @@ -1,6 +1,8 @@ # Copyright 2022 Zurich Instruments AG # SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + from enum import Enum from functools import total_ordering @@ -8,9 +10,8 @@ @total_ordering class LabOneVersion(Enum): UNKNOWN = "unknown" - V_23_06 = "23.06" - V_23_10 = "23.10" - LATEST = V_23_10 + V_24_01 = "24.01" + LATEST = V_24_01 def __eq__(self, other): return float(self.value) == float(other.value) @@ -19,10 +20,19 @@ def __lt__(self, other): return float(self.value) < float(other.value) @classmethod - def cast_if_supported(cls, version: str) -> "LabOneVersion": + def cast_if_supported(cls, version: str) -> LabOneVersion: try: labone_version = LabOneVersion(version) except ValueError as e: err_msg = f"Version {version} is not supported by LabOne Q." 
raise ValueError(err_msg) from e return labone_version + + +class SetupCaps: + def __init__(self, version: LabOneVersion | None): + self._version = version or LabOneVersion.LATEST + + @property + def result_logger_pipelined(self) -> bool: + return True diff --git a/laboneq/data/calibration/__init__.py b/laboneq/data/calibration/__init__.py index 98aed0b..c4c7b1a 100644 --- a/laboneq/data/calibration/__init__.py +++ b/laboneq/data/calibration/__init__.py @@ -82,14 +82,25 @@ class Precompensation: FIR: FIRCompensation | None = None +class CancellationSource(EnumReprMixin, Enum): + INTERNAL = "internal" + EXTERNAL = "external" + + @dataclass class AmplifierPump: uid: str = None - pump_freq: float | Parameter | None = None + pump_frequency: float | Parameter | None = None pump_power: float | Parameter | None = None - cancellation: bool = True - alc_engaged: bool = True - use_probe: bool = False + pump_on: bool = True + pump_filter_on: bool = True + cancellation_on: bool = True + cancellation_phase: float | Parameter | None = None + cancellation_attenuation: float | Parameter | None = None + cancellation_source: CancellationSource = CancellationSource.INTERNAL + cancellation_source_frequency: float | None = None + alc_on: bool = True + probe_on: bool = False probe_frequency: float | Parameter | None = None probe_power: float | Parameter | None = None diff --git a/laboneq/data/compilation_job.py b/laboneq/data/compilation_job.py index 0d32dd8..a4cbb7a 100644 --- a/laboneq/data/compilation_job.py +++ b/laboneq/data/compilation_job.py @@ -18,6 +18,8 @@ SectionAlignment, ) +from laboneq.data.calibration import CancellationSource + if TYPE_CHECKING: from numpy.typing import ArrayLike from laboneq.core.types.enums.acquisition_type import AcquisitionType @@ -172,6 +174,7 @@ class SectionInfo: match_handle: str | None = None match_user_register: int | None = None match_prng_sample: str | None = None + match_sweep_parameter: ParameterInfo | None = None local: bool | None = None state: int | None = None prng: PRNGInfo | None = None @@ -241,11 +244,17 @@ class SignalRange: @dataclass class AmplifierPumpInfo: device: DeviceInfo | None = None - pump_freq: float | ParameterInfo | None = None + pump_frequency: float | ParameterInfo | None = None pump_power: float | ParameterInfo | None = None - cancellation: bool = True - alc_engaged: bool = True - use_probe: bool = False + pump_on: bool = True + pump_filter_on: bool = True + cancellation_on: bool = True + cancellation_phase: float | ParameterInfo | None = None + cancellation_attenuation: float | ParameterInfo | None = None + cancellation_source: CancellationSource = CancellationSource.INTERNAL + cancellation_source_frequency: float | None = None + alc_on: bool = True + probe_on: bool = False probe_frequency: float | ParameterInfo | None = None probe_power: float | ParameterInfo | None = None channel: int | None = None diff --git a/laboneq/data/experiment_description/__init__.py b/laboneq/data/experiment_description/__init__.py index 37ca4f2..4ff3bea 100644 --- a/laboneq/data/experiment_description/__init__.py +++ b/laboneq/data/experiment_description/__init__.py @@ -130,6 +130,7 @@ class Match(Section): handle: str | None = None user_register: Optional[int] = None prng_sample: PRNGSample | None = None + sweep_parameter: Parameter | None = None local: Optional[bool] = None diff --git a/laboneq/data/scheduled_experiment.py b/laboneq/data/scheduled_experiment.py index 4e059e0..e688c96 100644 --- a/laboneq/data/scheduled_experiment.py +++ 
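# Sketch of the version gate above (SetupCaps falls back to LATEST when the
# data server version is unknown):
LabOneVersion.cast_if_supported("24.01")  # -> LabOneVersion.V_24_01
# LabOneVersion.cast_if_supported("23.10") would now raise ValueError
caps = SetupCaps(None)
assert caps.result_logger_pipelined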
b/laboneq/data/scheduled_experiment.py @@ -107,17 +107,17 @@ class ScheduledExperiment: #: Instructions to the controller for running the experiment. recipe: Recipe = None - #: Compiler arteficts specific to backend(s) + #: Compiler artifacts specific to backend(s) artifacts: CompilerArtifact | dict[int, CompilerArtifact] = None def __getattr__(self, attr): return getattr(self.artifacts, attr) # @IgnoreException def __copy__(self): - new_artefacts = copy.copy(self.artifacts) + new_artifacts = copy.copy(self.artifacts) new_scheduled_experiment = self.__class__( uid=self.uid, - artifacts=new_artefacts, + artifacts=new_artifacts, schedule=self.schedule, execution=self.execution, compilation_job_hash=self.compilation_job_hash, diff --git a/laboneq/dsl/calibration/__init__.py b/laboneq/dsl/calibration/__init__.py index ade42b1..6c1aacd 100644 --- a/laboneq/dsl/calibration/__init__.py +++ b/laboneq/dsl/calibration/__init__.py @@ -1,7 +1,7 @@ # Copyright 2022 Zurich Instruments AG # SPDX-License-Identifier: Apache-2.0 -from .amplifier_pump import AmplifierPump +from .amplifier_pump import AmplifierPump, CancellationSource from .calibratable import Calibratable from .calibration import Calibration from .calibration_item import CalibrationItem diff --git a/laboneq/dsl/calibration/amplifier_pump.py b/laboneq/dsl/calibration/amplifier_pump.py index 277bce1..d2ed8ea 100644 --- a/laboneq/dsl/calibration/amplifier_pump.py +++ b/laboneq/dsl/calibration/amplifier_pump.py @@ -6,6 +6,7 @@ from dataclasses import dataclass, field from laboneq.core.utilities.dsl_dataclass_decorator import classformatter +from laboneq.data.calibration import CancellationSource from laboneq.dsl.parameter import Parameter amplifier_pump_id = 0 @@ -27,36 +28,63 @@ class AmplifierPump: uid (str): Unique identifier. If left blank, a new unique ID will be generated. - pump_freq (float | Parameter | None): - Sets the pump frequency node. Default `None`. + pump_frequency (float | Parameter | None): + Set the pump frequency node. Default `None`. pump_power (float | Parameter | None): - Sets the pump power node. Default `None`. - cancellation (bool): + Set the pump power node. Units: dBm. Default `None`. + pump_on (bool): + Enable the pump tone. Default `True`. + pump_filter_on (bool): + Enable the integrated low-pass filter for the pump tone. Default: `True`. + cancellation_on (bool): Enable pump tone cancellation. Default `True`. - alc_engaged (bool): - Enable the automatic level control for pump - tone output. Default `True`. - use_probe (bool): + cancellation_phase (float | Parameter | None): + Set the phase shift of the cancellation tone. Units: radians. Default `None`. + cancellation_attenuation (float | Parameter | None): + Set the attenuation of the cancellation tone. Positive values _reduce_ the + cancellation tone power. Default `None`. + cancellation_source (CancellationSource): + Set the source of the cancellation tone. Default: internal. + cancellation_source_frequency (float | None): + Specify the cancellation tone frequency when using the *external* + cancellation tone generator. Leave at `None` when supplying the + cancellation tone internally (the frequency then matches that of the + pump tone). + alc_on (bool): + Enable the automatic level control for pump tone output. Default `True`. + probe_on (bool): Enable probe tone output. Default `False`. probe_frequency (float | Parameter | None): - Sets the frequency of the generated probe tone. - Units: Hz. Default: `None`. + Set the frequency of the generated probe tone. 
Required if `probe_on` is + `True`. Units: Hz. Default: `None`. probe_power (float | Parameter | None): - Sets the output power of the generated probe tone. + Set the output power of the generated probe tone. Units: dBm. Default: `None`. Notes: - If an attribute is set to `None`, the corresponding node - is not set. + If an attribute is set to `None`, the corresponding node is not set. + + !!! version-changed "Some fields were renamed in version 2.24.0" + + - `AmplifierPump.pump_freq` is now `AmplifierPump.pump_frequency` + - `AmplifierPump.cancellation` is now `AmplifierPump.cancellation_on` + - `AmplifierPump.alc_engaged` is now `AmplifierPump.alc_on` + - `AmplifierPump.use_probe` is now `AmplifierPump.probe_on` """ #: Unique identifier. If left blank, a new unique ID will be generated. uid: str = field(default_factory=amplifier_pump_id_generator) - pump_freq: float | Parameter | None = None + pump_frequency: float | Parameter | None = None pump_power: float | Parameter | None = None - cancellation: bool = True - alc_engaged: bool = True - use_probe: bool = False + pump_on: bool = True + pump_filter_on: bool = True + cancellation_on: bool = True + cancellation_phase: float | Parameter | None = None + cancellation_attenuation: float | Parameter | None = None + cancellation_source: CancellationSource = CancellationSource.INTERNAL + cancellation_source_frequency: float | None = None + alc_on: bool = True + probe_on: bool = False probe_frequency: float | Parameter | None = None probe_power: float | Parameter | None = None diff --git a/laboneq/dsl/calibration/precompensation.py b/laboneq/dsl/calibration/precompensation.py index 3a66454..4968ec0 100644 --- a/laboneq/dsl/calibration/precompensation.py +++ b/laboneq/dsl/calibration/precompensation.py @@ -91,8 +91,6 @@ def __post_init__(self): FutureWarning, stacklevel=2, ) - else: - self.clearing = HighPassCompensationClearing.RISE super().__post_init__() diff --git a/laboneq/dsl/experiment/builtins.py b/laboneq/dsl/experiment/builtins.py index 913c104..63ae8d3 100644 --- a/laboneq/dsl/experiment/builtins.py +++ b/laboneq/dsl/experiment/builtins.py @@ -110,6 +110,7 @@ def match( handle: str | None = None, user_register: int | None = None, prng_sample: PRNGSample | None = None, + sweep_parameter: Parameter | None = None, uid: str | None = None, play_after: str | list[str] | None = None, local: bool | None = None, @@ -118,6 +119,7 @@ def match( handle=handle, user_register=user_register, prng_sample=prng_sample, + sweep_parameter=sweep_parameter, uid=uid, play_after=play_after, local=local, @@ -292,7 +294,9 @@ def prng_setup(range: int, seed=1, uid=None): def prng_loop(prng: PRNG, count=1, uid=None): from laboneq.dsl.prng import PRNGSample - prng_sample = PRNGSample(uid=uid, prng=prng, count=count) + maybe_uid = {"uid": uid} if uid is not None else {} + + prng_sample = PRNGSample(prng=prng, count=count, **maybe_uid) return PRNGLoopContextManager(prng_sample, uid) diff --git a/laboneq/dsl/experiment/experiment.py b/laboneq/dsl/experiment/experiment.py index 52f67fa..3a9da07 100644 --- a/laboneq/dsl/experiment/experiment.py +++ b/laboneq/dsl/experiment/experiment.py @@ -1040,6 +1040,7 @@ def match_local( uid=uid, handle=handle, user_register=None, + sweep_parameter=None, play_after=play_after, local=True, ) @@ -1076,6 +1077,7 @@ def match_global( uid=uid, handle=handle, user_register=None, + sweep_parameter=None, play_after=play_after, local=False, ) @@ -1087,17 +1089,21 @@ def __init__( self, experiment, uid, handle, user_register, + sweep_parameter, local, play_after=None,
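# Construction sketch with the renamed AmplifierPump fields documented above
# (all values are hypothetical):
from laboneq.dsl.calibration import AmplifierPump, CancellationSource

amp_pump = AmplifierPump(
    pump_frequency=8.0e9,  # Hz
    pump_power=12.5,  # dBm
    cancellation_on=True,
    cancellation_phase=0.0,  # radians
    cancellation_source=CancellationSource.INTERNAL,
    alc_on=True,
    probe_on=False,
)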
): self.exp = experiment - args = {"handle": handle} + args = { + "handle": handle, + "local": local, + "user_register": user_register, + "sweep_parameter": sweep_parameter, + } if uid is not None: args["uid"] = uid if play_after is not None: args["play_after"] = play_after - args["local"] = local - args["user_register"] = user_register self.section = Match(**args) @@ -1112,6 +1118,7 @@ def match( self, handle: str | None = None, user_register: int | None = None, + sweep_parameter: Parameter | None = None, uid: str | None = None, play_after: str | Section | list[str | Section] | None = None, ): @@ -1140,6 +1147,7 @@ def match( handle: A unique identifier string that allows to retrieve the acquired data. user_register: The user register to use for the match. + sweep_parameter: The sweep_parameter to use for the match. play_after: Play this section after the end of the section(s) with the given ID(s). Defaults to None. @@ -1149,6 +1157,7 @@ def match( uid=uid, handle=handle, user_register=user_register, + sweep_parameter=sweep_parameter, play_after=play_after, local=None, ) diff --git a/laboneq/dsl/experiment/section.py b/laboneq/dsl/experiment/section.py index daac328..c7a2bb9 100644 --- a/laboneq/dsl/experiment/section.py +++ b/laboneq/dsl/experiment/section.py @@ -496,6 +496,8 @@ class Match(Section): User register on which to match. prng_sample (PRNGSample | None): PRNG sample to match. + sweep_parameter (SweepParameter | None): + Sweep parameter to match. local (bool): Whether to fetch the codeword via the PQSC (`False`), SHFQC-internal bus (`True`) or automatic (`None`). @@ -519,6 +521,9 @@ class Match(Section): # PRNG sample prng_sample: PRNGSample | None = None + # Sweep parameter + sweep_parameter: Parameter | None = None + # Whether to fetch the codeword via the PQSC (False), SHFQC-internal bus (True) or automatic (None) local: bool | None = None @@ -563,24 +568,17 @@ class Case(Section): may only be added to a [Match][laboneq.dsl.experiment.section.Match] section and not to any other kind of section. - A [Case][laboneq.dsl.experiment.section.Case] may only contain - `PlayPulse` and `Delay` operations and not other kinds of operations - or sections. + Unless matching a sweep parameter, a [Case][laboneq.dsl.experiment.section.Case] + may only contain `PlayPulse` and `Delay` operations and not other kinds of + operations or sections. """ state: int = 0 - def add(self, obj: Operation): - """Add an operation the Case section. - - Arguments: - obj: - The `PlayPulse` or `Delay` operation to be added. - """ - if not isinstance(obj, (PlayPulse, Delay)): - raise LabOneQException( - f"Trying to add object to section {self.uid}. Only ``play`` and ``delay`` are allowed." 
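The new `sweep_parameter` match target added above can be exercised end to end. A sketch under assumed names (signal `"drive"`, constant pulses from `pulse_library`); `exp.match(sweep_parameter=...)` and `exp.case(i)` are taken directly from this diff:

```python
from laboneq.dsl import SweepParameter
from laboneq.dsl.experiment import Experiment, ExperimentSignal, pulse_library

exp = Experiment(signals=[ExperimentSignal("drive")])
index = SweepParameter(uid="index", values=[0, 1])
pulses = [pulse_library.const(length=100e-9, amplitude=a) for a in (0.3, 0.6)]

with exp.acquire_loop_rt(count=4):
    with exp.sweep(index):
        # Branch on the swept value: one case per entry of `index.values`.
        with exp.match(sweep_parameter=index):
            for i, pulse in enumerate(pulses):
                with exp.case(i):
                    exp.play(signal="drive", pulse=pulse)
```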
- ) + def add(self, obj: Operation | Section): + """Add a child to the Case section.""" + if isinstance(obj, Case): + raise LabOneQException("Case blocks can only be added to match blocks.") super().add(obj) @classmethod diff --git a/laboneq/dsl/experiment/section_context.py b/laboneq/dsl/experiment/section_context.py index 94af5fb..3a6be9b 100644 --- a/laboneq/dsl/experiment/section_context.py +++ b/laboneq/dsl/experiment/section_context.py @@ -13,6 +13,7 @@ ExecutionType, RepetitionMode, ) +from laboneq.dsl import Parameter from laboneq.dsl.experiment.context import ( Context, peek_context, @@ -222,6 +223,7 @@ def __init__( handle: str | None = None, user_register: int | None = None, prng_sample: PRNGSample | None = None, + sweep_parameter: Parameter | None = None, uid=None, play_after=None, local=None, @@ -237,6 +239,8 @@ def __init__( kwargs["user_register"] = user_register if prng_sample is not None: kwargs["prng_sample"] = prng_sample + if sweep_parameter is not None: + kwargs["sweep_parameter"] = sweep_parameter if local is not None: kwargs["local"] = local super().__init__(kwargs=kwargs) diff --git a/laboneq/dsl/laboneq_facade.py b/laboneq/dsl/laboneq_facade.py index f494c22..41116fd 100644 --- a/laboneq/dsl/laboneq_facade.py +++ b/laboneq/dsl/laboneq_facade.py @@ -21,6 +21,7 @@ from laboneq.implementation.legacy_adapters.device_setup_converter import ( convert_device_setup_to_setup, ) +from laboneq.laboneq_logging import initialize_logging if TYPE_CHECKING: from laboneq.dsl.experiment.pulse import Pulse @@ -105,6 +106,4 @@ def init_logging(log_level=None, performance_log=None): if "pytest" not in sys.modules: # Only initialize logging outside pytest # pytest initializes the logging itself - ctrl.initialize_logging( - log_level=log_level, performance_log=performance_log - ) + initialize_logging(log_level=log_level, performance_log=performance_log) diff --git a/laboneq/dsl/parameter.py b/laboneq/dsl/parameter.py index 8adc2a2..92d7b2d 100644 --- a/laboneq/dsl/parameter.py +++ b/laboneq/dsl/parameter.py @@ -48,7 +48,7 @@ class Parameter(ABC): class _ParameterArithmeticMixin(NDArrayOperatorsMixin): - """A mixin that implments arithmetic using numpy's ufunc hooks. + """A mixin that implements arithmetic using numpy's ufunc hooks. Classes that include this mixin should provide a `.values` attribute or property that gives an [ArrayLike][] containing diff --git a/laboneq/dsl/serialization/serializer.py b/laboneq/dsl/serialization/serializer.py index 0829df5..cbeee72 100644 --- a/laboneq/dsl/serialization/serializer.py +++ b/laboneq/dsl/serialization/serializer.py @@ -131,6 +131,7 @@ def to_json_file(serializable_object, filename: str): def classes_by_short_name(): dsl_modules = [ "laboneq.dsl.experiment", + "laboneq.dsl.calibration.amplifier_pump", "laboneq.dsl.calibration.oscillator", "laboneq.dsl.calibration.signal_calibration", "laboneq.dsl.result.results", diff --git a/laboneq/dsl/session.py b/laboneq/dsl/session.py index 5222cbc..d9bdda5 100644 --- a/laboneq/dsl/session.py +++ b/laboneq/dsl/session.py @@ -78,7 +78,7 @@ class Session: def __init__( self, device_setup: DeviceSetup | None = None, - log_level: int = logging.INFO, + log_level: int = None, performance_log: bool = False, configure_logging: bool = True, _last_results=None, @@ -93,7 +93,8 @@ def __init__( of the object. log_level: Log level of the session. If no log level is specified, the session will use the logging.INFO level. - Other possible levels refer to the logging python package. 
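Since the logging module now lives at the package top level, initialization can be done from the new path. A sketch that only assumes the keyword arguments visible in the facade call above:

```python
import logging

from laboneq.laboneq_logging import initialize_logging

# Same keywords the facade forwards; further parameters may exist.
initialize_logging(log_level=logging.DEBUG, performance_log=False)
```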
+ Other possible levels refer to the logging python package and + `laboneq.laboneq_logging`. performance_log: Flag to enable performance logging. When True, the system creates a separate logfile containing logs aimed to analyze system performance. configure_logging: @@ -117,8 +118,7 @@ def __init__( LabOneQFacade.init_logging( log_level=log_level, performance_log=performance_log ) - self._logger = logging.getLogger(__name__) - self._logger.setLevel(log_level) + self._logger = logging.getLogger("laboneq") else: self._logger = logging.getLogger("null") self._neartime_callbacks: Dict[str, Callable] = {} @@ -548,6 +548,10 @@ def log_level(self) -> int: """The current log level.""" return self._logger.level + @log_level.setter + def log_level(self, value: int): + self._logger.setLevel(value) + @property def logger(self): """The current logger instance used by the session.""" diff --git a/laboneq/implementation/legacy_adapters/calibration_converter.py b/laboneq/implementation/legacy_adapters/calibration_converter.py index 5e5db38..06aad73 100644 --- a/laboneq/implementation/legacy_adapters/calibration_converter.py +++ b/laboneq/implementation/legacy_adapters/calibration_converter.py @@ -120,10 +120,19 @@ def convert_amplifier_pump( return obj return calibration.AmplifierPump( uid=obj.uid, - pump_freq=convert_maybe_parameter(obj.pump_freq), + pump_on=obj.pump_on, + pump_frequency=convert_maybe_parameter(obj.pump_frequency), pump_power=convert_maybe_parameter(obj.pump_power), - alc_engaged=obj.alc_engaged, - use_probe=obj.use_probe, + pump_filter_on=obj.pump_filter_on, + cancellation_on=obj.cancellation_on, + cancellation_phase=convert_maybe_parameter(obj.cancellation_phase), + cancellation_attenuation=convert_maybe_parameter(obj.cancellation_attenuation), + cancellation_source=obj.cancellation_source, + cancellation_source_frequency=convert_maybe_parameter( + obj.cancellation_source_frequency + ), + alc_on=obj.alc_on, + probe_on=obj.probe_on, probe_frequency=convert_maybe_parameter(obj.probe_frequency), probe_power=convert_maybe_parameter(obj.probe_power), ) diff --git a/laboneq/implementation/legacy_adapters/converters_experiment_description.py b/laboneq/implementation/legacy_adapters/converters_experiment_description.py index 0fef136..6b58de6 100644 --- a/laboneq/implementation/legacy_adapters/converters_experiment_description.py +++ b/laboneq/implementation/legacy_adapters/converters_experiment_description.py @@ -246,6 +246,9 @@ def convert_Match(orig: MatchDSL): retval.prng_sample = ( convert_PRNGSample(orig.prng_sample) if orig.prng_sample is not None else None ) + retval.sweep_parameter = convert_dynamic( + orig.sweep_parameter, converter_function_directory + ) retval.local = orig.local retval.uid = orig.uid return post_process(orig, retval, converter_function_directory) diff --git a/laboneq/implementation/payload_builder/experiment_info_builder/experiment_info_builder.py b/laboneq/implementation/payload_builder/experiment_info_builder/experiment_info_builder.py index 2177865..7d9536b 100644 --- a/laboneq/implementation/payload_builder/experiment_info_builder/experiment_info_builder.py +++ b/laboneq/implementation/payload_builder/experiment_info_builder/experiment_info_builder.py @@ -275,11 +275,17 @@ def _load_amplifier_pump( self, amp_pump: AmplifierPump, channel ) -> AmplifierPumpInfo: return AmplifierPumpInfo( - pump_freq=self.opt_param(amp_pump.pump_freq, nt_only=True), + pump_frequency=self.opt_param(amp_pump.pump_frequency, nt_only=True), 
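Together with the `log_level=None` default, the new setter above makes the session's log level adjustable after construction. A sketch; `device_setup` is assumed to be an existing `DeviceSetup`:

```python
import logging

from laboneq.simple import Session

session = Session(device_setup)  # `device_setup` created elsewhere

# Adjust the level of the "laboneq" logger on the fly.
session.log_level = logging.DEBUG
assert session.log_level == logging.DEBUG
```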
pump_power=self.opt_param(amp_pump.pump_power, nt_only=True), - cancellation=amp_pump.cancellation, - alc_engaged=amp_pump.alc_engaged, - use_probe=amp_pump.use_probe, + pump_on=amp_pump.pump_on, + pump_filter_on=amp_pump.pump_filter_on, + cancellation_on=amp_pump.cancellation_on, + cancellation_phase=self.opt_param(amp_pump.cancellation_phase), + cancellation_attenuation=self.opt_param(amp_pump.cancellation_attenuation), + cancellation_source=amp_pump.cancellation_source, + cancellation_source_frequency=amp_pump.cancellation_source_frequency, + alc_on=amp_pump.alc_on, + probe_on=amp_pump.probe_on, probe_frequency=self.opt_param(amp_pump.probe_frequency, nt_only=True), probe_power=self.opt_param(amp_pump.probe_power, nt_only=True), channel=channel, @@ -481,8 +487,8 @@ def _add_parameter( return param_info def opt_param( - self, value: float | int | complex | Parameter, nt_only=False - ) -> float | int | complex | ParameterInfo: + self, value: float | int | complex | None | Parameter, nt_only=False + ) -> float | int | complex | ParameterInfo | None: """Pass through numbers, but convert `Parameter` to `ParameterInfo` Args: @@ -842,6 +848,7 @@ def _load_section( local = None match_user_register = None match_prng_sample = None + match_sweep_parameter: ParameterInfo | None = None if isinstance(section, Match): match_handle = section.handle local = section.local @@ -849,6 +856,18 @@ def _load_section( match_prng_sample = ( section.prng_sample.uid if section.prng_sample is not None else None ) + match_sweep_parameter = self.opt_param( + section.sweep_parameter if section.sweep_parameter is not None else None + ) + if ( + match_handle is None + and match_user_register is None + and match_sweep_parameter is None + and match_prng_sample is None + ): + raise LabOneQException( + f"Match section '{section.uid}' requires a target (measurement handle, sweep parameter, ...)" + ) state = getattr(section, "state", None) this_acquisition_type = None @@ -871,6 +890,7 @@ def _load_section( match_handle=match_handle, match_user_register=match_user_register, match_prng_sample=match_prng_sample, + match_sweep_parameter=match_sweep_parameter, state=state, local=local, execution_type=execution_type, diff --git a/laboneq/controller/laboneq_logging.py b/laboneq/laboneq_logging.py similarity index 77% rename from laboneq/controller/laboneq_logging.py rename to laboneq/laboneq_logging.py index 072ee6d..83d6fbe 100644 --- a/laboneq/controller/laboneq_logging.py +++ b/laboneq/laboneq_logging.py @@ -260,3 +260,76 @@ def set_level(log_level: int | str = logging.INFO): initialize_logging(log_level=log_level) logging.getLogger().setLevel(log_level) logging.getLogger("laboneq").setLevel(log_level) + + +def add_logging_level(level_name, level_num, method_name=None): + """ + Add a new logging level to the `logging` module and the currently configured + logging class. + + Adapted from https://stackoverflow.com/a/35804945/6364667 + + `levelName` becomes an attribute of the `logging` module with the value + `levelNum`. `methodName` becomes a convenience method for both `logging` + itself and the class returned by `logging.getLoggerClass()` (usually just + `logging.Logger`). If `methodName` is not specified, `levelName.lower()` is + used. 
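Returning to the `Match` target validation introduced above: an experiment whose match section names no target now fails at payload-building time with `Match section '...' requires a target`. A sketch (compilation step omitted):

```python
from laboneq.dsl.experiment import Experiment, ExperimentSignal

exp = Experiment(signals=[ExperimentSignal("drive")])

with exp.acquire_loop_rt(count=1):
    # No handle, user_register, sweep_parameter or prng_sample given:
    # building the experiment payload raises a LabOneQException.
    with exp.match():
        with exp.case(0):
            exp.delay(signal="drive", time=32e-9)
```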
+ + To avoid accidental clobberings of existing attributes, this method will + raise an `AttributeError` if the level name is already an attribute of the + `logging` module or if the method name is already present + + Example + ------- + >>> add_logging_level('TRACE', logging.DEBUG - 5) + >>> logging.getLogger(__name__).setLevel("TRACE") + >>> logging.getLogger(__name__).trace('that worked') + >>> logging.trace('so did this') + >>> logging.TRACE + 5 + + """ + if not method_name: + method_name = level_name.lower() + + def log_for_level(self, message, *args, **kwargs): + if self.isEnabledFor(level_num): + self._log(level_num, message, args, **kwargs) + + def log_to_root(message, *args, **kwargs): + logging.log(level_num, message, *args, **kwargs) + + if hasattr(logging, level_name): + logging.error("{} already defined in logging module".format(level_name)) + else: + setattr(logging, level_name, level_num) + + if hasattr(logging, method_name): + logging.error("{} already defined in logging module".format(method_name)) + else: + setattr(logging, method_name, log_to_root) + + if hasattr(logging.getLoggerClass(), method_name): + logging.error("{} already defined in logger class".format(method_name)) + else: + setattr(logging.getLoggerClass(), method_name, log_for_level) + + logging.addLevelName(level_num, level_name) + + +from logging import CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET # noqa: F401 + +DIAGNOSTIC = logging.INFO - 5 + +add_logging_level("DIAGNOSTIC", DIAGNOSTIC) + + +class LabOneQLogger(logging.Logger): + """Stub, never instantiated""" + + def diagnostic(self, *args, **kwargs): + """Stub, actual implementation is patched into `logging.Logger`.""" + + +def get_logger(name: str) -> LabOneQLogger: + return logging.getLogger(name) # type: ignore diff --git a/laboneq/openqasm3/openqasm3_importer.py b/laboneq/openqasm3/openqasm3_importer.py index cf9cba6..2bbd1b8 100644 --- a/laboneq/openqasm3/openqasm3_importer.py +++ b/laboneq/openqasm3/openqasm3_importer.py @@ -5,7 +5,7 @@ import math import operator -from contextlib import contextmanager +from contextlib import contextmanager, nullcontext from typing import Any, Callable, Optional, TextIO, Union import openpulse @@ -934,3 +934,178 @@ def exp_from_qasm( exp.set_calibration(importer.implicit_calibration) return exp + + +def exp_from_qasm_list( + programs: list[str], + qubits: dict[str, Qubit], + gate_store: GateStore, + inputs: dict[str, Any] | None = None, + externs: dict[str, Callable] | None = None, + count: int = 1, + averaging_mode: AveragingMode = AveragingMode.CYCLIC, + acquisition_type: AcquisitionType = AcquisitionType.INTEGRATION, + repetition_time: float = 1e-3, + batch_execution_mode="pipeline", + do_reset=False, + pipeline_chunk_count: int | None = None, +) -> Experiment: + """Process a list of openQASM programs into a single LabOne Q experiment that + executes the QASM snippets sequentially. + + At this time, the QASM programs must not include any measurements. We automatically + append a measurement of all qubits to the end of each program. + + Optionally, a reset operation on all qubits is prepended to each program. The + duration between the reset and the final readout is fixed and must be specified as + `repetition_time`. It must be chosen large enough to accommodate the longest of the + programs. The `repetition_time` parameter is also required if the resets are + disabled. In a future version we hope to make an explicit `repetition_time` optional. 
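The `DIAGNOSTIC` level registered above sits at `logging.INFO - 5`, i.e. between `DEBUG` (10) and `INFO` (20). A sketch, assuming logging handlers are already configured:

```python
from laboneq.laboneq_logging import DIAGNOSTIC, get_logger

logger = get_logger(__name__)
logger.setLevel(DIAGNOSTIC)  # 15
logger.diagnostic("emitted via the method patched in by add_logging_level()")
```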
+ + For the measurement we require the gate store to be loaded with a `measurement` + gate. Similarly, the optional reset requires a `reset` gate to be available. + + Args: + programs: + the list of the QASM snippets + qubits: + Map from OpenQASM qubit names to LabOne Q DSL Qubit objects + gate_store: + Map from OpenQASM gate names to LabOne Q DSL Gate objects + inputs: + Inputs to the OpenQASM program. + externs: + Extern functions for the OpenQASM program. + count: + The number of acquire iterations. + averaging_mode: + The mode of how to average the acquired data. + acquisition_type: + The type of acquisition to perform. + repetition_time: + The length that any single program is padded to. + batch_execution_mode: + The execution mode for the sequence of programs. Can be any of the following: + + - "nt": The individual programs are dispatched by software. + - "pipeline": The individual programs are dispatched by the sequence pipeliner. + - "rt": All the programs are combined into a single real-time program. + + "rt" offers the fastest execution, but is limited by device memory. + In comparison, "pipeline" introduces non-deterministic delays between + programs of up to a few 100 microseconds. "nt" is the slowest. + do_reset: + If `True`, an active reset operation is added to the beginning of each program. + pipeline_chunk_count: + The number of pipeline chunks to divide the experiment into. + + The default chunk count is equal to the number of programs, so that there is one + program per pipeliner chunk. Future versions of LabOne Q may use a more + sophisticated default based on the program sizes. + + Currently the number of programs must be a multiple of the chunk count so that + there are the same number of programs in each chunk. This limitation will be + removed in a future release of LabOne Q. + + A `ValueError` is raised if the number of programs is not a multiple of the + chunk count. + + Returns: + The experiment generated from the OpenQASM programs. + """ + if batch_execution_mode == "pipeline": + if pipeline_chunk_count is None: + pipeline_chunk_count = len(programs) + if len(programs) % pipeline_chunk_count != 0: + # The underlying limitation is that the structure of the acquisitions + # must be the same in each chunk, because the compiled experiment + # recipe only supplies the acquisition information once, rather than + # once per chunk. Once the acquisition information has been moved to + # per-chunk execution information and the controller updated to apply + # this, then this restriction can be removed. + raise ValueError( + f"Number of programs ({len(programs)}) not divisible" + f" by pipeline_chunk_count ({pipeline_chunk_count})" + ) + + signals = [] + for qubit in qubits.values(): + for exp_signal in qubit.experiment_signals(): + if exp_signal in signals: + msg = f"Signal with id {exp_signal.uid} already assigned." 
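A usage sketch of `exp_from_qasm_list` with the documented arguments; `programs`, `q0` and `gate_store` are placeholders, and the gate store must supply a `measure` gate (plus `reset` when `do_reset=True`):

```python
from laboneq.simple import exp_from_qasm_list

exp = exp_from_qasm_list(
    programs,                     # list of OpenQASM snippets, defined elsewhere
    qubits={"q0": q0},            # OpenQASM qubit names -> DSL qubits
    gate_store=gate_store,        # populated GateStore, defined elsewhere
    count=1024,
    repetition_time=200e-6,       # must accommodate the longest program
    batch_execution_mode="pipeline",
    do_reset=True,
)
```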
+ raise LabOneQException(msg) + signals.append(exp_signal) + + exp = Experiment(signals=signals) + experiment_index = LinearSweepParameter( + uid="index", start=0, stop=len(programs) - 1, count=len(programs) + ) + + if batch_execution_mode == "nt": + maybe_nt_sweep = exp.sweep(experiment_index) + else: + maybe_nt_sweep = nullcontext() + + with maybe_nt_sweep: + with exp.acquire_loop_rt( + count=count, + averaging_mode=averaging_mode, + acquisition_type=acquisition_type, + ): + sweep_kwargs = {} + if batch_execution_mode != "nt": + if batch_execution_mode == "pipeline": + # pipelined sweep with specified programs per chunk + sweep_kwargs["chunk_count"] = pipeline_chunk_count + maybe_rt_sweep = exp.sweep(experiment_index, **sweep_kwargs) + else: + maybe_rt_sweep = nullcontext() + + with maybe_rt_sweep: + if do_reset: + with exp.section(uid="qubit reset") as reset_section: + for qasm_qubit_name in qubits: + reset_section.add( + gate_store.lookup_gate("reset", (qasm_qubit_name,)) + ) + + with exp.section( + alignment=SectionAlignment.RIGHT, + length=repetition_time, + ): + with exp.match( + sweep_parameter=experiment_index, + ): + for i, program in enumerate(programs): + with exp.case(i) as c: + importer = OpenQasm3Importer( + qubits=qubits, + inputs=inputs, + externs=externs, + gate_store=gate_store, + ) + c.add(importer(text=program)) + + # read out all qubits + with exp.section(uid="qubit_readout") as readout_section: + for qasm_qubit_name, qubit in qubits.items(): + readout_section.add( + gate_store.lookup_gate( + "measure", + (qasm_qubit_name,), + kwargs={"handle": f"meas{qasm_qubit_name}"}, + ) + ) + with exp.section(): + # The next shot will immediately start with an active reset. + # SHFQA needs some time to process previous results before + # it can trigger the next measurement. So we add a delay + # here to have sufficient margin between the two readouts. + # In the future, we'll ideally not have resort to two + # measurements (one for readout, one for reset) in the + # first place. + exp.delay(qubit.signals["measure"], 500e-9) + + exp.set_calibration(importer.implicit_calibration) + + return exp diff --git a/laboneq/simple.py b/laboneq/simple.py index a8ca6d6..f1ba4fc 100644 --- a/laboneq/simple.py +++ b/laboneq/simple.py @@ -5,8 +5,8 @@ Convenience header for the LabOne Q project. 
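Returning to the pipeliner chunking above: in `"pipeline"` mode the number of programs must divide evenly into chunks. For example, eight programs with `pipeline_chunk_count=4` gives two programs per chunk, while `pipeline_chunk_count=3` raises the `ValueError` shown earlier. A sketch with the same placeholder inputs as before:

```python
exp = exp_from_qasm_list(
    programs,                   # say, 8 programs
    qubits=qubits,
    gate_store=gate_store,
    repetition_time=200e-6,
    batch_execution_mode="pipeline",
    pipeline_chunk_count=4,     # 8 % 4 == 0 -> two programs per chunk
)
```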
""" +from laboneq import laboneq_logging from laboneq._token import install_token -from laboneq.controller import laboneq_logging from laboneq.core.types.compiled_experiment import CompiledExperiment from laboneq.dsl import LinearSweepParameter, SweepParameter from laboneq.dsl.calibration import ( @@ -14,26 +14,27 @@ BounceCompensation, Calibratable, Calibration, + CancellationSource, ExponentialCompensation, FIRCompensation, HighPassCompensation, MixerCalibration, Oscillator, - Precompensation, OutputRoute, + Precompensation, SignalCalibration, units, ) -from laboneq.dsl.device import create_connection, DeviceSetup +from laboneq.dsl.device import DeviceSetup, create_connection from laboneq.dsl.device.device_setup_helper import DeviceSetupHelper from laboneq.dsl.device.instruments import ( - SHFQC, + HDAWG, + PQSC, SHFPPC, - SHFSG, SHFQA, - HDAWG, + SHFQC, + SHFSG, UHFQA, - PQSC, ) from laboneq.dsl.enums import ( AcquisitionType, @@ -70,6 +71,10 @@ from laboneq.dsl.utils import has_onboard_lo from laboneq.implementation.data_storage.laboneq_database import DataStore from laboneq.openqasm3.gate_store import GateStore -from laboneq.openqasm3.openqasm3_importer import exp_from_qasm, ExternResult +from laboneq.openqasm3.openqasm3_importer import ( + ExternResult, + exp_from_qasm, + exp_from_qasm_list, +) from laboneq.pulse_sheet_viewer.pulse_sheet_viewer import show_pulse_sheet from laboneq.simulator.output_simulator import OutputSimulator diff --git a/laboneq/simulator/seqc_parser.py b/laboneq/simulator/seqc_parser.py index 009ad79..3075bc2 100644 --- a/laboneq/simulator/seqc_parser.py +++ b/laboneq/simulator/seqc_parser.py @@ -191,7 +191,6 @@ def parse_expression(item, runtime: SimpleRuntime): return runtime.setOscFreqByParam(func_name[4:], *args) else: raise RuntimeError(f"unknown function: {func_name}") - return # Skipping unknown function raise TreeWalkException("not an expression") @@ -322,6 +321,7 @@ class SeqCDescriptor: wave_index: dict[Any, Any] = None command_table: list[Any] = None is_spectroscopy: bool = False + feedback_command_table_offset: int = 0 class Operation(Enum): @@ -732,8 +732,7 @@ def playHold(self, length): def _waves_from_command_table_entry(self, ct_entry): if "waveform" not in ct_entry: return None, None - if "index" not in ct_entry["waveform"]: - return None, None + assert "index" in ct_entry["waveform"] wave_index = ct_entry["waveform"]["index"] known_wave = WaveRefInfo(assigned_index=wave_index) @@ -763,15 +762,18 @@ def executeTableEntry(self, ct_index, latency=None): ZSYNC_DATA_PQSC_REGISTER_HD = 0b10000000001 if ct_index == QA_DATA_PROCESSED_SG or ct_index == ZSYNC_DATA_PQSC_REGISTER_SG: assert self.descriptor.device_type == "SHFSG" - # todo(JL): Find a better index via the command table offset; take last for now - ct_index = self.descriptor.command_table[-1]["index"] + ct_index = self.descriptor.feedback_command_table_offset elif ct_index == ZSYNC_DATA_PQSC_REGISTER_HD: assert self.descriptor.device_type == "HDAWG" - # todo(JL): Find a better index via the command table offset; take last for now - ct_index = self.descriptor.command_table[-1]["index"] + ct_index = self.descriptor.feedback_command_table_offset ct_entry = self._command_table_by_index[ct_index] + if "waveform" in ct_entry and ct_entry["waveform"].get("playZero", False): + length = ct_entry["waveform"]["length"] + self.playZero(length) + return + wave_names, known_wave = self._waves_from_command_table_entry(ct_entry) ct_info = CommandTableEntryInfo.from_ct_entry(ct_entry) @@ -1080,6 +1082,7 @@ 
def analyze_recipe( sample_multiple=sample_multiple, sampling_rate=sampling_rate, output_port_delay=output_channel_delays.get(output_channels[0], 0.0), + feedback_command_table_offset=awg.command_table_match_offset, ) precompensation_info = output_channel_precompensation.get( diff --git a/pyproject.toml b/pyproject.toml index bb05bb1..29c2595 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,7 +37,7 @@ dependencies = [ "jsonschema", "matplotlib", "cycler", - "numpy", + "numpy<2.0", "openpulse", "openqasm3", "orjson", @@ -54,9 +54,10 @@ dependencies = [ "attrs", "sortedcontainers", "typing_extensions", - "zhinst-core~=23.10.52579", - "zhinst-toolkit~=0.6.2", - "zhinst-utils==0.3.6", + "zhinst-core~=24.1.54288", + "zhinst-toolkit~=0.6.3", + "zhinst-utils==0.4.0", + "zhinst-timing-models~= 24.1.54288", "unsync==1.4.0", ]
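The tightened dependency pins can be checked in an installed environment, e.g.:

```python
from importlib.metadata import version

# Pins from pyproject.toml above.
print(version("numpy"))                 # < 2.0
print(version("zhinst-core"))           # ~= 24.1.54288
print(version("zhinst-toolkit"))        # ~= 0.6.3
print(version("zhinst-utils"))          # == 0.4.0
print(version("zhinst-timing-models"))  # ~= 24.1.54288
```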