From 75d9c928d789f11d8163b154204a98e2445f85e9 Mon Sep 17 00:00:00 2001
From: Shuli Shu <31480676+multiphaseCFD@users.noreply.github.com>
Date: Wed, 12 Jun 2024 16:26:41 -0400
Subject: [PATCH] Add `pybind11` and `Python` layers to `lightning.tensor` (#748)

### Before submitting

Please complete the following checklist when submitting a PR:

- [x] All new features must include a unit test. If you've fixed a bug or added code that should be tested, add a test to the [`tests`](../tests) directory!
- [x] All new functions and code must be clearly commented and documented. If you do make documentation changes, make sure that the docs build and render correctly by running `make docs`.
- [x] Ensure that the test suite passes, by running `make test`.
- [x] Add a new entry to the `.github/CHANGELOG.md` file, summarizing the change, and including a link back to the PR.
- [x] Ensure that code is properly formatted by running `make format`.

When all the above are checked, delete everything above the dashed line and fill in the pull request template.

------------------------------------------------------------------------------------------------------------

**Context:** Add both the `Python` and `pybind11` layers to the `lightning.tensor` (`mps`) device.

1. Gates supported: operators that act on 1 or 2 wires, or that can be decomposed into 1- and 2-wire gates. The `MultiRZ` gate is not supported.
2. Observables supported: all observables supported by the `lightning` state-vector backends, except for `qml.Projector` and `SparseHamiltonian`.
3. Neither `qml.QubitStateVector` nor `qml.StatePrep` is supported.
4. `qml.state()` is not supported; `state()` returns a `LightningStateTensor` object that represents the whole compute graph.
5. `qml.BasisState` only accepts the array representation of a basis state. An `int` value can only support systems of up to `32` qubits.
6. `qml.expval` is the only measurement `lightning.tensor` supports.
7. Limited gradient support via `parameter-shift`; a `100`-qubit SEL circuit was tested on an `A100 80 GB` GPU.
8. `maxBondDim` defaults to `128`.
9. Editable `pip` installation with `PL_BACKEND="lightning_tensor" pip install -e .`, provided that `cutensornet-cu12` is installed in the `venv` environment. Detailed instructions are in [PR #756](https://github.com/PennyLaneAI/pennylane-lightning/pull/756).
10. `lightning.tensor` is also tested against all tests in `tests/` whose tested features it supports; the skipped tests are tasks for the next step.
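For orientation, a minimal usage sketch of the new Python layer (not part of this patch); it assumes the device is exposed under the name `lightning.tensor` and follows the restrictions listed above (array-valued `qml.BasisState`, `qml.expval`-only measurements, parameter-shift gradients):

```python
# Hedged usage sketch: exercising the new `lightning.tensor` device through
# the standard PennyLane API, within the restrictions described above.
import numpy as np
import pennylane as qml

# MPS-based tensor-network device; maxBondDim defaults to 128.
dev = qml.device("lightning.tensor", wires=4)

# Gradient support is limited to parameter-shift.
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(theta):
    # BasisState must be given as an array of 0s and 1s.
    qml.BasisState(np.array([0, 1, 0, 1]), wires=range(4))
    qml.RX(theta, wires=0)
    qml.CNOT(wires=[0, 1])
    # expval is the only measurement supported by lightning.tensor.
    return qml.expval(qml.PauliZ(0))

print(circuit(0.543))
```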
[SC-62440] [SC-62439] [SC-60329] [SC-60325] **Description of the Change:** **Benefits:** **Possible Drawbacks:** **Related GitHub Issues:** --------- Co-authored-by: ringo-but-quantum Co-authored-by: Ali Asadi <10773383+maliasadi@users.noreply.github.com> Co-authored-by: Mudit Pandey Co-authored-by: Vincent Michaud-Rioux Co-authored-by: Vincent Michaud-Rioux Co-authored-by: Amintor Dusko <87949283+AmintorDusko@users.noreply.github.com> --- .github/CHANGELOG.md | 3 + .../workflows/tests_lmps_tncuda_python.yml | 208 +++++++++++ .github/workflows/tests_without_binary.yml | 6 +- CMakeLists.txt | 7 +- pennylane_lightning/core/_serialize.py | 31 +- pennylane_lightning/core/_version.py | 2 +- .../core/src/bindings/Bindings.cpp | 30 +- .../core/src/bindings/Bindings.hpp | 228 ++++++++---- .../core/src/bindings/BindingsBase.hpp | 57 +++ .../lightning_tensor/tncuda/CMakeLists.txt | 4 + .../lightning_tensor/tncuda/MPSTNCuda.hpp | 23 +- .../tncuda/bindings/CMakeLists.txt | 18 + .../tncuda/bindings/LTensorTNCudaBindings.hpp | 141 ++++++++ .../tncuda/gates/TNCudaGateCache.hpp | 7 +- .../tests/Test_MPSTNCuda_Expval.cpp | 42 ++- .../tncuda/observables/ObservablesTNCuda.hpp | 27 ++ .../utils/tncuda_utils/tncudaError.hpp | 2 +- .../lightning_tensor/_measurements.py | 131 +++++++ .../lightning_tensor/_tensornet.py | 194 ++++++++++ .../lightning_tensor/lightning_tensor.py | 257 +++++++++++-- requirements-dev.txt | 1 + setup.py | 14 +- tests/conftest.py | 10 +- .../test_adjoint_jacobian_class.py | 6 +- .../test_measurements_class.py | 4 +- .../test_state_vector_class.py | 6 +- .../lightning_tensor/test_gates_and_expval.py | 341 ++++++++++++++++++ .../lightning_tensor/test_lightning_tensor.py | 56 +-- .../test_measurements_class.py | 79 ++++ .../lightning_tensor/test_tensornet_class.py | 88 +++++ tests/new_api/test_device.py | 73 +++- tests/new_api/test_expval.py | 6 +- tests/new_api/test_var.py | 5 +- tests/test_adjoint_jacobian.py | 3 + tests/test_apply.py | 157 +++++++- tests/test_arrays.py | 2 +- tests/test_comparison.py | 4 + tests/test_device.py | 4 + tests/test_execute.py | 4 + tests/test_expval.py | 24 +- tests/test_gates.py | 6 + tests/test_measurements.py | 24 +- tests/test_measurements_sparse.py | 4 + tests/test_serialize.py | 58 ++- tests/test_templates.py | 84 ++++- tests/test_var.py | 4 + tests/test_vjp.py | 3 + 47 files changed, 2260 insertions(+), 228 deletions(-) create mode 100644 .github/workflows/tests_lmps_tncuda_python.yml create mode 100644 pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/bindings/CMakeLists.txt create mode 100644 pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/bindings/LTensorTNCudaBindings.hpp create mode 100644 pennylane_lightning/lightning_tensor/_measurements.py create mode 100644 pennylane_lightning/lightning_tensor/_tensornet.py create mode 100644 tests/lightning_tensor/test_gates_and_expval.py create mode 100644 tests/lightning_tensor/test_measurements_class.py create mode 100644 tests/lightning_tensor/test_tensornet_class.py diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index 1c8d12bd6d..a6f212876b 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -1,6 +1,9 @@ # Release 0.37.0-dev ### New features since last release +* Implement Python interface to the `lightning.tensor` device. + [(#748)](https://github.com/PennyLaneAI/pennylane-lightning/pull/748) + * Add `inverse` support for gate operations in `lightning.tensor` in the C++ layer. 
[(#753)](https://github.com/PennyLaneAI/pennylane-lightning/pull/753) diff --git a/.github/workflows/tests_lmps_tncuda_python.yml b/.github/workflows/tests_lmps_tncuda_python.yml new file mode 100644 index 0000000000..d281943cf5 --- /dev/null +++ b/.github/workflows/tests_lmps_tncuda_python.yml @@ -0,0 +1,208 @@ +name: Testing::Linux::x86_64::MPSTNCuda::Python +# TODO remove MPS from the workflow name and the workflow filename once exact is in. +# TODO remove the `pl_tensor_method` once exact TN is added. +on: + workflow_call: + inputs: + lightning-version: + type: string + required: true + description: The version of Lightning to use. Valid values are either 'release' (most recent release candidate), 'stable' (most recent git-tag) or 'latest' (most recent commit from master) + pennylane-version: + type: string + required: true + description: The version of PennyLane to use. Valid values are either 'release' (most recent release candidate), 'stable' (most recent git-tag) or 'latest' (most recent commit from master) + release: + pull_request: + paths-ignore: + - .github/** + - '!.github/workflows/tests_lmps_tncuda_python.yml' + - pennylane_lightning/core/src/simulators/lightning_kokkos/** + - pennylane_lightning/core/src/simulators/lightning_qubit/** + - pennylane_lightning/core/src/simulators/lightning_gpu/** + - pennylane_lightning/core/_version.py + - pennylane_lightning/lightning_gpu/** + - pennylane_lightning/lightning_qubit/** + - pennylane_lightning/lightning_kokkos/** + push: + branches: + - master + +env: + CI_CUDA_ARCH: 86 + COVERAGE_FLAGS: "--cov=pennylane_lightning --cov-report=term-missing --cov-report=xml:./coverage.xml --no-flaky-report -p no:warnings --tb=native" + GCC_VERSION: 11 + +concurrency: + group: tests_lmps_tncuda_python-${{ github.ref }}-${{ github.event }}-${{ inputs.lightning-version }}-${{ inputs.pennylane-version }} + cancel-in-progress: true + +jobs: + builddeps: + runs-on: + - self-hosted + - ubuntu-22.04 + - gpu + + strategy: + max-parallel: 1 + matrix: + os: [ubuntu-22.04] + pl_backend: ["lightning_tensor"] + cuda_version: ["12"] + + steps: + - name: Validate GPU version and installed compiler + run: | + source /etc/profile.d/modules.sh + module use /opt/modules + module load cuda/${{ matrix.cuda_version }} + echo "${PATH}" >> $GITHUB_PATH + echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> $GITHUB_ENV + nvcc --version + nvidia-smi + + pythontestswithMPSTNCuda: + if: ${{ !contains(fromJSON('["schedule", "workflow_dispatch"]'), github.event_name) }} + needs: [builddeps] + strategy: + matrix: + os: [ubuntu-22.04] + pl_backend: ["lightning_tensor"] + default_backend: ["lightning_qubit"] + pl_tensor_method: ["mps"] + pl_tensor_backend: ["cutensornet"] + cuda_version: ["12"] + + name: Python Tests (${{ matrix.pl_backend }}, method-${{ matrix.pl_tensor_method }}, backend-${{ matrix.pl_tensor_backend }}, cuda-${{ matrix.cuda_version }}) + runs-on: + - ${{ matrix.os }} + - self-hosted + - gpu + + steps: + - name: Validate GPU version and installed compiler + run: | + source /etc/profile.d/modules.sh + module use /opt/modules + module load cuda/${{ matrix.cuda_version }} + echo "${PATH}" >> $GITHUB_PATH + echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> $GITHUB_ENV + nvcc --version + nvidia-smi + + - name: Checkout PennyLane-Lightning-Tensor-MPS-TNCuda + uses: actions/checkout@v4 + with: + path: main + + - uses: actions/setup-python@v5 + name: Install Python + with: + python-version: '3.9' + + # Since the self-hosted runner can be re-used. 
It is best to set up all package + # installations in a virtual environment that gets cleaned at the end of each workflow run + - name: Setup Python virtual environment + id: setup_venv + env: + VENV_NAME: ${{ github.workspace }}/venv_${{ steps.setup_python.outputs.python-version }}_${{ github.sha }} + run: | + # Clear any pre-existing venvs + rm -rf venv_* + + # Create new venv for this workflow_run + python --version + python -m venv ${{ env.VENV_NAME }} + + # Add the venv to PATH for subsequent steps + echo ${{ env.VENV_NAME }}/bin >> $GITHUB_PATH + + # Adding venv name as an output for subsequent steps to reference if needed + echo "venv_name=${{ env.VENV_NAME }}" >> $GITHUB_OUTPUT + + - name: Display Python-Path + id: python_path + run: | + py_path=$(which python) + echo "Python Interpreter Path => $py_path" + echo "python=$py_path" >> $GITHUB_OUTPUT + + pip_path=$(which python) + echo "PIP Path => $py_path" + echo "pip=$py_path" >> $GITHUB_OUTPUT + + - name: Install required packages + run: | + cd main + python -m pip install -r requirements-dev.txt + python -m pip install ninja cmake scipy custatevec-cu${{ matrix.cuda_version }} cutensornet-cu${{ matrix.cuda_version }} openfermionpyscf + + - name: Checkout PennyLane for release build + if: inputs.pennylane-version == 'release' + uses: actions/checkout@v4 + with: + path: pennylane + repository: PennyLaneAI/pennylane + + - name: Switch to release build of PennyLane + if: inputs.pennylane-version == 'release' + run: | + cd pennylane + git fetch --all + git checkout $(git branch -a --list "origin/v*rc*" | tail -1) + python -m pip uninstall -y pennylane && python -m pip install . -vv --no-deps + + - name: Install Stable PennyLane + if: inputs.pennylane-version == 'stable' + run: | + cd main + python -m pip uninstall -y pennylane && python -m pip install -U pennylane + + - name: Build and install package + run: | + cd main + rm -rf build + CMAKE_ARGS="-DPL_BACKEND=${{ matrix.default_backend }} -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION)" \ + python -m pip install . -vv + rm -rf build + + rm -rf build + CMAKE_ARGS="-DPL_BACKEND=${{ matrix.pl_backend }} -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION)" \ + python -m pip install . 
-vv + + - name: Run PennyLane-Lightning-Tensor unit tests + if: ${{ matrix.pl_backend != 'all'}} + run: | + cd main/ + DEVICENAME=`echo ${{ matrix.pl_backend }} | sed "s/_/./g"` + PL_DEVICE=${DEVICENAME} python -m pytest tests $COVERAGE_FLAGS + mv coverage.xml coverage-${{ github.job }}-${{ matrix.pl_backend }}.xml + + - name: Upload code coverage results + uses: actions/upload-artifact@v3 + with: + name: ubuntu-codecov-results-python + path: ./main/coverage-${{ github.job }}-${{ matrix.pl_backend }}.xml + if-no-files-found: error + + + upload-to-codecov-linux-python: + needs: [pythontestswithMPSTNCuda] + name: Upload coverage data to codecov + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Download coverage reports + uses: actions/download-artifact@v3 + with: + name: ubuntu-codecov-results-python + + - name: Upload to Codecov + uses: codecov/codecov-action@v4 + with: + fail_ci_if_error: true + verbose: true + token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/tests_without_binary.yml b/.github/workflows/tests_without_binary.yml index e7578e3050..959995ae86 100644 --- a/.github/workflows/tests_without_binary.yml +++ b/.github/workflows/tests_without_binary.yml @@ -37,7 +37,7 @@ jobs: runs-on: ${{ needs.determine_runner.outputs.runner_group }} strategy: matrix: - pl_backend: ["lightning_qubit", "lightning_kokkos", "lightning_gpu"] + pl_backend: ["lightning_qubit", "lightning_kokkos", "lightning_gpu", "lightning_tensor"] name: Python Tests without Binary (${{ matrix.pl_backend }}) @@ -97,7 +97,7 @@ jobs: python -m pip uninstall -y pennylane && python -m pip install -U pennylane - name: Install the pennylane_lightning package - if: ${{ matrix.pl_backend == 'lightning_kokkos' || matrix.pl_backend == 'lightning_gpu'}} + if: ${{ contains(fromJson('["lightning_kokkos", "lightning_gpu", "lightning_tensor"]'), matrix.pl_backend) }} run: | cd main SKIP_COMPILATION=True PL_BACKEND="lightning_qubit" python -m pip install . -vv @@ -110,7 +110,7 @@ jobs: cd main python -m pip install . 
-vv - - name: Run PennyLane-Lightning unit tests + - name: Run PennyLane-Lightning unit tests for all backends run: | cd main/ DEVICENAME=`echo ${{ matrix.pl_backend }} | sed "s/_/./g"` diff --git a/CMakeLists.txt b/CMakeLists.txt index 344378e95d..a7d0b84f10 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -156,7 +156,7 @@ if(ENABLE_PYTHON) pybind11_add_module("${PL_BACKEND}_ops" "pennylane_lightning/core/src/bindings/Bindings.cpp") # Allow pip installation of cuQuantum & CUDA 12 libs to be accessible without setting LD_LIBRARY_PATH for lightning_gpu - if("${PL_BACKEND}" STREQUAL "lightning_gpu") + if("${PL_BACKEND}" STREQUAL "lightning_gpu" OR "${PL_BACKEND}" STREQUAL "lightning_tensor") set(CMAKE_BUILD_RPATH_USE_ORIGIN ON) set_target_properties("${PL_BACKEND}_ops" PROPERTIES BUILD_RPATH "$ORIGIN/../cuquantum/lib:$ORIGIN/../nvidia/cuda_runtime/lib:$ORIGIN/../nvidia/cublas/lib:$ORIGIN/../nvidia/cusparse/lib:$ORIGIN") endif() @@ -171,12 +171,15 @@ if(ENABLE_PYTHON) ) target_link_libraries("${PL_BACKEND}_ops" PRIVATE ${PL_BACKEND} #simulator - "${PL_BACKEND}_algorithms" "${PL_BACKEND}_observables" "${PL_BACKEND}_bindings" "${PL_BACKEND}_measurements" ) + if(NOT DEFINED PL_TENSOR) + target_link_libraries("${PL_BACKEND}_ops" PRIVATE "${PL_BACKEND}_algorithms") + endif() + set_target_properties("${PL_BACKEND}_ops" PROPERTIES CXX_VISIBILITY_PRESET hidden) target_compile_definitions("${PL_BACKEND}_ops" PRIVATE VERSION_INFO=${VERSION_STRING}) endif() diff --git a/pennylane_lightning/core/_serialize.py b/pennylane_lightning/core/_serialize.py index a220b12226..b056e7fba8 100644 --- a/pennylane_lightning/core/_serialize.py +++ b/pennylane_lightning/core/_serialize.py @@ -58,7 +58,7 @@ class QuantumScriptSerializer: """ - # pylint: disable=import-outside-toplevel, too-many-instance-attributes, c-extension-no-member + # pylint: disable=import-outside-toplevel, too-many-instance-attributes, c-extension-no-member, too-many-branches, too-many-statements def __init__( self, device_name, use_csingle: bool = False, use_mpi: bool = False, split_obs: bool = False ): @@ -86,10 +86,23 @@ def __init__( raise ImportError( f"Pre-compiled binaries for {device_name} are not available." ) from exception + elif device_name == "lightning.tensor": + try: + import pennylane_lightning.lightning_tensor_ops as lightning_ops + except ImportError as exception: + raise ImportError( + f"Pre-compiled binaries for {device_name} are not available." 
+ ) from exception else: raise DeviceError(f'The device name "{device_name}" is not a valid option.') - self.statevector_c64 = lightning_ops.StateVectorC64 - self.statevector_c128 = lightning_ops.StateVectorC128 + + if device_name == "lightning.tensor": + self.tensornetwork_c64 = lightning_ops.TensorNetC64 + self.tensornetwork_c128 = lightning_ops.TensorNetC128 + else: + self.statevector_c64 = lightning_ops.StateVectorC64 + self.statevector_c128 = lightning_ops.StateVectorC128 + self.named_obs_c64 = lightning_ops.observables.NamedObsC64 self.named_obs_c128 = lightning_ops.observables.NamedObsC128 self.hermitian_obs_c64 = lightning_ops.observables.HermitianObsC64 @@ -98,8 +111,10 @@ def __init__( self.tensor_prod_obs_c128 = lightning_ops.observables.TensorProdObsC128 self.hamiltonian_c64 = lightning_ops.observables.HamiltonianC64 self.hamiltonian_c128 = lightning_ops.observables.HamiltonianC128 - self.sparse_hamiltonian_c64 = lightning_ops.observables.SparseHamiltonianC64 - self.sparse_hamiltonian_c128 = lightning_ops.observables.SparseHamiltonianC128 + + if device_name != "lightning.tensor": + self.sparse_hamiltonian_c64 = lightning_ops.observables.SparseHamiltonianC64 + self.sparse_hamiltonian_c128 = lightning_ops.observables.SparseHamiltonianC128 self._use_mpi = use_mpi @@ -134,6 +149,8 @@ def sv_type(self): """State vector matching ``use_csingle`` precision (and MPI if it is supported).""" if self._use_mpi: return self.statevector_mpi_c64 if self.use_csingle else self.statevector_mpi_c128 + if self.device_name == "lightning.tensor": + return self.tensornetwork_c64 if self.use_csingle else self.tensornetwork_c128 return self.statevector_c64 if self.use_csingle else self.statevector_c128 @property @@ -286,6 +303,10 @@ def _ob(self, observable, wires_map: dict = None): if isinstance(observable, OP_MATH_OBS): return self._hamiltonian(observable, wires_map) if isinstance(observable, SparseHamiltonian): + if self.device_name == "lightning.tensor": + raise NotImplementedError( + "SparseHamiltonian is not supported on the lightning.tensor device." + ) return self._sparse_hamiltonian(observable, wires_map) return self._hermitian_ob(observable, wires_map) diff --git a/pennylane_lightning/core/_version.py b/pennylane_lightning/core/_version.py index 4809ce72fa..9516c8c6a1 100644 --- a/pennylane_lightning/core/_version.py +++ b/pennylane_lightning/core/_version.py @@ -16,4 +16,4 @@ Version number (major.minor.patch[-label]) """ -__version__ = "0.37.0-dev32" +__version__ = "0.37.0-dev33" diff --git a/pennylane_lightning/core/src/bindings/Bindings.cpp b/pennylane_lightning/core/src/bindings/Bindings.cpp index 425a5ea096..2733e6f5e0 100644 --- a/pennylane_lightning/core/src/bindings/Bindings.cpp +++ b/pennylane_lightning/core/src/bindings/Bindings.cpp @@ -1,4 +1,4 @@ -// Copyright 2018-2023 Xanadu Quantum Technologies Inc. +// Copyright 2018-2024 Xanadu Quantum Technologies Inc. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -28,17 +28,20 @@ #define LIGHTNING_MODULE_NAME lightning_kokkos_ops #elif _ENABLE_PLGPU == 1 #define LIGHTNING_MODULE_NAME lightning_gpu_ops +#elif _ENABLE_PLTENSOR == 1 +#define LIGHTNING_TENSOR_MODULE_NAME lightning_tensor_ops #endif -#if defined(LIGHTNING_MODULE_NAME) /// @cond DEV namespace { using namespace Pennylane; } // namespace /// @endcond +#if defined(LIGHTNING_MODULE_NAME) /** - * @brief Add C++ classes, methods and functions to Python module. 
+ * @brief Add Lightning State-vector C++ classes, methods and functions to + * Python module. */ PYBIND11_MODULE( LIGHTNING_MODULE_NAME, // NOLINT: No control over Pybind internals @@ -65,4 +68,23 @@ PYBIND11_MODULE( #endif } -#endif \ No newline at end of file +#endif + +#if defined(LIGHTNING_TENSOR_MODULE_NAME) +/** + * @brief Add LightningTensor C++ classes, methods and functions to Python + * module. + */ +PYBIND11_MODULE( + LIGHTNING_TENSOR_MODULE_NAME, // NOLINT: No control over Pybind internals + m) { + // Suppress doxygen autogenerated signatures + pybind11::options options; + options.disable_function_signatures(); + + // Register bindings for backend-specific info: + registerBackendSpecificInfo(m); + + registerLightningTensorClassBindings(m); +} +#endif diff --git a/pennylane_lightning/core/src/bindings/Bindings.hpp b/pennylane_lightning/core/src/bindings/Bindings.hpp index f5aa305828..fe72f1bb86 100644 --- a/pennylane_lightning/core/src/bindings/Bindings.hpp +++ b/pennylane_lightning/core/src/bindings/Bindings.hpp @@ -86,6 +86,22 @@ using namespace Pennylane::LightningGPU::Measures; } // namespace /// @endcond +#elif _ENABLE_PLTENSOR == 1 + +#include "LTensorTNCudaBindings.hpp" +#include "MeasurementsTNCuda.hpp" +#include "ObservablesTNCuda.hpp" +#include "Util.hpp" + +namespace py = pybind11; + +/// @cond DEV +namespace { +using namespace Pennylane::LightningTensor::TNCuda; +using namespace Pennylane::LightningTensor::TNCuda::Observables; +using namespace Pennylane::LightningTensor::TNCuda::Measures; +} // namespace + /// @endcond #else static_assert(false, "Backend not found."); @@ -289,16 +305,18 @@ void registerInfo(py::module_ &m) { /** * @brief Register observable classes. * - * @tparam StateVectorT + * @tparam LightningBackendT * @param m Pybind module */ -template +template void registerBackendAgnosticObservables(py::module_ &m) { using PrecisionT = - typename StateVectorT::PrecisionT; // Statevector's precision. + typename LightningBackendT::PrecisionT; // LightningBackendT's's + // precision. using ComplexT = - typename StateVectorT::ComplexT; // Statevector's complex type. - using ParamT = PrecisionT; // Parameter's data precision + typename LightningBackendT::ComplexT; // LightningBackendT's + // complex type. 
+ using ParamT = PrecisionT; // Parameter's data precision const std::string bitsize = std::to_string(sizeof(std::complex) * 8); @@ -306,122 +324,119 @@ void registerBackendAgnosticObservables(py::module_ &m) { using np_arr_c = py::array_t, py::array::c_style>; using np_arr_r = py::array_t; +#ifdef _ENABLE_PLTENSOR + using ObservableT = ObservableTNCuda; + using NamedObsT = NamedObsTNCuda; + using HermitianObsT = HermitianObsTNCuda; + using TensorProdObsT = TensorProdObsTNCuda; + using HamiltonianT = HamiltonianTNCuda; +#else + using ObservableT = Observable; + using NamedObsT = NamedObs; + using HermitianObsT = HermitianObs; + using TensorProdObsT = TensorProdObs; + using HamiltonianT = Hamiltonian; +#endif + std::string class_name; class_name = "ObservableC" + bitsize; - py::class_, - std::shared_ptr>>(m, class_name.c_str(), + py::class_>(m, class_name.c_str(), py::module_local()); class_name = "NamedObsC" + bitsize; - py::class_, std::shared_ptr>, - Observable>(m, class_name.c_str(), - py::module_local()) + py::class_, ObservableT>( + m, class_name.c_str(), py::module_local()) .def(py::init( [](const std::string &name, const std::vector &wires) { - return NamedObs(name, wires); + return NamedObsT(name, wires); })) - .def("__repr__", &NamedObs::getObsName) - .def("get_wires", &NamedObs::getWires, - "Get wires of observables") + .def("__repr__", &NamedObsT::getObsName) + .def("get_wires", &NamedObsT::getWires, "Get wires of observables") .def( "__eq__", - [](const NamedObs &self, py::handle other) -> bool { - if (!py::isinstance>(other)) { + [](const NamedObsT &self, py::handle other) -> bool { + if (!py::isinstance(other)) { return false; } - auto other_cast = other.cast>(); + auto other_cast = other.cast(); return self == other_cast; }, "Compare two observables"); class_name = "HermitianObsC" + bitsize; - py::class_, - std::shared_ptr>, - Observable>(m, class_name.c_str(), - py::module_local()) - .def(py::init( - [](const np_arr_c &matrix, const std::vector &wires) { - auto buffer = matrix.request(); - const auto *ptr = static_cast(buffer.ptr); - return HermitianObs( - std::vector(ptr, ptr + buffer.size), wires); - })) - .def("__repr__", &HermitianObs::getObsName) - .def("get_wires", &HermitianObs::getWires, - "Get wires of observables") - .def("get_matrix", &HermitianObs::getMatrix, + py::class_, ObservableT>( + m, class_name.c_str(), py::module_local()) + .def(py::init([](const np_arr_c &matrix, + const std::vector &wires) { + auto buffer = matrix.request(); + const auto *ptr = static_cast(buffer.ptr); + return HermitianObsT(std::vector(ptr, ptr + buffer.size), + wires); + })) + .def("__repr__", &HermitianObsT::getObsName) + .def("get_wires", &HermitianObsT::getWires, "Get wires of observables") + .def("get_matrix", &HermitianObsT::getMatrix, "Get matrix representation of Hermitian operator") .def( "__eq__", - [](const HermitianObs &self, - py::handle other) -> bool { - if (!py::isinstance>(other)) { + [](const HermitianObsT &self, py::handle other) -> bool { + if (!py::isinstance(other)) { return false; } - auto other_cast = other.cast>(); + auto other_cast = other.cast(); return self == other_cast; }, "Compare two observables"); class_name = "TensorProdObsC" + bitsize; - py::class_, - std::shared_ptr>, - Observable>(m, class_name.c_str(), - py::module_local()) - .def(py::init( - [](const std::vector>> - &obs) { return TensorProdObs(obs); })) - .def("__repr__", &TensorProdObs::getObsName) - .def("get_wires", &TensorProdObs::getWires, - "Get wires of observables") - .def("get_ops", 
&TensorProdObs::getObs, - "Get operations list") + py::class_, ObservableT>( + m, class_name.c_str(), py::module_local()) + .def(py::init([](const std::vector> &obs) { + return TensorProdObsT(obs); + })) + .def("__repr__", &TensorProdObsT::getObsName) + .def("get_wires", &TensorProdObsT::getWires, "Get wires of observables") + .def("get_ops", &TensorProdObsT::getObs, "Get operations list") .def( "__eq__", - [](const TensorProdObs &self, - py::handle other) -> bool { - if (!py::isinstance>(other)) { + [](const TensorProdObsT &self, py::handle other) -> bool { + if (!py::isinstance(other)) { return false; } - auto other_cast = other.cast>(); + auto other_cast = other.cast(); return self == other_cast; }, "Compare two observables"); class_name = "HamiltonianC" + bitsize; - using ObsPtr = std::shared_ptr>; - py::class_, - std::shared_ptr>, - Observable>(m, class_name.c_str(), - py::module_local()) + using ObsPtr = std::shared_ptr; + py::class_, ObservableT>( + m, class_name.c_str(), py::module_local()) .def(py::init( [](const np_arr_r &coeffs, const std::vector &obs) { auto buffer = coeffs.request(); const auto ptr = static_cast(buffer.ptr); - return Hamiltonian{ - std::vector(ptr, ptr + buffer.size), obs}; + return HamiltonianT{std::vector(ptr, ptr + buffer.size), obs}; })) - .def("__repr__", &Hamiltonian::getObsName) - .def("get_wires", &Hamiltonian::getWires, - "Get wires of observables") - .def("get_ops", &Hamiltonian::getObs, + .def("__repr__", &HamiltonianT::getObsName) + .def("get_wires", &HamiltonianT::getWires, "Get wires of observables") + .def("get_ops", &HamiltonianT::getObs, "Get operations contained by Hamiltonian") - .def("get_coeffs", &Hamiltonian::getCoeffs, + .def("get_coeffs", &HamiltonianT::getCoeffs, "Get Hamiltonian coefficients") .def( "__eq__", - [](const Hamiltonian &self, - py::handle other) -> bool { - if (!py::isinstance>(other)) { + [](const HamiltonianT &self, py::handle other) -> bool { + if (!py::isinstance(other)) { return false; } - auto other_cast = other.cast>(); + auto other_cast = other.cast(); return self == other_cast; }, "Compare two observables"); } - +#ifndef _ENABLE_PLTENSOR /** * @brief Register agnostic measurements class functionalities. * @@ -699,4 +714,79 @@ void registerLightningClassBindings(py::module_ &m) { m, "LightningException"); } } + +#elif _ENABLE_PLTENSOR == 1 +/** + * @brief Register lightning.tensor measurements class functionalities. + * + * @tparam TensorNetT + * @tparam PyClass + * @param pyclass Pybind11's measurements class to bind methods. + */ +template +void registerLightningTensorBackendAgnosticMeasurements(PyClass &pyclass) { + using MeasurementsT = MeasurementsTNCuda; + using ObservableT = ObservableTNCuda; + pyclass.def( + "expval", + [](MeasurementsT &M, const std::shared_ptr &ob) { + return M.expval(*ob); + }, + "Expected value of an observable object."); +} + +/** + * @brief Templated class to build lightning.tensor class bindings. + * + * @tparam TensorNetT Tensor network type + * @param m Pybind11 module. + */ +template void lightningTensorClassBindings(py::module_ &m) { + using PrecisionT = + typename TensorNetT::PrecisionT; // TensorNet's precision. 
+ // Enable module name to be based on size of complex datatype + const std::string bitsize = + std::to_string(sizeof(std::complex) * 8); + + //***********************************************************************// + // TensorNet + //***********************************************************************// + std::string class_name = "TensorNetC" + bitsize; + auto pyclass = + py::class_(m, class_name.c_str(), py::module_local()); + + registerBackendClassSpecificBindings(pyclass); + + //***********************************************************************// + // Observables + //***********************************************************************// + /* Observables submodule */ + py::module_ obs_submodule = + m.def_submodule("observables", "Submodule for observables classes."); + registerBackendAgnosticObservables(obs_submodule); + + //***********************************************************************// + // Measurements + //***********************************************************************// + class_name = "MeasurementsC" + bitsize; + auto pyclass_measurements = py::class_>( + m, class_name.c_str(), py::module_local()); + + pyclass_measurements.def(py::init()); + registerLightningTensorBackendAgnosticMeasurements( + pyclass_measurements); +} + +template +void registerLightningTensorClassBindings(py::module_ &m) { + if constexpr (!std::is_same_v) { + using TensorNetT = typename TypeList::Type; + lightningTensorClassBindings(m); + registerLightningTensorClassBindings(m); + py::register_local_exception( + m, "LightningException"); + } +} +#endif + } // namespace Pennylane diff --git a/pennylane_lightning/core/src/bindings/BindingsBase.hpp b/pennylane_lightning/core/src/bindings/BindingsBase.hpp index 996917105d..793a970816 100644 --- a/pennylane_lightning/core/src/bindings/BindingsBase.hpp +++ b/pennylane_lightning/core/src/bindings/BindingsBase.hpp @@ -76,4 +76,61 @@ void registerGatesForStateVector(PyClass &pyclass) { pyclass.def(gate_name.c_str(), func, doc.c_str()); }); } + +// TODO: Unify registerTensor and registerGatesForStateVector +/** + * @brief Register matrix. + */ +template +void registerTensor( + TensorNetT &tensor_network, + const py::array_t, + py::array::c_style | py::array::forcecast> &matrix, + const std::vector &wires, bool inverse = false) { + using ComplexT = typename TensorNetT::ComplexT; + const auto m_buffer = matrix.request(); + std::vector conv_matrix; + if (m_buffer.size) { + const auto m_ptr = static_cast(m_buffer.ptr); + conv_matrix = std::vector{m_ptr, m_ptr + m_buffer.size}; + } + tensor_network.applyOperation("applyMatrix", wires, inverse, {}, + conv_matrix); +} + +// TODO: Unify registerGatesForTensorNet and registerMatrix +/** + * @brief Register TensorNet class to pybind. 
+ * + * @tparam TensorNetT Tensor network type to register + * @tparam Pyclass Pybind11's class object type + * + * @param pyclass Pybind11's class object to bind tensor network + */ +template +void registerGatesForTensorNet(PyClass &pyclass) { + using PrecisionT = typename TensorNetT::PrecisionT; // TensorNet's precision + using ParamT = PrecisionT; // Parameter's data precision + + using Pennylane::Gates::GateOperation; + using Pennylane::Util::for_each_enum; + namespace Constant = Pennylane::Gates::Constant; + + pyclass.def("applyMatrix", ®isterTensor, + "Apply a given matrix to wires."); + + for_each_enum([&pyclass](GateOperation gate_op) { + using Pennylane::Util::lookup; + const auto gate_name = + std::string(lookup(Constant::gate_names, gate_op)); + const std::string doc = "Apply the " + gate_name + " gate."; + auto func = [gate_name = gate_name]( + TensorNetT &tensor_network, + const std::vector &wires, bool inverse, + const std::vector ¶ms) { + tensor_network.applyOperation(gate_name, wires, inverse, params); + }; + pyclass.def(gate_name.c_str(), func, doc.c_str()); + }); +} } // namespace Pennylane::Bindings diff --git a/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/CMakeLists.txt b/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/CMakeLists.txt index aeb597cea3..49630b536f 100644 --- a/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/CMakeLists.txt +++ b/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/CMakeLists.txt @@ -21,6 +21,8 @@ set(LTENSOR_MPS_FILES MPSTNCuda.cpp CACHE INTERNAL "" FORCE) add_library(${PL_BACKEND} STATIC ${LTENSOR_MPS_FILES}) +target_compile_options(lightning_compile_options INTERFACE "-D_ENABLE_PLTENSOR=1") + ########################## ## Enforce C++ Standard ## ########################## @@ -37,6 +39,7 @@ option(PL_DISABLE_CUDA_SAFETY "Build without CUDA call safety checks" OFF) target_link_libraries(${PL_BACKEND} PUBLIC lightning_compile_options lightning_external_libs + lightning_utils ${PL_BACKEND}_gates ${PL_BACKEND}_tensor ${PL_BACKEND}_tensornetBase @@ -58,6 +61,7 @@ endif() # Include all nested sources directories ############################################################################### set(COMPONENT_SUBDIRS base + bindings gates measurements observables diff --git a/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/MPSTNCuda.hpp b/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/MPSTNCuda.hpp index 2a3fc8d8f1..e6754f92f2 100644 --- a/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/MPSTNCuda.hpp +++ b/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/MPSTNCuda.hpp @@ -208,9 +208,13 @@ class MPSTNCuda final : public TNCudaBase> { }; /** - * @brief Get final state of the quantum circuit. + * @brief Append MPS final state to the quantum circuit. + * + * @param cutoff Cutoff value for SVD decomposition. Default is 0. + * @param cutoff_mode Cutoff mode for SVD decomposition. Default is "abs". 
*/ - void get_final_state() { + void append_mps_final_state(double cutoff = 0, + std::string cutoff_mode = "abs") { if (MPSFinalized_ == MPSStatus::MPSFinalizedNotSet) { MPSFinalized_ = MPSStatus::MPSFinalizedSet; PL_CUTENSORNET_IS_SUCCESS(cutensornetStateFinalizeMPS( @@ -235,6 +239,21 @@ class MPSTNCuda final : public TNCudaBase> { /* const void * */ &algo, /* size_t */ sizeof(algo))); + PL_ABORT_IF_NOT(cutoff_mode == "rel" || cutoff_mode == "abs", + "cutoff_mode should either 'rel' or 'abs'."); + + cutensornetStateAttributes_t svd_cutoff_mode = + (cutoff_mode == "abs") + ? CUTENSORNET_STATE_CONFIG_MPS_SVD_ABS_CUTOFF + : CUTENSORNET_STATE_CONFIG_MPS_SVD_REL_CUTOFF; + + PL_CUTENSORNET_IS_SUCCESS(cutensornetStateConfigure( + /* const cutensornetHandle_t */ BaseType::getTNCudaHandle(), + /* cutensornetState_t */ BaseType::getQuantumState(), + /* cutensornetStateAttributes_t */ svd_cutoff_mode, + /* const void * */ &cutoff, + /* size_t */ sizeof(cutoff))); + BaseType::computeState( const_cast(getSitesExtentsPtr().data()), reinterpret_cast(getTensorsOutDataPtr().data())); diff --git a/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/bindings/CMakeLists.txt b/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/bindings/CMakeLists.txt new file mode 100644 index 0000000000..8a0a48b580 --- /dev/null +++ b/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/bindings/CMakeLists.txt @@ -0,0 +1,18 @@ +cmake_minimum_required(VERSION 3.20) + +project(${PL_BACKEND}_bindings LANGUAGES CXX) + +add_library(${PL_BACKEND}_bindings INTERFACE) + +target_include_directories(${PL_BACKEND}_bindings INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}) + +target_link_libraries(${PL_BACKEND}_bindings INTERFACE lightning_bindings + lightning_utils + ${PL_BACKEND} + ${PL_BACKEND}_gates + ${PL_BACKEND}_observables + ${PL_BACKEND}_measurements + ${PL_BACKEND}_utils + ) + +set_property(TARGET ${PL_BACKEND}_bindings PROPERTY POSITION_INDEPENDENT_CODE ON) diff --git a/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/bindings/LTensorTNCudaBindings.hpp b/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/bindings/LTensorTNCudaBindings.hpp new file mode 100644 index 0000000000..83f2187dc0 --- /dev/null +++ b/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/bindings/LTensorTNCudaBindings.hpp @@ -0,0 +1,141 @@ +// Copyright 2024 Xanadu Quantum Technologies Inc. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @file LTensorTNCudaBindings.hpp + * Defines LightningTensor-specific operations to export to Python, other + * utility functions interfacing with Pybind11 and support to agnostic bindings. 
+ */ + +#pragma once +#include + +#include "cuda.h" + +#include "BindingsBase.hpp" +#include "DevTag.hpp" +#include "DevicePool.hpp" +#include "Error.hpp" +#include "MPSTNCuda.hpp" +#include "TypeList.hpp" +#include "cuda_helpers.hpp" + +/// @cond DEV +namespace { +using namespace Pennylane; +using namespace Pennylane::Bindings; +using Pennylane::LightningTensor::TNCuda::MPSTNCuda; +} // namespace +/// @endcond + +namespace py = pybind11; + +namespace Pennylane::LightningTensor::TNCuda { +using TensorNetBackends = + Pennylane::Util::TypeList, MPSTNCuda, void>; + +/** + * @brief Get a gate kernel map for a tensor network. + */ +template +void registerBackendClassSpecificBindings(PyClass &pyclass) { + registerGatesForTensorNet(pyclass); + + pyclass + .def(py::init()) // num_qubits, max_bond_dim + .def(py::init>()) // num_qubits, max_bond_dim, dev-tag + .def( + "setBasisState", + [](TensorNet &tensor_network, + std::vector &basisState) { + tensor_network.setBasisState(basisState); + }, + "Create Basis State on GPU.") + .def( + "appendMPSFinalState", + [](TensorNet &tensor_network, double cutoff, + std::string cutoff_mode) { + tensor_network.append_mps_final_state(cutoff, cutoff_mode); + }, + "Get the final state.") + .def("reset", &TensorNet::reset, "Reset the statevector."); +} + +/** + * @brief Provide backend information. + */ +auto getBackendInfo() -> py::dict { + using namespace py::literals; + + return py::dict("NAME"_a = "lightning.tensor"); +} + +/** + * @brief Register bindings for backend-specific info. + * + * @param m Pybind11 module. + */ +// TODO Move this method to a separate module for both LGPU and LTensor usage. +void registerBackendSpecificInfo(py::module_ &m) { + m.def("backend_info", &getBackendInfo, "Backend-specific information."); + m.def("device_reset", &deviceReset, "Reset all GPU devices and contexts."); + m.def("allToAllAccess", []() { + for (int i = 0; i < static_cast(getGPUCount()); i++) { + cudaDeviceEnablePeerAccess(i, 0); + } + }); + + m.def("is_gpu_supported", &isCuQuantumSupported, + py::arg("device_number") = 0, + "Checks if the given GPU device meets the minimum architecture " + "support for the PennyLane-Lightning-Tensor device."); + + m.def("get_gpu_arch", &getGPUArch, py::arg("device_number") = 0, + "Returns the given GPU major and minor GPU support."); + py::class_>(m, "DevPool") + .def(py::init<>()) + .def("getActiveDevices", &DevicePool::getActiveDevices) + .def("isActive", &DevicePool::isActive) + .def("isInactive", &DevicePool::isInactive) + .def("acquireDevice", &DevicePool::acquireDevice) + .def("releaseDevice", &DevicePool::releaseDevice) + .def("syncDevice", &DevicePool::syncDevice) + .def_static("getTotalDevices", &DevicePool::getTotalDevices) + .def_static("getDeviceUIDs", &DevicePool::getDeviceUIDs) + .def_static("setDeviceID", &DevicePool::setDeviceIdx); + + py::class_>(m, "DevTag") + .def(py::init<>()) + .def(py::init()) + .def(py::init([](int device_id, void *stream_id) { + // Note, streams must be handled externally for now. 
+ // Binding support provided through void* conversion to cudaStream_t + return new DevTag(device_id, + static_cast(stream_id)); + })) + .def(py::init &>()) + .def("getDeviceID", &DevTag::getDeviceID) + .def("getStreamID", + [](DevTag &dev_tag) { + // default stream points to nullptr, so just return void* as + // type + return static_cast(dev_tag.getStreamID()); + }) + .def("refresh", &DevTag::refresh); +} + +} // namespace Pennylane::LightningTensor::TNCuda + /// @endcond diff --git a/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/gates/TNCudaGateCache.hpp b/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/gates/TNCudaGateCache.hpp index 104d261a43..ba15c458cf 100644 --- a/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/gates/TNCudaGateCache.hpp +++ b/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/gates/TNCudaGateCache.hpp @@ -167,9 +167,10 @@ template class TNCudaGateCache { } }; - // device_gates_ is a map of id of gate tensor operator in the graph to the - // gate_info and gate_info is a pair of gate_info_key, which contains both - // gate name and parameter value, and the tensor data on device. + // device_gates_ is a map of id of gate tensor operator in the graph to + // the gate_info and gate_info is a pair of gate_info_key, which + // contains both gate name and parameter value, and the tensor data on + // device. std::unordered_map device_gates_; }; } // namespace Pennylane::LightningTensor::TNCuda::Gates diff --git a/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/measurements/tests/Test_MPSTNCuda_Expval.cpp b/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/measurements/tests/Test_MPSTNCuda_Expval.cpp index f4977d3c2a..344bb8eea0 100644 --- a/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/measurements/tests/Test_MPSTNCuda_Expval.cpp +++ b/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/measurements/tests/Test_MPSTNCuda_Expval.cpp @@ -51,7 +51,7 @@ TEMPLATE_TEST_CASE("[Identity]", "[MPSTNCuda_Expval]", float, double) { mps_state.applyOperations({{"Hadamard"}, {"CNOT"}, {"CNOT"}}, {{0}, {0, 1}, {1, 2}}, {{false}, {false}, {false}}); - mps_state.get_final_state(); + mps_state.append_mps_final_state(); auto ob = NamedObsT("Identity", {0}); auto res = measure.expval(ob); CHECK(res == Approx(ONE)); @@ -78,7 +78,7 @@ TEMPLATE_TEST_CASE("[PauliX]", "[MPSTNCuda_Expval]", float, double) { mps_state.applyOperations({{"Hadamard"}, {"CNOT"}, {"CNOT"}}, {{0}, {0, 1}, {1, 2}}, {{false}, {false}, {false}}); - mps_state.get_final_state(); + mps_state.append_mps_final_state(); auto ob = NamedObsT("PauliX", {0}); auto res = measure.expval(ob); CHECK(res == ZERO); @@ -88,7 +88,7 @@ TEMPLATE_TEST_CASE("[PauliX]", "[MPSTNCuda_Expval]", float, double) { mps_state.applyOperations( {{"Hadamard"}, {"Hadamard"}, {"Hadamard"}}, {{0}, {1}, {2}}, {{false}, {false}, {false}}); - mps_state.get_final_state(); + mps_state.append_mps_final_state(); auto ob = NamedObsT("PauliX", {0}); auto res = measure.expval(ob); CHECK(res == Approx(ONE)); @@ -104,7 +104,7 @@ TEMPLATE_TEST_CASE("[PauliX]", "[MPSTNCuda_Expval]", float, double) { {"Hadamard"}}, {{0}, {0}, {1}, {1}, {2}, {2}}, {{false}, {false}, {false}, {false}, {false}, {false}}); - mps_state.get_final_state(); + mps_state.append_mps_final_state(); auto ob = NamedObsT("PauliX", {0}); auto res = measure.expval(ob); CHECK(res == -Approx(ONE)); @@ -181,6 +181,30 @@ TEMPLATE_TEST_CASE("[PauliZ]", "[MPSTNCuda_Expval]", float, double) { 
PrecisionT ref = 0.8775825618903724; REQUIRE(res == Approx(ref).margin(1e-6)); } + + SECTION("Using expval mps with cutoff") { + double cutoff = GENERATE(1e-1, 1e-2); + std::string cutoff_mode = GENERATE("rel", "abs"); + mps_state.applyOperations( + {{"Hadamard"}, + {"Hadamard"}, + {"Hadamard"}, + {"SingleExcitation"}, + {"IsingXX"}, + {"IsingXY"}}, + {{0}, {1}, {2}, {0, 1}, {1, 2}, {0, 2}}, + {{false}, {false}, {false}, {false}, {false}, {false}}, + {{}, {}, {}, {0.5}, {0.6}, {0.7}}); + mps_state.append_mps_final_state(cutoff, cutoff_mode); + auto m = MeasurementsTNCuda(mps_state); + auto ob = NamedObsT("PauliZ", {0}); + auto res = m.expval(ob); + // ref is from default.qubit + PrecisionT ref = -0.2115276040475712; + + REQUIRE_THAT(res, Catch::Matchers::WithinRel( + ref, static_cast(cutoff))); + } } } @@ -205,7 +229,7 @@ TEMPLATE_TEST_CASE("[Hadamard]", "[MPSTNCuda_Expval]", float, double) { // multiple times with different observables SECTION("Using expval") { mps_state.applyOperation("PauliX", {0}); - mps_state.get_final_state(); + mps_state.append_mps_final_state(); auto ob = NamedObsT("Hadamard", {0}); auto res = measure.expval(ob); @@ -234,7 +258,7 @@ TEMPLATE_TEST_CASE("[Parametric_obs]", "[MPSTNCuda_Expval]", float, double) { SECTION("Using expval") { mps_state.applyOperation("PauliX", {0}); - mps_state.get_final_state(); + mps_state.append_mps_final_state(); auto ob = NamedObsT("RX", {0}, {0}); auto res = measure.expval(ob); @@ -267,7 +291,7 @@ TEMPLATE_TEST_CASE("[Hermitian]", "[MPSTNCuda_Expval]", float, double) { mps_state.applyOperations({{"Hadamard"}, {"CNOT"}, {"CNOT"}}, {{0}, {0, 1}, {1, 2}}, {{false}, {false}, {false}}); - mps_state.get_final_state(); + mps_state.append_mps_final_state(); auto ob = HermitianObsT(mat, std::vector{0}); auto res = measure.expval(ob); CHECK(res == ZERO); @@ -277,7 +301,7 @@ TEMPLATE_TEST_CASE("[Hermitian]", "[MPSTNCuda_Expval]", float, double) { mps_state.applyOperations( {{"Hadamard"}, {"Hadamard"}, {"Hadamard"}}, {{0}, {1}, {2}}, {{false}, {false}, {false}}); - mps_state.get_final_state(); + mps_state.append_mps_final_state(); auto ob = HermitianObsT(mat, {0}); auto res = measure.expval(ob); CHECK(res == Approx(ONE)); @@ -293,7 +317,7 @@ TEMPLATE_TEST_CASE("[Hermitian]", "[MPSTNCuda_Expval]", float, double) { {"Hadamard"}}, {{0}, {0}, {1}, {1}, {2}, {2}}, {{false}, {false}, {false}, {false}, {false}, {false}}); - mps_state.get_final_state(); + mps_state.append_mps_final_state(); auto ob = HermitianObsT(mat, {0}); auto res = measure.expval(ob); CHECK(res == -Approx(ONE)); diff --git a/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/observables/ObservablesTNCuda.hpp b/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/observables/ObservablesTNCuda.hpp index b5134a747d..3db2825e7b 100644 --- a/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/observables/ObservablesTNCuda.hpp +++ b/pennylane_lightning/core/src/simulators/lightning_tensor/tncuda/observables/ObservablesTNCuda.hpp @@ -264,6 +264,11 @@ class HermitianObsTNCuda : public ObservableTNCuda { [[nodiscard]] auto getWires() const -> std::vector override { return wires_; } + + /** + * @brief Get the matrix of the Hermitian observable. + */ + [[nodiscard]] auto getMatrix() const -> const MatrixT & { return matrix_; } }; /** @@ -424,6 +429,14 @@ class TensorProdObsTNCuda : public ObservableTNCuda { } return obs_stream.str(); } + + /** + * @brief Get the observable. 
+ */ + [[nodiscard]] auto getObs() const + -> std::vector>> { + return obs_; + }; }; /** @@ -534,5 +547,19 @@ class HamiltonianTNCuda : public ObservableTNCuda { ss << "]}"; return ss.str(); } + /** + * @brief Get the observable. + */ + [[nodiscard]] auto getObs() const + -> std::vector>> { + return obs_; + }; + + /** + * @brief Get the coefficients of the observable. + */ + [[nodiscard]] auto getCoeffs() const -> std::vector { + return BaseType::getCoeffs(); + }; }; } // namespace Pennylane::LightningTensor::TNCuda::Observables diff --git a/pennylane_lightning/core/src/simulators/lightning_tensor/utils/tncuda_utils/tncudaError.hpp b/pennylane_lightning/core/src/simulators/lightning_tensor/utils/tncuda_utils/tncudaError.hpp index f5d1e9366c..6f3169c4a8 100644 --- a/pennylane_lightning/core/src/simulators/lightning_tensor/utils/tncuda_utils/tncudaError.hpp +++ b/pennylane_lightning/core/src/simulators/lightning_tensor/utils/tncuda_utils/tncudaError.hpp @@ -44,7 +44,7 @@ using namespace Pennylane::Util; GetCuTensorNetworkErrorString(err) \ .c_str()) #else -#define PL_CUTENSORNET_IS_SUCCESS \ +#define PL_CUTENSORNET_IS_SUCCESS(err) \ { static_cast(err); } #endif diff --git a/pennylane_lightning/lightning_tensor/_measurements.py b/pennylane_lightning/lightning_tensor/_measurements.py new file mode 100644 index 0000000000..6b1ee3203f --- /dev/null +++ b/pennylane_lightning/lightning_tensor/_measurements.py @@ -0,0 +1,131 @@ +# Copyright 2024 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Class implementation for tensornet measurements. +""" + +# pylint: disable=import-error, no-name-in-module, ungrouped-imports +try: + from pennylane_lightning.lightning_tensor_ops import MeasurementsC64, MeasurementsC128 +except ImportError: + pass + +from typing import Callable + +import numpy as np +import pennylane as qml +from pennylane.measurements import ExpectationMP, MeasurementProcess, StateMeasurement +from pennylane.tape import QuantumScript +from pennylane.typing import Result, TensorLike + +from pennylane_lightning.core._serialize import QuantumScriptSerializer + + +class LightningTensorMeasurements: + """Lightning Tensor Measurements class + + Measures the tensor network provided by the LightningTensorNet class. + + Args: + tensor_network(LightningTensorNet): Lightning tensornet class containing the tensor network to be measured. + """ + + def __init__( + self, + tensor_network, + ) -> None: + self._tensornet = tensor_network + self._dtype = tensor_network.dtype + self._measurement_lightning = self._measurement_dtype()(tensor_network.tensornet) + + @property + def dtype(self): + """Returns the simulation data type.""" + return self._dtype + + def _measurement_dtype(self): + """Binding to Lightning Measurements C++ class. 
+ + Returns: the Measurements class + """ + return MeasurementsC64 if self.dtype == np.complex64 else MeasurementsC128 + + # pylint: disable=protected-access + def expval(self, measurementprocess: MeasurementProcess): + """Expectation value of the supplied observable contained in the MeasurementProcess. + + Args: + measurementprocess (StateMeasurement): measurement to apply to the tensor network + + Returns: + Expectation value of the observable + """ + if isinstance(measurementprocess.obs, qml.SparseHamiltonian): + raise NotImplementedError("Sparse Hamiltonians are not supported.") + + ob_serialized = QuantumScriptSerializer( + self._tensornet.device_name, self.dtype == np.complex64 + )._ob(measurementprocess.obs) + return self._measurement_lightning.expval(ob_serialized) + + def get_measurement_function( + self, measurementprocess: MeasurementProcess + ) -> Callable[[MeasurementProcess, TensorLike], TensorLike]: + """Get the appropriate method for performing a measurement. + + Args: + measurementprocess (MeasurementProcess): measurement process to apply to the graph + + Returns: + Callable: function that returns the measurement result + """ + if isinstance(measurementprocess, StateMeasurement): + if isinstance(measurementprocess, ExpectationMP): + return self.expval + + raise NotImplementedError( + "Does not support current measurement. Only ExpectationMP measurements are supported." + ) + + def measurement(self, measurementprocess: MeasurementProcess) -> TensorLike: + """Apply a measurement process to a tensor network. + + Args: + measurementprocess (MeasurementProcess): measurement process to apply to the graph + + Returns: + TensorLike: the result of the measurement + """ + return self.get_measurement_function(measurementprocess)(measurementprocess) + + def measure_tensor_network(self, circuit: QuantumScript) -> Result: + """ + Perform the measurements required by the circuit on the provided tensor network. + + This is an internal function that will be called by the successor to ``lightning.tensor``. + + Args: + circuit (QuantumScript): The single circuit to simulate + + Returns: + Tuple[TensorLike]: The measurement results + """ + + if circuit.shots: + raise NotImplementedError("Shots are not supported for tensor network simulations.") + # analytic case + if len(circuit.measurements) == 1: + return self.measurement(circuit.measurements[0]) + + return tuple(self.measurement(mp) for mp in circuit.measurements) diff --git a/pennylane_lightning/lightning_tensor/_tensornet.py b/pennylane_lightning/lightning_tensor/_tensornet.py new file mode 100644 index 0000000000..cf9eceb150 --- /dev/null +++ b/pennylane_lightning/lightning_tensor/_tensornet.py @@ -0,0 +1,194 @@ +# Copyright 2024 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Class implementation for tensornet manipulation. 
+""" + +# pylint: disable=import-error, no-name-in-module, ungrouped-imports +try: + from pennylane_lightning.lightning_tensor_ops import TensorNetC64, TensorNetC128 +except ImportError: + pass + + +import numpy as np +import pennylane as qml +from pennylane import BasisState, DeviceError, StatePrep +from pennylane.ops.op_math import Adjoint +from pennylane.tape import QuantumScript + + +# pylint: disable=too-many-instance-attributes +class LightningTensorNet: + """Lightning tensornet class. + + Interfaces with C++ python binding methods for tensornet manipulation. + + Args: + num_wires(int): the number of wires to initialize the device with + c_dtype: Datatypes for tensor network representation. Must be one of + ``np.complex64`` or ``np.complex128``. Default is ``np.complex128`` + method(string): tensor network method. Options: ["mps"]. Default is "mps". + max_bond_dim(int): maximum bond dimension for the tensor network + cutoff(float): threshold for singular value truncation. Default is 0. + cutoff_mode(string): singular value truncation mode. Options: ["rel", "abs"]. + device_name(string): tensor network device name. Options: ["lightning.tensor"] + """ + + # pylint: disable=too-many-arguments + def __init__( + self, + num_wires, + method: str = "mps", + c_dtype=np.complex128, + max_bond_dim: int = 128, + cutoff: float = 0, + cutoff_mode: str = "abs", + device_name="lightning.tensor", + ): + self._num_wires = num_wires + self._max_bond_dim = max_bond_dim + self._method = method + self._cutoff = cutoff + self._cutoff_mode = cutoff_mode + self._c_dtype = c_dtype + + if device_name != "lightning.tensor": + raise DeviceError(f'The device name "{device_name}" is not a valid option.') + + self._device_name = device_name + self._tensornet = self._tensornet_dtype()(self._num_wires, self._max_bond_dim) + + @property + def dtype(self): + """Returns the tensor network data type.""" + return self._c_dtype + + @property + def device_name(self): + """Returns the tensor network device name.""" + return self._device_name + + @property + def num_wires(self): + """Number of wires addressed on this device""" + return self._num_wires + + @property + def tensornet(self): + """Returns a handle to the tensor network.""" + return self._tensornet + + def _tensornet_dtype(self): + """Binding to Lightning Managed tensor network C++ class. + + Returns: the tensor network class + """ + return TensorNetC128 if self.dtype == np.complex128 else TensorNetC64 + + def reset_state(self): + """Reset the device's initial quantum state""" + # init the quantum state to |00..0> + self._tensornet.reset() + + def _apply_basis_state(self, state, wires): + """Initialize the quantum state in a specified computational basis state. + + Args: + state (array[int]): computational basis state of shape ``(wires,)`` + consisting of 0s and 1s. + wires (Wires): wires that the provided computational state should be + initialized on + + Note: This function does not support broadcasted inputs yet. + """ + # length of basis state parameter + n_basis_state = len(state) + + if not set(state.tolist()).issubset({0, 1}): + raise ValueError("BasisState parameter must consist of 0 or 1 integers.") + + if n_basis_state != len(wires): + raise ValueError("BasisState parameter and wires must be of equal length.") + + self._tensornet.setBasisState(state) + + def _apply_lightning(self, operations): + """Apply a list of operations to the quantum state. 
+ + Args: + operations (list[~pennylane.operation.Operation]): operations to apply + + Returns: + None + """ + tensornet = self._tensornet + + # Skip over identity operations instead of performing + # matrix multiplication with it. + for operation in operations: + if isinstance(operation, qml.Identity): + continue + if isinstance(operation, Adjoint): + name = operation.base.name + invert_param = True + else: + name = operation.name + invert_param = False + method = getattr(tensornet, name, None) + wires = list(operation.wires) + + if method is not None: # apply specialized gate + param = operation.parameters + method(wires, invert_param, param) + else: # apply gate as a matrix + # Inverse can be set to False since qml.matrix(operation) is already in + # inverted form + method = getattr(tensornet, "applyMatrix") + try: + method(qml.matrix(operation), wires, False) + except AttributeError: # pragma: no cover + # To support older versions of PL + method(operation.matrix, wires, False) + + def apply_operations(self, operations): + """Append operations to the tensor network graph.""" + # State preparation is currently done in Python + if operations: # make sure operations[0] exists + if isinstance(operations[0], StatePrep): + raise DeviceError( + "lightning.tensor does not support initialization with a state vector." + ) + if isinstance(operations[0], BasisState): + self._apply_basis_state(operations[0].parameters[0], operations[0].wires) + operations = operations[1:] + + self._apply_lightning(operations) + + def set_tensor_network(self, circuit: QuantumScript): + """ + Set the tensor network that results from executing the given quantum script. + + This is an internal function that will be called by the successor to ``lightning.tensor``. + + Args: + circuit (QuantumScript): The single circuit to simulate + + Returns: + LightningTensorNet: Lightning final state class. + + """ + self.apply_operations(circuit.operations) + if self._method == "mps": + self._tensornet.appendMPSFinalState(self._cutoff, self._cutoff_mode) diff --git a/pennylane_lightning/lightning_tensor/lightning_tensor.py b/pennylane_lightning/lightning_tensor/lightning_tensor.py index 9916a4317d..b0db8f84b4 100644 --- a/pennylane_lightning/lightning_tensor/lightning_tensor.py +++ b/pennylane_lightning/lightning_tensor/lightning_tensor.py @@ -1,4 +1,4 @@ -# Copyright 2018-2024 Xanadu Quantum Technologies Inc. +# Copyright 2024 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ # limitations under the License. """ This module contains the LightningTensor class that inherits from the new device interface. -It is a device to perform tensor network simulation of a quantum circuit. +It is a device to perform tensor network simulations of quantum circuits using `cutensornet`. 
""" from dataclasses import replace from numbers import Number @@ -23,9 +23,28 @@ import pennylane as qml from pennylane.devices import DefaultExecutionConfig, Device, ExecutionConfig from pennylane.devices.modifiers import simulator_tracking, single_tape_support -from pennylane.tape import QuantumTape +from pennylane.devices.preprocess import ( + decompose, + validate_device_wires, + validate_measurements, + validate_observables, +) +from pennylane.operation import Operator +from pennylane.tape import QuantumScript, QuantumTape +from pennylane.transforms.core import TransformProgram from pennylane.typing import Result, ResultBatch +from ._measurements import LightningTensorMeasurements +from ._tensornet import LightningTensorNet + +try: + # pylint: disable=import-error, unused-import + from pennylane_lightning.lightning_tensor_ops import backend_info + + LT_CPP_BINARY_AVAILABLE = True +except ImportError: + LT_CPP_BINARY_AVAILABLE = False + Result_or_ResultBatch = Union[Result, ResultBatch] QuantumTapeBatch = Sequence[QuantumTape] QuantumTape_or_Batch = Union[QuantumTape, QuantumTapeBatch] @@ -38,6 +57,112 @@ _methods = frozenset({"mps"}) # The set of supported methods. +_operations = frozenset( + { + "Identity", + "BasisState", + "QubitUnitary", + "ControlledQubitUnitary", + "MultiControlledX", + "DiagonalQubitUnitary", + "PauliX", + "PauliY", + "PauliZ", + "Hadamard", + "S", + "Adjoint(S)", + "T", + "Adjoint(T)", + "SX", + "Adjoint(SX)", + "CNOT", + "SWAP", + "ISWAP", + "Adjoint(ISWAP)", + "PSWAP", + "Adjoint(SISWAP)", + "SISWAP", + "SQISW", + "CSWAP", + "Toffoli", + "CY", + "CZ", + "PhaseShift", + "ControlledPhaseShift", + "RX", + "RY", + "RZ", + "Rot", + "CRX", + "CRY", + "CRZ", + "CRot", + "IsingXX", + "IsingYY", + "IsingZZ", + "IsingXY", + "SingleExcitation", + "SingleExcitationPlus", + "SingleExcitationMinus", + "DoubleExcitation", + "QubitCarry", + "QubitSum", + "OrbitalRotation", + "QFT", + "ECR", + "BlockEncode", + "C(BlockEncode)", + } +) + +_observables = frozenset( + { + "PauliX", + "PauliY", + "PauliZ", + "Hadamard", + "Hermitian", + "Identity", + "Hamiltonian", + "LinearCombination", + "Sum", + "SProd", + "Prod", + "Exp", + } +) +# The set of supported observables. + + +def stopping_condition(op: Operator) -> bool: + """A function that determines whether or not an operation is supported by the ``mps`` method of ``lightning.tensor``.""" + # These thresholds are adapted from `lightning_base.py` + # To avoid building matrices beyond the given thresholds. + # This should reduce runtime overheads for larger systems. + return op.has_matrix and len(op.wires) <= 2 and op.name in _operations + + +def simulate(circuit: QuantumScript, tensornet: LightningTensorNet) -> Result: + """Simulate a single quantum script. + + Args: + circuit (QuantumTape): The single circuit to simulate + tensornet (LightningTensorNet): handle to Lightning tensor network + + Returns: + Tuple[TensorLike]: The results of the simulation + + Note that this function can return measurements for non-commuting observables simultaneously. 
+ """ + tensornet.reset_state() + tensornet.set_tensor_network(circuit) + return LightningTensorMeasurements(tensornet).measure_tensor_network(circuit) + + +def accepted_observables(obs: Operator) -> bool: + """A function that determines whether or not an observable is supported by ``lightning.tensor``.""" + return obs.name in _observables + def accepted_backends(backend: str) -> bool: """A function that determines whether or not a backend is supported by ``lightning.tensor``.""" @@ -56,24 +181,57 @@ class LightningTensor(Device): A device to perform tensor network operations on a quantum circuit. + This device is designed to simulate large-scale quantum circuits using tensor network methods. For + small circuits, other devices like ``lightning.qubit``, ``lightning.gpu``or ``lightning.kokkos`` are + recommended. + + Currently, only the Matrix Product State (MPS) method as implemented in the ``cutensornet`` backend is supported. + Args: wires (int): The number of wires to initialize the device with. Defaults to ``None`` if not specified. - backend (str): Supported backend. Currently, only ``cutensornet`` is supported. method (str): Supported method. Currently, only ``mps`` is supported. - shots (int): How many times the circuit should be evaluated (or sampled) to estimate - the expectation values. Currently, it can only be ``None``, so that computation of - statistics like expectation values and variances is performed analytically. c_dtype: Datatypes for the tensor representation. Must be one of - ``np.complex64`` or ``np.complex128``. - **kwargs: keyword arguments. TODO add when cuTensorNet MPS backend is available as a prototype. + ``numpy.complex64`` or ``numpy.complex128``. Default is ``numpy.complex128``. + Keyword Args: + max_bond_dim (int): The maximum bond dimension to be used in the MPS simulation. Default is 128. + The accuracy of the wavefunction representation comes with a memory tradeoff which can be + tuned with `max_bond_dim`. The larger the internal bond dimension, the more entanglement can + be described but the larger the memory requirements. Note that GPUs are ill-suited (i.e. less + competitive compared with CPUs) for simulating circuits with low bond dimensions and/or circuit + layers with a single or few gates because the arithmetic intensity is lower. + cutoff (float): The threshold used to truncate the singular values of the MPS tensors. Default is 0 + cutoff_mode (str): Singular value truncation mode. Options: ["rel", "abs"]. Default is "abs". + backend (str): Supported backend. Currently, only ``cutensornet`` is supported. + method (str): Supported method. Currently, only ``mps`` is supported. + + **Example** + + .. 
code-block:: python + + import pennylane as qml + + num_qubits = 100 + + dev = qml.device("lightning.tensor", wires=num_qubits) + + @qml.qnode(dev) + def circuit(num_qubits): + for qubit in range(0, num_qubits - 1): + qml.CZ(wires=[qubit, qubit + 1]) + qml.X(wires=[qubit]) + qml.Z(wires=[qubit + 1]) + return qml.expval(qml.Z(0)) + + >>> print(circuit(num_qubits)) + -1.0 """ # pylint: disable=too-many-instance-attributes # So far we just consider the options for MPS simulator - _device_options = ("backend", "c_dtype") - + _device_options = ("backend", "max_bond_dim", "cutoff", "cutoff_mode") + _CPP_BINARY_AVAILABLE = LT_CPP_BINARY_AVAILABLE _new_API = True # pylint: disable=too-many-arguments @@ -81,29 +239,37 @@ def __init__( self, *, wires=None, - backend="cutensornet", - method="mps", - shots=None, + method: str = "mps", c_dtype=np.complex128, **kwargs, ): - if not accepted_backends(backend): - raise ValueError(f"Unsupported backend: {backend}") + if not self._CPP_BINARY_AVAILABLE: + raise ImportError("Pre-compiled binaries for lightning.tensor are not available. ") if not accepted_methods(method): raise ValueError(f"Unsupported method: {method}") - if shots is not None: - raise ValueError("lightning.tensor does not support finite shots.") + if c_dtype not in [np.complex64, np.complex128]: # pragma: no cover + raise TypeError(f"Unsupported complex type: {c_dtype}") - super().__init__(wires=wires, shots=shots) + if wires is None: + raise ValueError("The number of wires must be specified.") + + super().__init__(wires=wires, shots=None) + + if isinstance(wires, int): + self._wire_map = None # should just use wires as is + else: + self._wire_map = {w: i for i, w in enumerate(self.wires)} self._num_wires = len(self.wires) if self.wires else 0 - self._backend = backend self._method = method self._c_dtype = c_dtype - self._interface = None + self._max_bond_dim = kwargs.get("max_bond_dim", 128) + self._cutoff = kwargs.get("cutoff", 0) + self._cutoff_mode = kwargs.get("cutoff_mode", "abs") + self._backend = kwargs.get("backend", "cutensornet") for arg in kwargs: if arg not in self._device_options: @@ -111,6 +277,15 @@ def __init__( f"Unexpected argument: {arg} during initialization of the lightning.tensor device." ) + if not accepted_backends(self._backend): + raise ValueError(f"Unsupported backend: {self._backend}") + + if self._cutoff_mode not in ["rel", "abs"]: + raise ValueError(f"Unsupported cutoff mode: {self._cutoff_mode}") + + if not isinstance(self._max_bond_dim, int) or self._max_bond_dim < 1: + raise ValueError("The maximum bond dimension must be an integer greater than 0.") + @property def name(self): """The name of the device.""" @@ -136,6 +311,17 @@ def c_dtype(self): """Tensor complex data type.""" return self._c_dtype + def _tensornet(self): + """Return the tensornet object.""" + return LightningTensorNet( + self._num_wires, + self._method, + self._c_dtype, + self._max_bond_dim, + self._cutoff, + self._cutoff_mode, + ) + dtype = c_dtype def _setup_execution_config( @@ -177,11 +363,22 @@ def preprocess( * Does not support vector-Jacobian products. 
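
        A rough sketch of how the returned transform program and execution config are
        meant to be used together with ``execute`` (illustrative only; ``tape`` stands
        for any user circuit containing supported operations and measurements)::

            dev = LightningTensor(wires=2, max_bond_dim=64)
            program, config = dev.preprocess()
            batch, postprocess = program((tape,))
            results = postprocess(dev.execute(batch, config))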
""" - # TODO: remove comments when cuTensorNet MPS backend is available as a prototype - # config = self._setup_execution_config(execution_config) - # program = self._interface.preprocess() - # return program, config + config = self._setup_execution_config(execution_config) + program = TransformProgram() + + program.add_transform(validate_measurements, name=self.name) + program.add_transform(validate_observables, accepted_observables, name=self.name) + program.add_transform(validate_device_wires, self._wires, name=self.name) + program.add_transform( + decompose, + stopping_condition=stopping_condition, + skip_initial_state_prep=True, + name=self.name, + ) + return program, config + + # pylint: disable=unused-argument def execute( self, circuits: QuantumTape_or_Batch, @@ -197,8 +394,14 @@ def execute( TensorLike, tuple[TensorLike], tuple[tuple[TensorLike]]: A numeric result of the computation. """ - # TODO: remove comment when cuTensorNet MPS backend is available as a prototype - # return self._interface.execute(circuits, execution_config) + results = [] + + for circuit in circuits: + if self._wire_map is not None: + [circuit], _ = qml.map_wires(circuit, self._wire_map) + results.append(simulate(circuit, self._tensornet())) + + return tuple(results) # pylint: disable=unused-argument def supports_derivatives( diff --git a/requirements-dev.txt b/requirements-dev.txt index e5c6db9967..80268897c6 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -17,5 +17,6 @@ isort==5.13.2 click==8.0.4 cmake custatevec-cu12 +cutensornet-cu12 pylint scipy diff --git a/setup.py b/setup.py index d231306a9f..2c9657b984 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ from setuptools.command.build_ext import build_ext default_backend = "lightning_qubit" -supported_backends = {"lightning_kokkos", "lightning_qubit", "lightning_gpu"} +supported_backends = {"lightning_kokkos", "lightning_qubit", "lightning_gpu", "lightning_tensor"} supported_backends.update({sb.replace("_", ".") for sb in supported_backends}) @@ -177,7 +177,7 @@ def build_extension(self, ext: CMakeExtension): packages_list = ["pennylane_lightning." + backend] if backend == "lightning_qubit": - packages_list += ["pennylane_lightning.core", "pennylane_lightning.lightning_tensor"] + packages_list += ["pennylane_lightning.core"] else: requirements += ["pennylane_lightning==" + version] @@ -187,10 +187,6 @@ def build_extension(self, ext: CMakeExtension): suffix = suffix[0].upper() + suffix[1:] pennylane_plugins = [device_name + " = pennylane_lightning." + backend + ":Lightning" + suffix] -if suffix == "Qubit": - pennylane_plugins.append( - "lightning.tensor = pennylane_lightning.lightning_tensor:LightningTensor" - ) pkg_suffix = "" if suffix == "Qubit" else "_" + suffix @@ -226,11 +222,7 @@ def build_extension(self, ext: CMakeExtension): "pennylane_lightning.core": [ os.path.join("src", "*"), os.path.join("src", "**", "*"), - ], - "pennylane_lightning.lightning_tensor": [ - os.path.join("backends", "*"), - os.path.join("backends", "**", "*"), - ], + ] }, } ) diff --git a/tests/conftest.py b/tests/conftest.py index db058c5a07..d7dbf46493 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -89,7 +89,7 @@ def n_subsystems(request): # Looking for the device for testing. 
default_device = "lightning.qubit" -supported_devices = {"lightning.kokkos", "lightning.qubit", "lightning.gpu"} +supported_devices = {"lightning.kokkos", "lightning.qubit", "lightning.gpu", "lightning.tensor"} supported_devices.update({sb.replace(".", "_") for sb in supported_devices}) @@ -137,6 +137,12 @@ def get_device(): if hasattr(pennylane_lightning, "lightning_gpu_ops"): import pennylane_lightning.lightning_gpu_ops as lightning_ops from pennylane_lightning.lightning_gpu_ops import LightningException +elif device_name == "lightning.tensor": + from pennylane_lightning.lightning_tensor import LightningTensor as LightningDevice + + if hasattr(pennylane_lightning, "lightning_tensor_ops"): + import pennylane_lightning.lightning_tensor_ops as lightning_ops + from pennylane_lightning.lightning_tensor_ops import LightningException else: from pennylane_lightning.lightning_qubit import LightningQubit as LightningDevice @@ -152,6 +158,8 @@ def get_device(): ) def qubit_device(request): def _device(wires, shots=None): + if device_name == "lightning.tensor": + return qml.device(device_name, wires=wires, c_dtype=request.param) return qml.device(device_name, wires=wires, shots=shots, c_dtype=request.param) return _device diff --git a/tests/lightning_qubit/test_adjoint_jacobian_class.py b/tests/lightning_qubit/test_adjoint_jacobian_class.py index 4bce15422c..ed161259e6 100644 --- a/tests/lightning_qubit/test_adjoint_jacobian_class.py +++ b/tests/lightning_qubit/test_adjoint_jacobian_class.py @@ -18,7 +18,7 @@ import pennylane as qml import pytest -from conftest import LightningDevice # tested device +from conftest import LightningDevice, device_name # tested device from pennylane import numpy as np from pennylane.tape import QuantumScript from scipy.stats import unitary_group @@ -26,8 +26,8 @@ from pennylane_lightning.lightning_qubit._adjoint_jacobian import LightningAdjointJacobian from pennylane_lightning.lightning_qubit._state_vector import LightningStateVector -if not LightningDevice._new_API: - pytest.skip("Exclusive tests for new API. Skipping.", allow_module_level=True) +if device_name != "lightning.qubit": + pytest.skip("Exclusive tests for lightning.qubit. Skipping.", allow_module_level=True) if not LightningDevice._CPP_BINARY_AVAILABLE: pytest.skip("No binary module found. Skipping.", allow_module_level=True) diff --git a/tests/lightning_qubit/test_measurements_class.py b/tests/lightning_qubit/test_measurements_class.py index c282e69358..1d0bac9889 100644 --- a/tests/lightning_qubit/test_measurements_class.py +++ b/tests/lightning_qubit/test_measurements_class.py @@ -33,8 +33,8 @@ from pennylane_lightning.lightning_qubit._measurements import LightningMeasurements from pennylane_lightning.lightning_qubit._state_vector import LightningStateVector -if not LightningDevice._new_API: - pytest.skip("Exclusive tests for new API. Skipping.", allow_module_level=True) +if device_name != "lightning.qubit": + pytest.skip("Exclusive tests for lightning.qubit. Skipping.", allow_module_level=True) if not LightningDevice._CPP_BINARY_AVAILABLE: pytest.skip("No binary module found. 
Skipping.", allow_module_level=True) diff --git a/tests/lightning_qubit/test_state_vector_class.py b/tests/lightning_qubit/test_state_vector_class.py index d7662862d1..d192c8c346 100644 --- a/tests/lightning_qubit/test_state_vector_class.py +++ b/tests/lightning_qubit/test_state_vector_class.py @@ -20,14 +20,14 @@ import numpy as np import pennylane as qml import pytest -from conftest import LightningDevice # tested device +from conftest import LightningDevice, device_name # tested device from pennylane.tape import QuantumScript from pennylane.wires import Wires from pennylane_lightning.lightning_qubit._state_vector import LightningStateVector -if not LightningDevice._new_API: - pytest.skip("Exclusive tests for new API. Skipping.", allow_module_level=True) +if device_name != "lightning.qubit": + pytest.skip("Exclusive tests for lightning.qubit. Skipping.", allow_module_level=True) if not LightningDevice._CPP_BINARY_AVAILABLE: pytest.skip("No binary module found. Skipping.", allow_module_level=True) diff --git a/tests/lightning_tensor/test_gates_and_expval.py b/tests/lightning_tensor/test_gates_and_expval.py new file mode 100644 index 0000000000..cfeffa90fc --- /dev/null +++ b/tests/lightning_tensor/test_gates_and_expval.py @@ -0,0 +1,341 @@ +# Copyright 2024 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Tests for the expectation value calculations on the LightningTensor device. +""" + +import pennylane as qml +import pytest +from conftest import PHI, THETA, LightningDevice, device_name +from pennylane import DeviceError +from pennylane import numpy as np + +if device_name != "lightning.tensor": + pytest.skip("Exclusive tests for Lightning Tensor device. Skipping.", allow_module_level=True) +else: + from pennylane_lightning.lightning_tensor import LightningTensor + from pennylane_lightning.lightning_tensor._measurements import LightningTensorMeasurements + from pennylane_lightning.lightning_tensor._tensornet import LightningTensorNet + +if not LightningDevice._new_API: # pylint: disable=protected-access + pytest.skip("Exclusive tests for new API. Skipping.", allow_module_level=True) + +if not LightningDevice._CPP_BINARY_AVAILABLE: # pylint: disable=protected-access + pytest.skip("No binary module found. 
Skipping.", allow_module_level=True) + + +random_unitary = np.array( + [ + [ + -0.48401572 - 0.11012304j, + -0.44806504 + 0.46775911j, + -0.36968281 + 0.19235993j, + -0.37561358 + 0.13887962j, + ], + [ + -0.12838047 + 0.13992187j, + 0.14531831 + 0.45319438j, + 0.28902175 - 0.71158765j, + -0.24333677 - 0.29721109j, + ], + [ + 0.26400811 - 0.72519269j, + 0.13965687 + 0.35092711j, + 0.09141515 - 0.14367072j, + 0.14894673 + 0.45886629j, + ], + [ + -0.04067799 + 0.34681783j, + -0.45852968 - 0.03214391j, + -0.10528164 - 0.4431247j, + 0.50251451 + 0.45476965j, + ], + ] +) + + +def circuit_ansatz(params, wires): + """Circuit ansatz containing all the parametrized gates""" + qml.Identity(wires=wires[0]) + qml.QubitUnitary(random_unitary, wires=[wires[1], wires[3]]) + qml.ControlledQubitUnitary( + qml.matrix(qml.PauliX([wires[1]])), control_wires=[wires[0]], wires=wires[1] + ) + qml.DiagonalQubitUnitary(np.array([1, 1]), wires=wires[2]) + qml.MultiControlledX(wires=[wires[0], wires[1], wires[3]], control_values=[wires[0], wires[1]]) + qml.PauliX(wires=wires[1]) + qml.PauliY(wires=wires[2]) + qml.PauliZ(wires=wires[3]) + qml.Hadamard(wires=wires[4]) + qml.adjoint(qml.S(wires=wires[4])) + qml.S(wires=wires[5]) + qml.adjoint(qml.T(wires=wires[1])) + qml.T(wires=wires[0]) + qml.adjoint(qml.SX(wires=wires[0])) + qml.SX(wires=wires[1]) + qml.CNOT(wires=[wires[6], wires[7]]) + qml.SWAP(wires=[wires[2], wires[3]]) + qml.adjoint(qml.ISWAP(wires=[wires[0], wires[1]])) + qml.ISWAP(wires=[wires[4], wires[5]]) + qml.PSWAP(params[0], wires=[wires[6], wires[7]]) + qml.adjoint(qml.SISWAP(wires=[wires[0], wires[1]])) + qml.SISWAP(wires=[wires[4], wires[5]]) + qml.SQISW(wires=[wires[1], wires[0]]) + qml.CSWAP(wires=[wires[2], wires[4], wires[5]]) + qml.Toffoli(wires=[wires[0], wires[1], wires[2]]) + qml.CY(wires=[wires[0], wires[2]]) + qml.CZ(wires=[wires[1], wires[3]]) + qml.PhaseShift(params[1], wires=wires[2]) + qml.ControlledPhaseShift(params[2], wires=[wires[0], wires[5]]) + qml.RX(params[3], wires=wires[0]) + qml.RY(params[4], wires=wires[1]) + qml.RZ(params[5], wires=wires[3]) + qml.Rot(params[6], params[7], params[8], wires=wires[0]) + qml.CRX(params[9], wires=[wires[1], wires[0]]) + qml.CRY(params[10], wires=[wires[3], wires[2]]) + qml.CRZ(params[11], wires=[wires[2], wires[1]]) + qml.IsingXX(params[12], wires=[wires[1], wires[0]]) + qml.IsingYY(params[13], wires=[wires[3], wires[2]]) + qml.IsingXY(params[14], wires=[wires[2], wires[1]]) + qml.IsingZZ(params[15], wires=[wires[2], wires[1]]) + qml.SingleExcitation(params[16], wires=[wires[2], wires[0]]) + qml.SingleExcitationPlus(params[17], wires=[wires[3], wires[1]]) + qml.SingleExcitationMinus(params[18], wires=[wires[4], wires[2]]) + qml.DoubleExcitation(params[19], wires=[wires[0], wires[1], wires[2], wires[3]]) + qml.QubitCarry(wires=[wires[0], wires[1], wires[6], wires[7]]) + qml.QubitSum(wires=[wires[2], wires[3], wires[7]]) + qml.OrbitalRotation(params[20], wires=[wires[0], wires[1], wires[5], wires[6]]) + qml.QFT(wires=[wires[0]]) + qml.ECR(wires=[wires[1], wires[3]]) + qml.BlockEncode([[0.1, 0.2], [0.3, 0.4]], wires=[wires[0], wires[3]]) + qml.ctrl(qml.BlockEncode([0.1], wires=[wires[0]]), control=wires[1]) + + +@pytest.mark.parametrize( + "returns", + [ + (qml.PauliX(0),), + (qml.PauliY(0),), + (qml.PauliZ(0),), + (qml.PauliX(1),), + (qml.PauliY(1),), + (qml.PauliZ(1),), + (qml.PauliX(2),), + (qml.PauliY(2),), + (qml.PauliZ(2),), + (qml.PauliX(3),), + (qml.PauliY(3),), + (qml.PauliZ(3),), + (qml.PauliX(0), qml.PauliY(1)), + ( + qml.PauliZ(0), + 
qml.PauliX(1), + qml.PauliY(2), + ), + ( + qml.PauliY(0), + qml.PauliZ(1), + qml.PauliY(3), + ), + (qml.PauliZ(0) @ qml.PauliY(3),), + (qml.Hadamard(2),), + (qml.Hadamard(3) @ qml.PauliZ(2),), + (qml.PauliX(0) @ qml.PauliY(3),), + (qml.PauliY(0) @ qml.PauliY(2) @ qml.PauliY(3),), + (qml.PauliZ(0) @ qml.PauliZ(1) @ qml.PauliZ(2),), + (0.5 * qml.PauliZ(0) @ qml.PauliZ(2),), + (qml.ops.LinearCombination([1.0, 2.0], [qml.X(0) @ qml.Z(1), qml.Y(3) @ qml.Z(2)])), + (qml.ops.prod(qml.X(0), qml.Y(1))), + ], +) +def test_integration_for_all_supported_gates(returns): + """Integration tests that compare to default.qubit for a large circuit containing parametrized + operations""" + num_wires = 8 + dev_default = qml.device("default.qubit", wires=range(num_wires)) + dev_ltensor = LightningTensor(wires=range(num_wires), max_bond_dim=16, c_dtype=np.complex128) + + def circuit(params): + qml.BasisState(np.array([1, 0, 1, 0, 1, 0, 1, 0]), wires=range(num_wires)) + circuit_ansatz(params, wires=range(num_wires)) + return qml.math.hstack([qml.expval(r) for r in returns]) + + n_params = 22 + np.random.seed(1337) + params_init = np.random.rand(n_params) + + params = np.array(params_init, requires_grad=True) + + qnode_ltensor = qml.QNode(circuit, dev_ltensor) + qnode_default = qml.QNode(circuit, dev_default) + + j_ltensor = qnode_ltensor(params) + j_default = qnode_default(params) + + assert np.allclose(j_ltensor, j_default, rtol=1e-1) + + +@pytest.mark.parametrize("theta, phi", list(zip(THETA, PHI))) +def test_state_prep_not_support(qubit_device, theta, phi): + """Test that state preparation is not supported on the device.""" + dev = qubit_device(wires=3) + obs = qml.Hermitian([[1, 0], [0, -1]], wires=[0]) + + tape = qml.tape.QuantumScript( + [ + qml.StatePrep([1.0, 0, 0, 0, 0, 0, 0, 0], wires=[0, 1, 2]), + qml.RX(theta, wires=[0]), + qml.RX(phi, wires=[1]), + qml.RX(theta + phi, wires=[2]), + ], + measurements=[qml.expval(obs)], + ) + + with pytest.raises( + DeviceError, match="lightning.tensor does not support initialization with a state vector." 
+ ): + dev.execute(tape) + + +class TestSparseHExpval: + """Test sparseH expectation values""" + + @pytest.mark.parametrize( + "cases", + [ + [qml.PauliX(0) @ qml.Identity(1), 0.00000000000000000, 1.000000000000000000], + [qml.Identity(0) @ qml.PauliX(1), -0.19866933079506122, 0.960530638694763184], + [qml.PauliY(0) @ qml.Identity(1), -0.38941834230865050, 0.848353326320648193], + [qml.Identity(0) @ qml.PauliY(1), 0.00000000000000000, 1.000000119209289551], + [qml.PauliZ(0) @ qml.Identity(1), 0.92106099400288520, 0.151646673679351807], + [qml.Identity(0) @ qml.PauliZ(1), 0.98006657784124170, 0.039469480514526367], + ], + ) + def test_sparse_Pauli_words(self, cases, qubit_device): + """Test expval of some simple sparse Hamiltonian""" + dev = qubit_device(wires=4) + + @qml.qnode(dev, diff_method="parameter-shift") + def circuit_expval(): + qml.RX(0.4, wires=[0]) + qml.RY(-0.2, wires=[1]) + return qml.expval( + qml.SparseHamiltonian( + qml.Hamiltonian([1], [cases[0]]).sparse_matrix(), wires=[0, 1] + ) + ) + + with pytest.raises(DeviceError): + circuit_expval() + + def test_expval_sparseH(self): + """Test that expval is chosen for a variety of different expectation values.""" + with qml.queuing.AnnotatedQueue() as q: + qml.expval(qml.SparseHamiltonian(qml.PauliX.compute_sparse_matrix(), wires=0)) + + tensornet = LightningTensorNet(4, 10) + m = LightningTensorMeasurements(tensornet) + + with pytest.raises(NotImplementedError, match="Sparse Hamiltonians are not supported."): + m.expval(q.queue[0]) + + def test_measurement_shot_not_supported(self): + """Test shots measurement error for measure_tensor_network.""" + obs = [ + qml.expval(qml.PauliX(0) @ qml.Identity(1)), + ] + + tensornet = LightningTensorNet(4, 10) + tape = qml.tape.QuantumScript(measurements=obs, shots=1000) + m = LightningTensorMeasurements(tensornet) + + with pytest.raises(NotImplementedError, match="Shots are not supported for tensor network"): + m.measure_tensor_network(tape) + + def test_measurement_not_supported(self): + """Test error for measure_tensor_network.""" + obs = [qml.sample(wires=0)] + + tensornet = LightningTensorNet(4, 10) + tape = qml.tape.QuantumScript(measurements=obs) + m = LightningTensorMeasurements(tensornet) + + with pytest.raises( + NotImplementedError, + match="Does not support current measurement. 
Only ExpectationMP measurements are supported.", + ): + m.measure_tensor_network(tape) + + +class QChem: + """Integration tests for qchem module by parameter-shift and finite-diff differentiation methods.""" + + @pytest.mark.parametrize("diff_approach", ["parameter-shift", "finite-diff"]) + def test_integration_H2_Hamiltonian(self, diff_approach): + symbols = ["H", "H"] + + geometry = np.array( + [[-0.676411907, 0.000000000, 0.000000000], [0.676411907, 0.000000000, 0.000000000]], + requires_grad=False, + ) + + mol = qml.qchem.Molecule(symbols, geometry, basis_name="STO-3G") + + H, qubits = qml.qchem.molecular_hamiltonian( + symbols, + geometry, + basis="STO-3G", + ) + + singles, doubles = qml.qchem.excitations(mol.n_electrons, len(H.wires)) + + excitations = singles + doubles + num_params = len(singles + doubles) + params = np.zeros(num_params, requires_grad=True) + + hf_state = qml.qchem.hf_state(mol.n_electrons, qubits) + + # Choose different batching supports here + dev = qml.device(device_name, wires=qubits) + dev_comp = qml.device("default.qubit", wires=qubits) + + @qml.qnode(dev, diff_method=diff_approach) + def circuit(params, excitations): + qml.BasisState(hf_state, wires=range(qubits)) + for i, excitation in enumerate(excitations): + if len(excitation) == 4: + qml.DoubleExcitation(params[i], wires=excitation) + else: + qml.SingleExcitation(params[i], wires=excitation) + return qml.expval(H) + + @qml.qnode(dev_comp, diff_method=diff_approach) + def circuit_compare(params, excitations): + qml.BasisState(hf_state, wires=range(qubits)) + + for i, excitation in enumerate(excitations): + if len(excitation) == 4: + qml.DoubleExcitation(params[i], wires=excitation) + else: + qml.SingleExcitation(params[i], wires=excitation) + return qml.expval(H) + + jac_func = qml.jacobian(circuit) + jac_func_comp = qml.jacobian(circuit_compare) + + params = qml.numpy.array([0.0] * len(doubles), requires_grad=True) + jacs = jac_func(params, excitations=doubles) + jacs_comp = jac_func_comp(params, excitations=doubles) + + assert np.allclose(jacs, jacs_comp) diff --git a/tests/lightning_tensor/test_lightning_tensor.py b/tests/lightning_tensor/test_lightning_tensor.py index 4bb2c2d7fd..5c82babdfb 100644 --- a/tests/lightning_tensor/test_lightning_tensor.py +++ b/tests/lightning_tensor/test_lightning_tensor.py @@ -15,35 +15,31 @@ Unit tests for the LightningTensor class. """ - import numpy as np import pennylane as qml import pytest -from conftest import LightningDevice # tested device +from conftest import LightningDevice, device_name # tested device from pennylane.wires import Wires -from pennylane_lightning.lightning_tensor import LightningTensor - -if not LightningDevice._new_API: - pytest.skip("Exclusive tests for new API. 
Skipping.", allow_module_level=True) +if device_name != "lightning.tensor": + pytest.skip("Skipping tests for the LightningTensor class.", allow_module_level=True) +else: + from pennylane_lightning.lightning_tensor import LightningTensor -if LightningDevice._CPP_BINARY_AVAILABLE: +if not LightningDevice._CPP_BINARY_AVAILABLE: # pylint: disable=protected-access pytest.skip("Device doesn't have C++ support yet.", allow_module_level=True) -@pytest.mark.parametrize("num_wires", [None, 4]) +@pytest.mark.parametrize("num_wires", [3, 4, 5]) @pytest.mark.parametrize("c_dtype", [np.complex64, np.complex128]) def test_device_name_and_init(num_wires, c_dtype): """Test the class initialization and returned properties.""" wires = Wires(range(num_wires)) if num_wires else None - dev = LightningTensor(wires=wires, c_dtype=c_dtype) + dev = LightningTensor(wires=wires, max_bond_dim=10, c_dtype=c_dtype) assert dev.name == "lightning.tensor" assert dev.c_dtype == c_dtype assert dev.wires == wires - if num_wires is None: - assert dev.num_wires == 0 - else: - assert dev.num_wires == num_wires + assert dev.num_wires == num_wires def test_device_available_as_plugin(): @@ -56,7 +52,7 @@ def test_device_available_as_plugin(): def test_invalid_backend(backend): """Test an invalid backend.""" with pytest.raises(ValueError, match=f"Unsupported backend: {backend}"): - LightningTensor(backend=backend) + LightningTensor(wires=1, backend=backend) @pytest.mark.parametrize("method", ["fake_method"]) @@ -66,27 +62,33 @@ def test_invalid_method(method): LightningTensor(method=method) -def test_invalid_keyword_arg(): - """Test an invalid keyword argument.""" - with pytest.raises(TypeError): - LightningTensor(fake_arg=None) +def test_invalid_bonddims(): + """Test that an error is raised if bond dimensions are less than 1.""" + with pytest.raises(ValueError): + LightningTensor(wires=5, max_bond_dim=0) + + +def test_invalid_wires_none(): + """Test that an error is raised if wires are none.""" + with pytest.raises(ValueError): + LightningTensor(wires=None) -def test_invalid_shots(): - """Test that an error is raised if finite number of shots are requestd.""" +def test_invalid_cutoff_mode(): + """Test that an error is raised if an invalid cutoff mode is provided.""" with pytest.raises(ValueError): - LightningTensor(shots=5) + LightningTensor(wires=2, cutoff_mode="invalid_mode") def test_support_derivatives(): """Test that the device does not support derivatives yet.""" - dev = LightningTensor() + dev = LightningTensor(wires=2) assert not dev.supports_derivatives() def test_compute_derivatives(): """Test that an error is raised if the `compute_derivatives` method is called.""" - dev = LightningTensor() + dev = LightningTensor(wires=2) with pytest.raises( NotImplementedError, match="The computation of derivatives has yet to be implemented for the lightning.tensor device.", @@ -96,7 +98,7 @@ def test_compute_derivatives(): def test_execute_and_compute_derivatives(): """Test that an error is raised if `execute_and_compute_derivative` method is called.""" - dev = LightningTensor() + dev = LightningTensor(wires=2) with pytest.raises( NotImplementedError, match="The computation of derivatives has yet to be implemented for the lightning.tensor device.", @@ -106,13 +108,13 @@ def test_execute_and_compute_derivatives(): def test_supports_vjp(): """Test that the device does not support VJP yet.""" - dev = LightningTensor() + dev = LightningTensor(wires=2) assert not dev.supports_vjp() def test_compute_vjp(): """Test that an error is raised 
if `compute_vjp` method is called.""" - dev = LightningTensor() + dev = LightningTensor(wires=2) with pytest.raises( NotImplementedError, match="The computation of vector-Jacobian product has yet to be implemented for the lightning.tensor device.", @@ -122,7 +124,7 @@ def test_compute_vjp(): def test_execute_and_compute_vjp(): """Test that an error is raised if `execute_and_compute_vjp` method is called.""" - dev = LightningTensor() + dev = LightningTensor(wires=2) with pytest.raises( NotImplementedError, match="The computation of vector-Jacobian product has yet to be implemented for the lightning.tensor device.", diff --git a/tests/lightning_tensor/test_measurements_class.py b/tests/lightning_tensor/test_measurements_class.py new file mode 100644 index 0000000000..6d9574aa83 --- /dev/null +++ b/tests/lightning_tensor/test_measurements_class.py @@ -0,0 +1,79 @@ +# Copyright 2024 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Unit tests for measurements class. +""" +import numpy as np +import pennylane as qml +import pytest +from conftest import LightningDevice, device_name # tested device + +if device_name != "lightning.tensor": + pytest.skip( + "Skipping tests for the LightningTensorMeasurements class.", allow_module_level=True + ) + +from pennylane_lightning.lightning_tensor._measurements import LightningTensorMeasurements +from pennylane_lightning.lightning_tensor._tensornet import LightningTensorNet + +if not LightningDevice._CPP_BINARY_AVAILABLE: # pylint: disable=protected-access + pytest.skip("No binary module found. Skipping.", allow_module_level=True) + +THETA = np.linspace(0.11, 1, 3) +PHI = np.linspace(0.32, 1, 3) + + +# General LightningTensorNet fixture, for any number of wires. 
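# A minimal end-to-end sketch of the classes exercised in this module (illustrative
# only; it assumes the compiled ``lightning_tensor_ops`` module is available, and the
# ``_example_*`` names below are placeholders, not part of the test suite):
_example_tensornet = LightningTensorNet(num_wires=2, max_bond_dim=8)
_example_tape = qml.tape.QuantumScript(
    [qml.Hadamard(0), qml.CNOT(wires=[0, 1])],
    [qml.expval(qml.PauliZ(1))],
)
_example_tensornet.set_tensor_network(_example_tape)  # applies ops, appends MPS final state
_example_result = LightningTensorMeasurements(_example_tensornet).measure_tensor_network(
    _example_tape
)
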
+@pytest.fixture(
+    params=[np.complex64, np.complex128],
+)
+def lightning_tn(request):
+    """Fixture for creating a LightningTensorNet object."""
+    return LightningTensorNet(num_wires=5, max_bond_dim=128, c_dtype=request.param)
+
+
+class TestMeasurementFunction:
+    """Tests for the measurement method."""
+
+    def test_initialization(self, lightning_tn):
+        """Tests for the initialization of the LightningTensorMeasurements class."""
+        tensornetwork = lightning_tn
+        m = LightningTensorMeasurements(tensornetwork)
+
+        assert m.dtype == tensornetwork.dtype
+
+    def test_not_implemented_state_measurements(self, lightning_tn):
+        """Test that a NotImplementedError is raised if the measurement is not a state measurement."""
+
+        tensornetwork = lightning_tn
+        m = LightningTensorMeasurements(tensornetwork)
+
+        mp = qml.counts(wires=(0, 1))
+        with pytest.raises(NotImplementedError):
+            m.get_measurement_function(mp)
+
+    def test_not_measure_tensor_network(self, lightning_tn):
+        """Test that a NotImplementedError is raised if the tape requests finite shots."""
+
+        tensornetwork = lightning_tn
+        m = LightningTensorMeasurements(tensornetwork)
+
+        tape = qml.tape.QuantumScript(
+            [qml.RX(0.1, wires=0), qml.Hadamard(1), qml.PauliZ(1)],
+            [qml.expval(qml.prod(qml.PauliZ(0), qml.PauliX(1)))],
+            shots=1000,
+        )
+
+        with pytest.raises(NotImplementedError):
+            m.measure_tensor_network(tape)
diff --git a/tests/lightning_tensor/test_tensornet_class.py b/tests/lightning_tensor/test_tensornet_class.py
new file mode 100644
index 0000000000..9e2113fb2d
--- /dev/null
+++ b/tests/lightning_tensor/test_tensornet_class.py
@@ -0,0 +1,88 @@
+# Copyright 2018-2024 Xanadu Quantum Technologies Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Unit tests for the tensornet functions.
+"""
+
+import math
+
+import numpy as np
+import pennylane as qml
+import pytest
+from conftest import LightningDevice, device_name  # tested device
+from pennylane import DeviceError
+from pennylane.wires import Wires
+
+if device_name != "lightning.tensor":
+    pytest.skip("Skipping tests for the tensornet class.", allow_module_level=True)
+else:
+    from pennylane_lightning.lightning_tensor._tensornet import LightningTensorNet
+
+if not LightningDevice._CPP_BINARY_AVAILABLE:  # pylint: disable=protected-access
+    pytest.skip("No binary module found.
Skipping.", allow_module_level=True) + + +@pytest.mark.parametrize("num_wires", range(1, 4)) +@pytest.mark.parametrize("bondDims", [1, 2, 3, 4]) +@pytest.mark.parametrize("dtype", [np.complex64, np.complex128]) +@pytest.mark.parametrize("device_name", ["lightning.tensor"]) +def test_device_name_and_init(num_wires, bondDims, dtype, device_name): + """Test the class initialization and returned properties.""" + tensornet = LightningTensorNet(num_wires, bondDims, c_dtype=dtype, device_name=device_name) + assert tensornet.dtype == dtype + assert tensornet.device_name == device_name + assert tensornet.num_wires == num_wires + + +def test_wrong_device_name(): + """Test an invalid device name""" + with pytest.raises(qml.DeviceError, match="The device name"): + LightningTensorNet(3, 5, device_name="thunder.tensor") + + +def test_errors_basis_state(): + """Test that errors are raised when applying a BasisState operation.""" + with pytest.raises(ValueError, match="BasisState parameter must consist of 0 or 1 integers."): + tensornet = LightningTensorNet(3, 5) + tensornet.apply_operations([qml.BasisState(np.array([-0.2, 4.2]), wires=[0, 1])]) + with pytest.raises(ValueError, match="BasisState parameter and wires must be of equal length."): + tensornet = LightningTensorNet(3, 5) + tensornet.apply_operations([qml.BasisState(np.array([0, 1]), wires=[0])]) + + +@pytest.mark.parametrize( + "operation,par", + [ + (qml.StatePrep, [0, 0, 1, 0]), + (qml.StatePrep, [0, 0, 0, 1]), + ( + qml.StatePrep, + [1 / math.sqrt(3), 0, 1 / math.sqrt(3), 1 / math.sqrt(3)], + ), + ( + qml.StatePrep, + [1 / math.sqrt(3), 0, -1 / math.sqrt(3), 1 / math.sqrt(3)], + ), + ], +) +def test_errors_apply_operation_state_preparation(operation, par): + """Test that errors are raised when applying a StatePreparation operation.""" + wires = 2 + bondDims = 5 + tensornet = LightningTensorNet(wires, bondDims) + + with pytest.raises( + DeviceError, match="lightning.tensor does not support initialization with a state vector." 
+ ): + tensornet.apply_operations([operation(np.array(par), Wires(range(wires)))]) diff --git a/tests/new_api/test_device.py b/tests/new_api/test_device.py index eff47472b1..7ecb6fb684 100644 --- a/tests/new_api/test_device.py +++ b/tests/new_api/test_device.py @@ -19,27 +19,34 @@ import numpy as np import pennylane as qml import pytest -from conftest import PHI, THETA, VARPHI, LightningDevice +from conftest import PHI, THETA, VARPHI, LightningDevice, device_name from pennylane.devices import DefaultExecutionConfig, DefaultQubit, ExecutionConfig, MCMConfig from pennylane.devices.default_qubit import adjoint_ops from pennylane.tape import QuantumScript -from pennylane_lightning.lightning_qubit.lightning_qubit import ( - _add_adjoint_transforms, - _supports_adjoint, - accepted_observables, - adjoint_measurements, - adjoint_observables, - decompose, - mid_circuit_measurements, - no_sampling, - stopping_condition, - stopping_condition_shots, - validate_adjoint_trainable_params, - validate_device_wires, - validate_measurements, - validate_observables, -) +if device_name == "lightning.qubit": + from pennylane_lightning.lightning_qubit.lightning_qubit import ( + _add_adjoint_transforms, + _supports_adjoint, + accepted_observables, + adjoint_measurements, + adjoint_observables, + decompose, + mid_circuit_measurements, + no_sampling, + stopping_condition, + stopping_condition_shots, + validate_adjoint_trainable_params, + validate_device_wires, + validate_measurements, + validate_observables, + ) + +if device_name == "lightning.tensor": + from pennylane_lightning.lightning_tensor.lightning_tensor import ( + accepted_observables, + stopping_condition, + ) if not LightningDevice._new_API: pytest.skip("Exclusive tests for new device API. Skipping.", allow_module_level=True) @@ -75,10 +82,14 @@ def test_accepted_observables(self): is supported by the device.""" valid_obs = qml.Projector([0], 0) invalid_obs = self.DummyOperator(0) - - assert accepted_observables(valid_obs) is True + result = True if device_name != "lightning.tensor" else False + assert accepted_observables(valid_obs) is result assert accepted_observables(invalid_obs) is False + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor device does not support adjoint_observables", + ) @pytest.mark.parametrize( "obs, expected", [ @@ -100,6 +111,10 @@ def test_adjoint_observables(self, obs, expected): a given observable""" assert adjoint_observables(obs) == expected + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor device does not support adjoint", + ) def test_add_adjoint_transforms(self): """Test that the correct transforms are added to the program by _add_adjoint_transforms""" expected_program = qml.transforms.core.TransformProgram() @@ -126,6 +141,10 @@ def test_add_adjoint_transforms(self): _add_adjoint_transforms(actual_program) assert actual_program == expected_program + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor device does not support adjoint", + ) @pytest.mark.parametrize( "circuit, expected", [ @@ -141,6 +160,10 @@ def test_supports_adjoint(self, circuit, expected): assert _supports_adjoint(circuit) == expected +@pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support shots or mcmc", +) class TestInitialization: """Unit tests for device initialization""" @@ -164,6 +187,10 @@ def test_invalid_kernel_name(self): _ = LightningDevice(wires=2, shots=1000, mcmc=True, kernel_name="bleh") 
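
# Illustrative check of the lightning.tensor preprocessing helpers imported above
# (a sketch, assuming this module runs with device_name == "lightning.tensor"):
# `stopping_condition` keeps only gates acting on at most two wires that provide a
# matrix, and `accepted_observables` rejects unsupported observables such as Projector.
if device_name == "lightning.tensor":
    assert stopping_condition(qml.CNOT(wires=[0, 1]))  # 1- and 2-wire gates are kept
    assert not stopping_condition(qml.Toffoli(wires=[0, 1, 2]))  # 3-wire gates get decomposed
    assert accepted_observables(qml.PauliZ(0))
    assert not accepted_observables(qml.Projector([0], wires=0))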
+@pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support adjoint_observables", +) class TestExecution: """Unit tests for executing quantum tapes on a device""" @@ -482,6 +509,10 @@ def test_probs_different_wire_orders(self, wires, wire_order): assert qml.math.allclose(res3, np.array([0.5, 0.0, 0.5, 0.0])) +@pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support derivatives", +) @pytest.mark.parametrize("batch_obs", [True, False]) class TestDerivatives: """Unit tests for calculating derivatives with a device""" @@ -818,6 +849,10 @@ def test_derivatives_tape_batch(self, phi, execute_and_derivatives, batch_obs): assert np.allclose(jacs[1], expected_jac[1]) +@pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support vjp", +) @pytest.mark.parametrize("batch_obs", [True, False]) class TestVJP: """Unit tests for VJP computation with the new device API.""" diff --git a/tests/new_api/test_expval.py b/tests/new_api/test_expval.py index abfcc1fd33..22e15dd261 100644 --- a/tests/new_api/test_expval.py +++ b/tests/new_api/test_expval.py @@ -18,7 +18,7 @@ import numpy as np import pennylane as qml import pytest -from conftest import PHI, THETA, VARPHI, LightningDevice +from conftest import PHI, THETA, VARPHI, LightningDevice, device_name from pennylane.devices import DefaultQubit if not LightningDevice._new_API: @@ -200,6 +200,10 @@ def test_hamiltonian_expectation(self, theta, phi, tol, dev): assert np.allclose(calculated_val, reference_val, tol) + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support sparse Hamiltonians", + ) def test_sparse_hamiltonian_expectation(self, theta, phi, tol, dev): """Tests a Hamiltonian.""" diff --git a/tests/new_api/test_var.py b/tests/new_api/test_var.py index eb35d40e93..8feb620c82 100644 --- a/tests/new_api/test_var.py +++ b/tests/new_api/test_var.py @@ -19,9 +19,12 @@ # pylint: disable=too-many-arguments, redefined-outer-name import pytest -from conftest import PHI, THETA, VARPHI, LightningDevice +from conftest import PHI, THETA, VARPHI, LightningDevice, device_name from pennylane.tape import QuantumScript +if device_name == "lightning.tensor": + pytest.skip("lightning.tensor does not support qml.var()", allow_module_level=True) + if not LightningDevice._new_API: pytest.skip("Exclusive tests for new API. Skipping.", allow_module_level=True) diff --git a/tests/test_adjoint_jacobian.py b/tests/test_adjoint_jacobian.py index 5487c0c431..e7dc7e252a 100644 --- a/tests/test_adjoint_jacobian.py +++ b/tests/test_adjoint_jacobian.py @@ -36,6 +36,9 @@ if not ld._CPP_BINARY_AVAILABLE: pytest.skip("No binary module found. Skipping.", allow_module_level=True) +if device_name == "lightning.tensor": + pytest.skip("lightning.tensor doesn't support adjoint jacobian.", allow_module_level=True) + kokkos_args = [None] if device_name == "lightning.kokkos": from pennylane_lightning.lightning_kokkos_ops import InitializationSettings diff --git a/tests/test_apply.py b/tests/test_apply.py index 47d8d01c5d..9b89c60f48 100644 --- a/tests/test_apply.py +++ b/tests/test_apply.py @@ -30,7 +30,10 @@ pytest.skip("No binary module found. 
Skipping.", allow_module_level=True) -@pytest.mark.skipif(ld._new_API, reason="Old API required") +@pytest.mark.skipif( + ld._new_API or device_name == "lightning.tensor", + reason="Old API required, lightning.tensor does not support qml.state().", +) class TestApply: """Tests that operations of certain operations are applied correctly or that the proper errors are raised. @@ -524,6 +527,10 @@ def test_apply_state_vector_lightning_handle(self, qubit_device, tol): class TestExpval: """Tests that expectation values are properly calculated or that the proper errors are raised.""" + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support qml.QubitStateVector & qml.StatePrep", + ) @pytest.mark.parametrize( "operation,input,expected_output", [ @@ -562,6 +569,10 @@ def test_expval_single_wire_no_parameters( assert np.isclose(res, expected_output, atol=tol, rtol=0) + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support shot measurement", + ) def test_expval_estimate(self): """Test that the expectation value is not analytically calculated""" dev = qml.device(device_name, wires=1, shots=3) @@ -580,6 +591,10 @@ def circuit(): class TestVar: """Tests that variances are properly calculated.""" + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support qml.QubitStateVector & qml.StatePrep", + ) @pytest.mark.parametrize( "operation,input,expected_output", [ @@ -618,6 +633,10 @@ def test_var_single_wire_no_parameters( assert np.isclose(res, expected_output, atol=tol, rtol=0) + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support shot measurement", + ) def test_var_estimate(self): """Test that the variance is not analytically calculated""" @@ -634,6 +653,10 @@ def circuit(): assert var != 1.0 +@pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor device does not support qml.samples()", +) class TestSample: """Tests that samples are properly calculated.""" @@ -761,12 +784,18 @@ def circuit(): qnode = qml.QNode(circuit, dev, diff_method="best") assert isinstance(qnode.device, ld) + @pytest.mark.xfail( + device_name == "lightning.tensor", reason="lightning.tensor raises different errors" + ) def test_args(self): """Test that the plugin requires correct arguments""" with pytest.raises(TypeError, match="missing 1 required positional argument: 'wires'"): qml.device(device_name) + @pytest.mark.skipif( + device_name == "lightning.tensor", reason="lightning.tensor requires num_wires > 1" + ) def test_qubit_circuit(self, qubit_device, tol): """Test that the default qubit plugin provides correct result for a simple circuit""" @@ -782,6 +811,9 @@ def circuit(x): assert np.isclose(circuit(p), expected, atol=tol, rtol=0) + @pytest.mark.skipif( + device_name == "lightning.tensor", reason="lightning.tensor requires num_wires > 1" + ) def test_qubit_identity(self, qubit_device, tol): """Test that the default qubit plugin provides correct result for the Identity expectation""" @@ -796,6 +828,10 @@ def circuit(x): assert np.isclose(circuit(p), 1, atol=tol, rtol=0) + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support shot measurements", + ) def test_nonzero_shots(self, tol_stochastic): """Test that the default qubit plugin provides correct result for high shot number""" @@ -817,6 +853,10 @@ def circuit(x): assert np.isclose(np.mean(runs), -np.sin(p), 
atol=tol_stochastic, rtol=0) # This test is ran against the state |0> with one Z expval + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support shot measurements", + ) @pytest.mark.parametrize( "name,expected_output", [ @@ -844,6 +884,10 @@ def circuit(): assert np.isclose(circuit(), expected_output, atol=tol, rtol=0) # This test is ran against the state |Phi+> with two Z expvals + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support qml.Stateprep()", + ) @pytest.mark.parametrize( "name,expected_output", [ @@ -902,9 +946,33 @@ def circuit(): ("BasisState", [0, 0], [1, 1]), ("BasisState", [1, 0], [-1, 1]), ("BasisState", [0, 1], [1, -1]), - ("QubitStateVector", [1, 0, 0, 0], [1, 1]), - ("QubitStateVector", [0, 0, 1, 0], [-1, 1]), - ("QubitStateVector", [0, 1, 0, 0], [1, -1]), + pytest.param( + "QubitStateVector", + [1, 0, 0, 0], + [1, 1], + marks=pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support qml.QubitStateVector()", + ), + ), + pytest.param( + "QubitStateVector", + [0, 0, 1, 0], + [-1, 1], + marks=pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support qml.QubitStateVector()", + ), + ), + pytest.param( + "QubitStateVector", + [0, 1, 0, 0], + [1, -1], + marks=pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support qml.QubitStateVector()", + ), + ), ], ) def test_supported_state_preparation(self, qubit_device, tol, name, par, expected_output): @@ -927,8 +995,26 @@ def circuit(): "name,par,wires,expected_output", [ ("BasisState", [1, 1], [0, 1], [-1, -1]), - ("BasisState", [1], [0], [-1, 1]), - ("BasisState", [1], [1], [1, -1]), + pytest.param( + "BasisState", + [1], + [0], + [-1, 1], + marks=pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor requires a vector of length num_wires for qml.BasisState()", + ), + ), + pytest.param( + "BasisState", + [1], + [1], + [1, -1], + marks=pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor requires a vector of length num_wires for qml.BasisState()", + ), + ), ], ) def test_basis_state_2_qubit_subset(self, qubit_device, tol, name, par, wires, expected_output): @@ -944,6 +1030,10 @@ def circuit(): assert np.allclose(circuit(), expected_output, atol=tol, rtol=0) # This test is run with two expvals + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support QubitStateVector", + ) @pytest.mark.parametrize( "name,par,wires,expected_output", [ @@ -971,6 +1061,10 @@ def circuit(): assert np.allclose(circuit(), expected_output, atol=tol, rtol=0) # This test is run with three expvals + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support qml.QubitStateVector()", + ) @pytest.mark.parametrize( "name,par,wires,expected_output", [ @@ -1005,6 +1099,9 @@ def circuit(): assert np.allclose(circuit(), expected_output, atol=tol, rtol=0) # This test is ran on the state |0> with one Z expvals + @pytest.mark.skipif( + device_name == "lightning.tensor", reason="lightning.tensor requires num_wires > 1" + ) @pytest.mark.parametrize( "name,par,expected_output", [ @@ -1042,6 +1139,10 @@ def circuit(): assert np.isclose(circuit(), expected_output, atol=tol, rtol=0) # This test is ran against the state 1/2|00>+sqrt(3)/2|11> with two Z expvals + @pytest.mark.skipif( + device_name == 
"lightning.tensor", + reason="lightning.tensor does not support qml.QubitStateVector() and qml.StatePrep()", + ) @pytest.mark.parametrize( "name,par,expected_output", [ @@ -1085,6 +1186,10 @@ def circuit(): assert np.allclose(circuit(), expected_output, atol=tol, rtol=0) + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support qml.QubitStateVector() and qml.StatePrep()", + ) @pytest.mark.parametrize( "name,state,expected_output", [ @@ -1120,6 +1225,10 @@ def circuit(): assert np.isclose(circuit(), expected_output, atol=tol, rtol=0) + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support qml.QubitStateVector() and qml.StatePrep()", + ) @pytest.mark.parametrize( "name,state,expected_output,par", [ @@ -1146,6 +1255,10 @@ def circuit(): assert np.isclose(circuit(), expected_output, atol=tol, rtol=0) + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support shot measurements", + ) def test_multi_samples_return_correlated_results(self, qubit_device): """Tests if the samples returned by the sample function have the correct dimensions @@ -1167,6 +1280,10 @@ def circuit(): assert np.array_equal(outcomes[0], outcomes[1]) + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support shot measurements.", + ) @pytest.mark.parametrize("num_wires", [3, 4, 5, 6, 7, 8]) def test_multi_samples_return_correlated_results_more_wires_than_size_of_observable( self, num_wires @@ -1192,6 +1309,10 @@ def circuit(): assert np.array_equal(outcomes[0], outcomes[1]) + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support shot measurements", + ) def test_snapshot_is_ignored_without_shot(self): """Tests if the Snapshot operator is ignored correctly""" dev = qml.device(device_name, wires=4) @@ -1208,6 +1329,10 @@ def circuit(): assert np.allclose(outcomes, [0.0]) + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support shot measurements", + ) def test_snapshot_is_ignored_with_shots(self): """Tests if the Snapshot operator is ignored correctly""" dev = qml.device(device_name, wires=4, shots=1000) @@ -1229,6 +1354,10 @@ def circuit(): assert np.array_equal(outcomes[0], outcomes[1]) + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support qml.prob()", + ) def test_apply_qpe(self, qubit_device, tol): """Test the application of qml.QuantumPhaseEstimation""" dev = qubit_device(wires=2) @@ -1264,6 +1393,10 @@ def circuit(): # Check the BlockEncode PennyLane page for details: # https://docs.pennylane.ai/en/stable/code/api/pennylane.BlockEncode.html + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support qml.state()", + ) @pytest.mark.parametrize( "op, op_wires", [ @@ -1302,6 +1435,10 @@ def circuit1(A): class TestApplyLightningMethod: """Unit tests for the apply_lightning method.""" + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support _apply_state_vector", + ) @pytest.mark.skipif(ld._new_API, reason="Old API required") def test_apply_identity_skipped(self, mocker, tol): """Test identity operation does not perform additional computations.""" @@ -1332,6 +1469,10 @@ def compute_matrix(*params, **hyperparams): with pytest.raises(ValueError, match="Unsupported operation"): dev.apply_lightning([EmptyGate(0)]) + 
@pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support qml.state()", + ) @pytest.mark.parametrize( "ops0", [ @@ -1375,6 +1516,10 @@ def circuit(): assert np.allclose(results, expected) +@pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support qml.StatePrep()", +) @pytest.mark.parametrize( "op", [ diff --git a/tests/test_arrays.py b/tests/test_arrays.py index 1eef3f8364..10068e7dbd 100644 --- a/tests/test_arrays.py +++ b/tests/test_arrays.py @@ -19,7 +19,7 @@ from conftest import LightningDevice as ld from conftest import device_name, lightning_ops -if device_name == "lightning_gpu" or not ld._CPP_BINARY_AVAILABLE: +if device_name in ("lightning_gpu", "lightning.tensor") or not ld._CPP_BINARY_AVAILABLE: pytest.skip("No binary module found. Skipping.", allow_module_level=True) diff --git a/tests/test_comparison.py b/tests/test_comparison.py index 631e5bfa1e..6cacca8c8a 100644 --- a/tests/test_comparison.py +++ b/tests/test_comparison.py @@ -59,6 +59,10 @@ def one_qubit_block(wires=None): qml.PauliX(wires=wires) +@pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor device does not support state return", +) class TestComparison: """A test that compares the output states of the lightning device and ``default.qubit`` for a variety of different circuits. This uses ``default.qubit`` as a reference.""" diff --git a/tests/test_device.py b/tests/test_device.py index 3c18ec7d0d..6ff1007c43 100644 --- a/tests/test_device.py +++ b/tests/test_device.py @@ -65,6 +65,10 @@ def test_create_device_with_unsupported_mpi_buf_size(): pass +@pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor doesn't support 0 wires.", +) def test_device_init_zero_qubit(): """Test the device initialization with zero-qubit.""" diff --git a/tests/test_execute.py b/tests/test_execute.py index 505c705543..02a6bfa1f6 100644 --- a/tests/test_execute.py +++ b/tests/test_execute.py @@ -25,6 +25,10 @@ pytest.skip("No binary module found. Skipping.", allow_module_level=True) +@pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support gates with more than 2 wires; preprocessing is required for the following tests", +) @pytest.mark.usefixtures("use_legacy_and_new_opmath") @pytest.mark.parametrize("diff_method", ("param_shift", "finite_diff")) class TestQChem: diff --git a/tests/test_expval.py b/tests/test_expval.py index 3340ab78b4..3063b91c84 100644 --- a/tests/test_expval.py +++ b/tests/test_expval.py @@ -21,6 +21,7 @@ import pytest from conftest import PHI, THETA, VARPHI from conftest import LightningDevice as ld +from conftest import device_name if not ld._CPP_BINARY_AVAILABLE: pytest.skip("No binary module found. 
Skipping.", allow_module_level=True) @@ -134,6 +135,10 @@ def test_hadamard_expectation(self, theta, phi, qubit_device, tol): ) / np.sqrt(2) assert np.allclose(res, expected, tol) + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support qml.Projector()", + ) def test_projector_expectation(self, theta, phi, qubit_device, tol): """Test that Projector variance value is correct""" n_qubits = 2 @@ -158,7 +163,7 @@ def circuit(): circ_def = qml.QNode(circuit, dev_def) assert np.allclose(circ(), circ_def(), tol) - @pytest.mark.parametrize("n_wires", range(1, 7)) + @pytest.mark.parametrize("n_wires", range(1, 7 if device_name != "lightning.tensor" else 5)) def test_hermitian_expectation(self, n_wires, theta, phi, qubit_device, tol): """Test that Hermitian expectation value is correct""" n_qubits = 7 @@ -178,7 +183,8 @@ def test_hermitian_expectation(self, n_wires, theta, phi, qubit_device, tol): obs = qml.Hermitian(U, wires=perm) def circuit(): - qml.StatePrep(init_state, wires=range(n_qubits)) + if device_name != "lightning.tensor": + qml.StatePrep(init_state, wires=range(n_qubits)) qml.RY(theta, wires=[0]) qml.RY(phi, wires=[1]) qml.CNOT(wires=[0, 1]) @@ -189,7 +195,19 @@ def circuit(): assert np.allclose(circ(), circ_def(), tol) -@pytest.mark.parametrize("diff_method", ("parameter-shift", "adjoint")) +@pytest.mark.parametrize( + "diff_method", + [ + "parameter-shift", + pytest.param( + "adjoint", + marks=pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support the adjoint method", + ), + ), + ], +) class TestExpOperatorArithmetic: """Test integration of lightning with SProd, Prod, and Sum.""" diff --git a/tests/test_gates.py b/tests/test_gates.py index 4958c7ccc1..d4e99ebada 100644 --- a/tests/test_gates.py +++ b/tests/test_gates.py @@ -27,6 +27,12 @@ if not ld._CPP_BINARY_AVAILABLE: pytest.skip("No binary module found. 
Skipping.", allow_module_level=True) +if device_name == "lightning.tensor": + pytest.skip( + "lightning.tensor doesn't support qml.state() used across this module.", + allow_module_level=True, + ) + @pytest.fixture def op(op_name): diff --git a/tests/test_measurements.py b/tests/test_measurements.py index b71986f0b4..318223fcd8 100644 --- a/tests/test_measurements.py +++ b/tests/test_measurements.py @@ -50,6 +50,10 @@ def circuit(x): circuit(0.65) +@pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support qml.probs()", +) class TestProbs: """Test Probs in Lightning devices""" @@ -337,7 +341,9 @@ def circuit(): qml.RY(-0.2, wires=[1]) return qml.expval(ham) - assert np.allclose(circuit(), res, atol=tol, rtol=0) + assert np.allclose( + circuit(), res, atol=tol, rtol=0 if device_name != "lightning.tensor" else 2e-1 + ) def test_value(self, dev, tol): """Test that the expval interface works""" @@ -377,6 +383,10 @@ def circuit(): circuit() +@pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support var()", +) class TestVar: """Tests for the var function""" @@ -598,6 +608,10 @@ def circuit2(): assert np.allclose(circuit1(), circuit2(), atol=tol) +@pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support qml.sample()", +) class TestSample: """Tests that samples are properly calculated.""" @@ -648,6 +662,10 @@ def test_sample_values(self, qubit_device, tol): assert np.allclose(s1**2, 1, atol=tol, rtol=0) +@pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support qml.var()", +) class TestWiresInVar: """Test different Wires settings in Lightning's var.""" @@ -694,6 +712,10 @@ def circuit2(): assert np.allclose(circuit1(), circuit2(), atol=tol) +@pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support shots", +) @flaky(max_runs=5) @pytest.mark.parametrize("shots", [None, 10000, [10000, 11111]]) @pytest.mark.parametrize("measure_f", [qml.counts, qml.expval, qml.probs, qml.sample, qml.var]) diff --git a/tests/test_measurements_sparse.py b/tests/test_measurements_sparse.py index f01e6dd711..16c69903c4 100644 --- a/tests/test_measurements_sparse.py +++ b/tests/test_measurements_sparse.py @@ -25,6 +25,10 @@ pytest.skip("No binary module found. 
Skipping.", allow_module_level=True) +@pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support Sparse Hamiltonians", +) class TestSparseExpval: """Tests for the expval function""" diff --git a/tests/test_serialize.py b/tests/test_serialize.py index d7ce9281d2..465622e9c9 100644 --- a/tests/test_serialize.py +++ b/tests/test_serialize.py @@ -50,6 +50,18 @@ TensorProdObsC64, TensorProdObsC128, ) +elif device_name == "lightning.tensor": + from pennylane_lightning.lightning_tensor_ops import TensorNetC64, TensorNetC128 + from pennylane_lightning.lightning_tensor_ops.observables import ( + HamiltonianC64, + HamiltonianC128, + HermitianObsC64, + HermitianObsC128, + NamedObsC64, + NamedObsC128, + TensorProdObsC64, + TensorProdObsC128, + ) else: from pennylane_lightning.lightning_qubit_ops.observables import ( HamiltonianC64, @@ -81,7 +93,11 @@ def test_wrong_device_name(): (qml.Hadamard(0), NamedObsC128), (qml.Hermitian(np.eye(2), wires=0), HermitianObsC128), ( - qml.PauliZ(0) @ qml.Hadamard(1) @ (0.1 * (qml.PauliZ(2) + qml.PauliX(3))), + ( + qml.PauliZ(0) @ qml.Hadamard(1) @ (0.1 * (qml.PauliZ(2) + qml.PauliX(3))) + if device_name != "lightning.tensor" + else qml.PauliZ(0) @ qml.Hadamard(1) @ qml.PauliZ(2) @ qml.PauliX(3) + ), TensorProdObsC128, ), ( @@ -116,8 +132,14 @@ def test_wrong_device_name(): (qml.Hamiltonian([1], [qml.PauliZ(0)]), NamedObsC128), (qml.sum(qml.Hadamard(0), qml.PauliX(1)), HamiltonianC128), ( - qml.SparseHamiltonian(qml.Hamiltonian([1], [qml.PauliZ(0)]).sparse_matrix(), wires=[0]), - SparseHamiltonianC128, + ( + qml.SparseHamiltonian( + qml.Hamiltonian([1], [qml.PauliZ(0)]).sparse_matrix(), wires=[0] + ) + if device_name != "lightning.tensor" + else 0.5 * qml.PauliX(0) + ), + SparseHamiltonianC128 if device_name != "lightning.tensor" else HamiltonianC128, ), (2.5 * qml.PauliZ(0), HamiltonianC128), ], @@ -778,3 +800,33 @@ def test_global_phase(): D0 = check_global_phase_diagonal(par, wires, targets, controls, control_values) D1 = global_phase_diagonal(par, wires, controls, control_values) assert np.allclose(D0, D1) + + +@pytest.mark.skipif( + device_name != "lightning.tensor", reason="lightning.tensor does not support Sparse Hamiltonian" +) +@pytest.mark.parametrize( + "obs", + [qml.SparseHamiltonian(qml.Hamiltonian([1], [qml.PauliZ(0)]).sparse_matrix(), wires=[0])], +) +def test_unsupported_obs_raises(obs): + """Tests that serializing a SparseHamiltonian raises a NotImplementedError on the lightning.tensor device""" + serializer = QuantumScriptSerializer(device_name) + with pytest.raises( + NotImplementedError, + match="SparseHamiltonian is not supported on the lightning.tensor device.", + ): + serializer._ob(obs, dict(enumerate(obs.wires))) + + +@pytest.mark.skipif( + device_name != "lightning.tensor", reason="Only lightning.tensor requires the dtype check" +) +def test_tensornet_dtype(): + """Tests that the correct TensorNet type is used for the device""" + + serializer_c64 = QuantumScriptSerializer(device_name, use_csingle=True) + serializer_c128 = QuantumScriptSerializer(device_name, use_csingle=False) + + assert isinstance(serializer_c64.sv_type(3, 3), TensorNetC64) + assert isinstance(serializer_c128.sv_type(3, 3), TensorNetC128) diff --git a/tests/test_templates.py b/tests/test_templates.py index f133547282..69dc85f6f3 100644 --- a/tests/test_templates.py +++ b/tests/test_templates.py @@ -30,6 +30,10 @@ class TestGrover: """Test Grover's algorithm (multi-controlled gates, decomposition, etc.)""" + 
@pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support multi-controlled gates and probs()", + ) @pytest.mark.parametrize("n_qubits", range(4, 8)) def test_grover(self, n_qubits): np.random.seed(42) @@ -62,6 +66,10 @@ def circuit(omega): assert np.allclose(np.sum(prob), 1.0) assert prob[index] > 0.95 + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not have full support of multi-controlled gates.", + ) @pytest.mark.skipif(not LightningDevice._new_API, reason="New API required.") @pytest.mark.parametrize("wires", [5, 10, 13, 15]) def test_preprocess_grover_operator_decomposition(self, wires): @@ -90,7 +98,7 @@ def test_angleembedding(self, n_qubits): def circuit(feature_vector): qml.AngleEmbedding(features=feature_vector, wires=range(n_qubits), rotation="Z") - return qml.state() + return qml.state() if device_name != "lightning.tensor" else qml.expval(qml.PauliZ(0)) X = np.random.rand(n_qubits) @@ -103,6 +111,10 @@ def circuit(feature_vector): class TestAmplitudeEmbedding: """Test the AmplitudeEmbedding algorithm.""" + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support QubitStateVector.", + ) @pytest.mark.parametrize("first_op", [False, True]) @pytest.mark.parametrize("n_qubits", range(2, 10, 2)) def test_amplitudeembedding(self, first_op, n_qubits): @@ -119,7 +131,7 @@ def circuit(f=None): if not first_op: qml.Hadamard(0) qml.AmplitudeEmbedding(features=f, wires=range(n_qubits)) - return qml.state() + return qml.state() if device_name != "lightning.tensor" else qml.expval(qml.PauliZ(0)) X = np.random.rand(2**n_qubits) X /= np.linalg.norm(X) @@ -139,7 +151,7 @@ def test_basisembedding(self, n_qubits): def circuit(feature_vector): qml.BasisEmbedding(features=feature_vector, wires=range(n_qubits)) - return qml.state() + return qml.state() if device_name != "lightning.tensor" else qml.expval(qml.PauliZ(0)) X = np.ones(n_qubits) @@ -160,7 +172,7 @@ def test_displacementembedding(self, n_qubits, template): def circuit(feature_vector): template(features=feature_vector, wires=range(n_qubits)) qml.QuadraticPhase(0.1, wires=1) - return qml.state() + return qml.state() if device_name != "lightning.tensor" else qml.expval(qml.PauliZ(0)) X = np.arange(1, n_qubits + 1) @@ -171,6 +183,9 @@ def circuit(feature_vector): class TestIQPEmbedding: """Test the IQPEmbedding algorithm.""" + @pytest.mark.skipif( + device_name == "lightning.tensor", reason="lightning.tensor does not support MultiRZ" + ) @pytest.mark.parametrize("n_qubits", range(2, 20, 2)) def test_iqpembedding(self, n_qubits): dev = qml.device(device_name, wires=n_qubits) @@ -191,6 +206,9 @@ def circuit(feature_vector): class TestQAOAEmbedding: """Test the QAOAEmbedding algorithm.""" + @pytest.mark.skipif( + device_name == "lightning.tensor", reason="lightning.tensor does not support MultiRZ" + ) @pytest.mark.parametrize("n_qubits", range(2, 20, 2)) def test_qaoaembedding(self, n_qubits): dev = qml.device(device_name, wires=n_qubits) @@ -219,7 +237,7 @@ def test_cvneuralnetlayers(self): def circuit(weights): qml.CVNeuralNetLayers(*weights, wires=[0, 1]) - return qml.state() + return qml.state() if device_name != "lightning.tensor" else qml.expval(qml.PauliZ(0)) shapes = qml.CVNeuralNetLayers.shape(n_layers=2, n_wires=n_qubits) weights = [np.random.random(shape) for shape in shapes] @@ -238,7 +256,7 @@ def test_randomlayers(self, n_qubits): def circuit(weights): qml.RandomLayers(weights=weights, 
wires=range(n_qubits)) - return qml.state() + return qml.state() if device_name != "lightning.tensor" else qml.expval(qml.PauliZ(0)) weights = np.array([[0.1, -2.1, 1.4]]) @@ -258,7 +276,7 @@ def test_stronglyentanglinglayers(self, n_qubits): def circuit(weights): qml.StronglyEntanglingLayers(weights=weights, wires=range(n_qubits)) - return qml.state() + return qml.state() if device_name != "lightning.tensor" else qml.expval(qml.PauliZ(0)) shape = qml.StronglyEntanglingLayers.shape(n_layers=2, n_wires=n_qubits) weights = np.random.random(size=shape) @@ -315,6 +333,10 @@ def circuit(weights): class TestMottonenStatePreparation: """Test the MottonenStatePreparation algorithm.""" + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support GlobalPhase and 2+ wires gates.", + ) @pytest.mark.parametrize("n_qubits", range(2, 6, 2)) def test_mottonenstatepreparation(self, n_qubits): dev = qml.device(device_name, wires=n_qubits) @@ -336,6 +358,10 @@ def circuit(state): class TestArbitraryStatePreparation: """Test the ArbitraryStatePreparation algorithm.""" + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support MultiRZ.", + ) @pytest.mark.parametrize("n_qubits", range(2, 6, 2)) def test_arbitrarystatepreparation(self, n_qubits): dev = qml.device(device_name, wires=n_qubits) @@ -356,6 +382,10 @@ def circuit(weights): class TestCosineWindow: """Test the CosineWindow algorithm.""" + @pytest.mark.skipif( + device_name == "lightning.tensor", + reason="lightning.tensor does not support 2+ wires gates that can't be decomposed into 1,2 wires gates.", + ) @pytest.mark.parametrize("n_qubits", range(2, 6, 2)) def test_cosinewindow(self, n_qubits): dev = qml.device(device_name, wires=n_qubits) @@ -389,7 +419,7 @@ def test_allsinglesdoubles(self): def circuit(weights, hf_state, singles, doubles): qml.templates.AllSinglesDoubles(weights, range(n_qubits), hf_state, singles, doubles) - return qml.state() + return qml.state() if device_name != "lightning.tensor" else qml.expval(qml.PauliZ(0)) weights = np.random.normal(0, np.pi, len(singles) + len(doubles)) res = qml.QNode(circuit, dev, diff_method=None)(weights, hf_state, singles, doubles) @@ -412,7 +442,7 @@ def circuit(unitary_matrix): wires=range(3), unitary_matrix=unitary_matrix, ) - return qml.state() + return qml.state() if device_name != "lightning.tensor" else qml.expval(qml.PauliZ(0)) unitary_matrix = np.array( [ @@ -445,7 +475,7 @@ def test_gatefabric(self): def circuit(weights): qml.GateFabric(weights, wires=[0, 1, 2, 3], init_state=ref_state, include_pi=True) - return qml.state() + return qml.state() if device_name != "lightning.tensor" else qml.expval(qml.PauliZ(0)) layers = 2 shape = qml.GateFabric.shape(n_layers=layers, n_wires=n_qubits) @@ -490,7 +520,7 @@ def test_uccsd(self): def circuit(weights): qml.UCCSD(weights, range(n_qubits), s_wires, d_wires, hf_state) - return qml.state() + return qml.state() if device_name != "lightning.tensor" else qml.expval(qml.PauliZ(0)) weights = np.random.random(len(singles) + len(doubles)) @@ -529,7 +559,7 @@ def test_kupccgsd(self): def circuit(weights): qml.kUpCCGSD(weights, range(n_qubits), k=1, delta_sz=0, init_state=hf_state) - return qml.state() + return qml.state() if device_name != "lightning.tensor" else qml.expval(qml.PauliZ(0)) # Get the shape of the weights for this template layers = 1 @@ -563,7 +593,7 @@ def test_particleconservingu1(self): # Define the cost function def circuit(params): ansatz(params) - 
return qml.state() + return qml.state() if device_name != "lightning.tensor" else qml.expval(qml.PauliZ(0)) layers = 2 shape = qml.ParticleConservingU1.shape(layers, n_qubits) @@ -596,7 +626,7 @@ def test_particleconservingu2(self): # Define the cost function def circuit(params): ansatz(params) - return qml.state() + return qml.state() if device_name != "lightning.tensor" else qml.expval(qml.PauliZ(0)) layers = 2 shape = qml.ParticleConservingU2.shape(layers, n_qubits) @@ -622,7 +652,9 @@ def test_approxtimeevolution(self, n_qubits): def circuit(time): qml.ApproxTimeEvolution(hamiltonian, time, 1) - return qml.state() + return ( + qml.state() if device_name != "lightning.tensor" else qml.expval(qml.PauliZ(0)) + ) # lightning.tensor does not support qml.state() res = qml.QNode(circuit, dev, diff_method=None)(1.3) ref = qml.QNode(circuit, dq, diff_method=None)(1.3) @@ -644,7 +676,9 @@ def test_qdrift(self, n_qubits): def circuit(time): qml.QDrift(hamiltonian, time=time, n=10, seed=10) - return qml.state() + return ( + qml.state() if device_name != "lightning.tensor" else qml.expval(qml.PauliZ(0)) + ) # lightning.tensor does not support qml.state() res = qml.QNode(circuit, dev, diff_method=None)(1.3) ref = qml.QNode(circuit, dq, diff_method=None)(1.3) @@ -666,7 +700,9 @@ def test_trotterproduct(self, n_qubits): def circuit(time): qml.TrotterProduct(hamiltonian, time=time, order=2) - return qml.state() + return ( + qml.state() if device_name != "lightning.tensor" else qml.expval(qml.PauliZ(0)) + ) # lightning.tensor does not support qml.state() res = qml.QNode(circuit, dev, diff_method=None)(1.3) ref = qml.QNode(circuit, dq, diff_method=None)(1.3) @@ -698,7 +734,11 @@ def circuit(): estimation_wires=estimation_wires, ) - return qml.probs(estimation_wires) + return ( + qml.probs(estimation_wires) + if device_name != "lightning.tensor" + else qml.expval(qml.PauliZ(0)) + ) # lightning.tensor does not support qml.probs() res = qml.QNode(circuit, dev, diff_method=None)() ref = qml.QNode(circuit, dq, diff_method=None)() @@ -717,7 +757,9 @@ def test_qft(self, n_qubits): def circuit(basis_state): qml.BasisState(basis_state, wires=range(n_qubits)) qml.QFT(wires=range(n_qubits)) - return qml.state() + return ( + qml.state() if device_name != "lightning.tensor" else qml.expval(qml.PauliZ(0)) + ) # lightning.tensor does not support qml.state() basis_state = [0] * n_qubits basis_state[0] = 1 @@ -755,7 +797,9 @@ def test_aqft(self, n_qubits): def circuit(basis_state): qml.BasisState(basis_state, wires=range(n_qubits)) qml.AQFT(order=1, wires=range(n_qubits)) - return qml.state() + return ( + qml.state() if device_name != "lightning.tensor" else qml.expval(qml.PauliZ(0)) + ) # lightning.tensor does not support qml.state() basis_state = [0] * n_qubits basis_state[0] = 1 diff --git a/tests/test_var.py b/tests/test_var.py index 01500da73e..560151ecad 100644 --- a/tests/test_var.py +++ b/tests/test_var.py @@ -19,10 +19,14 @@ import pytest from conftest import PHI, THETA, VARPHI from conftest import LightningDevice as ld +from conftest import device_name if not ld._CPP_BINARY_AVAILABLE: pytest.skip("No binary module found. 
Skipping.", allow_module_level=True) +if device_name == "lightning.tensor": + pytest.skip("lightning.tensor doesn't support var.", allow_module_level=True) + np.random.seed(42) diff --git a/tests/test_vjp.py b/tests/test_vjp.py index 489134dc97..63ec516645 100644 --- a/tests/test_vjp.py +++ b/tests/test_vjp.py @@ -25,6 +25,9 @@ if not ld._CPP_BINARY_AVAILABLE: pytest.skip("No binary module found. Skipping.", allow_module_level=True) +if device_name == "lightning.tensor": + pytest.skip("lightning.tensor doesn't support vjp.", allow_module_level=True) + def get_vjp(device, tapes, dy): """Helper to get VJP for a tape or batch of tapes"""
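
For readers adapting their own test suites, the recurring patterns in the hunks above can be condensed into a short, self-contained sketch. This is illustrative only and is not part of the patch: the helper name `state_or_expval`, the test names, and the direct `from conftest import device_name` import are assumptions, while the constraints they encode (expval-only measurements, skipping unsupported `qml.probs()` returns, full-length `qml.BasisState` arrays, parameter-shift gradients) are taken from the skip reasons and conditional returns introduced above.

# Illustrative sketch only -- not part of this patch. Assumes the same conftest
# helper (device_name) used throughout the test suite.
import numpy as np
import pennylane as qml
import pytest

from conftest import device_name  # e.g. "lightning.qubit" or "lightning.tensor"


def state_or_expval():
    """Hypothetical helper mirroring the conditional returns in test_templates.py:
    lightning.tensor supports qml.expval but not qml.state, so fall back."""
    if device_name == "lightning.tensor":
        return qml.expval(qml.PauliZ(0))
    return qml.state()


@pytest.mark.skipif(
    device_name == "lightning.tensor",
    reason="lightning.tensor does not support qml.probs()",
)
def test_probs_pattern():
    """Tests that need an unsupported measurement are skipped on lightning.tensor."""
    dev = qml.device(device_name, wires=2)

    @qml.qnode(dev)
    def circuit():
        qml.Hadamard(0)
        qml.CNOT(wires=[0, 1])
        return qml.probs(wires=[0, 1])

    assert np.isclose(np.sum(circuit()), 1.0)


def test_expval_pattern():
    """Expectation values are supported, so no skip is needed: BasisState takes a
    full-length array and parameter-shift provides the gradients."""
    dev = qml.device(device_name, wires=2)

    @qml.qnode(dev, diff_method="parameter-shift")
    def circuit(x):
        qml.BasisState(np.array([1, 0]), wires=[0, 1])
        qml.RX(x, wires=0)
        qml.CNOT(wires=[0, 1])
        return qml.expval(qml.PauliZ(0))

    assert np.isclose(circuit(0.0), -1.0)

Centralizing the `qml.state()` fallback in a single helper such as the hypothetical `state_or_expval` keeps the per-test conditionals out of the circuit bodies; the patch itself instead repeats the inline conditional in each template test.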