With the refactoring of pyqtorch, we are for now prioritizing a HamEvo implementation based on torch.matrix_exp, since it has proven to be the best compromise between efficiency and support for torch autograd.
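For reference, the matrix_exp route amounts to exponentiating the generator -iHt directly. A minimal sketch in plain torch (illustrative only, not the actual pyqtorch HamEvo interface):

import torch

# Minimal sketch: evolve a state under a Hermitian H for time t by exponentiating
# the generator directly; this is the approach HamEvo now prioritizes.
def evolve_matrix_exp(H: torch.Tensor, t: torch.Tensor, state: torch.Tensor) -> torch.Tensor:
    U = torch.matrix_exp(-1j * H * t)  # e^(-i H t)
    return U @ state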
Below is the previous code computing HamEvo based on matrix diagonalization. It could prove useful in the future, as diagonalization is the best method for evaluating the evolution of the same Hamiltonian at several time values, since the eigendecomposition can be reused.
from functools import lru_cache
from typing import Optional, Tuple

import torch

# is_diag, is_real, BATCH_DIM, HamEvo and _apply_einsum are assumed to be available
# from the surrounding pyqtorch module.


@lru_cache(maxsize=256)
def diagonalize(H: torch.Tensor) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
    """
    Diagonalizes a Hermitian Hamiltonian, returning eigenvalues and eigenvectors.
    First checks if it is already diagonal, and second checks if H is real.
    """
    if is_diag(H):
        # Skip diagonalization
        eig_values = torch.diagonal(H)
        eig_vectors = None
    else:
        if is_real(H):
            eig_values, eig_vectors = torch.linalg.eigh(H.real)
            eig_values = eig_values.to(torch.cdouble)
            eig_vectors = eig_vectors.to(torch.cdouble)
        else:
            eig_values, eig_vectors = torch.linalg.eigh(H)
    return eig_values, eig_vectors
class HamEvoEig(HamEvo):
    """
    Class for Hamiltonian evolution operation using the Eigenvalue Decomposition method.

    Args:
        H (tensor): Hamiltonian tensor
        t (tensor): Time tensor
        qubits (Any): Qubits for operation
        n_qubits (int): Number of qubits
        n_steps (int): Number of steps to be performed, defaults to 100
    """

    def __init__(
        self, H: torch.Tensor, t: torch.Tensor, qubits: list[int], n_qubits: int, n_steps: int = 100
    ):
        super().__init__(H, t, qubits, n_qubits, n_steps)
        if len(self.H.size()) < 3:
            self.H = self.H.unsqueeze(2)
        batch_size_h = self.H.size()[BATCH_DIM]
        batch_size_t = self.t.size(0)
        self._eigs = []
        if batch_size_h == batch_size_t or batch_size_t == 1:
            for i in range(batch_size_h):
                eig_values, eig_vectors = diagonalize(self.H[..., i])
                self._eigs.append((eig_values, eig_vectors))
        elif batch_size_h == 1:
            eig_values, eig_vectors = diagonalize(self.H[..., 0])
            for i in range(batch_size_t):
                self._eigs.append((eig_values, eig_vectors))
        else:
            msg = "H and t batchsizes either have to match or (one of them has to) be equal to one."
            raise ValueError(msg)
        self.batch_size = max(batch_size_h, batch_size_t)
    def apply(self, state: torch.Tensor) -> torch.Tensor:
        """
        Applies the Hamiltonian evolution operation on the given state
        using the Eigenvalue Decomposition method.

        Args:
            state (tensor): Input quantum state.

        Returns:
            tensor: Output state after Hamiltonian evolution.
        """
        (x, y, _) = self.H.size()
        evol_operator = torch.zeros(x, y, self.batch_size).to(torch.cdouble)
        t_evo = self.t.repeat(self.batch_size) if self.t.size(0) == 1 else self.t
        for i, (eig_values, eig_vectors) in enumerate(self._eigs):
            if eig_vectors is None:
                # Compute e^(-i H t)
                evol_operator[..., i] = torch.diag(torch.exp(-1j * eig_values * t_evo[i]))
            else:
                # Compute e^(-i D t)
                eig_exp = torch.diag(torch.exp(-1j * eig_values * t_evo[i]))
                # e^(-i H t) = V.e^(-i D t).V^\dagger
                evol_operator[..., i] = torch.matmul(
                    torch.matmul(eig_vectors, eig_exp),
                    torch.conj(eig_vectors.transpose(0, 1)),
                )
        return _apply_einsum(
            state, evol_operator, self.qubit_support, self.n_qubits, self.batch_size
        )
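To illustrate why the eigendecomposition pays off for repeated time values, here is a standalone sketch in plain torch (hypothetical helper, not part of pyqtorch): diagonalize H once, then build e^(-i H t) for many time values by only re-exponentiating the eigenvalues.

import torch

def evolution_operators(H: torch.Tensor, times: torch.Tensor) -> torch.Tensor:
    # H = V D V^dagger, computed once
    eig_values, eig_vectors = torch.linalg.eigh(H)
    eig_values = eig_values.to(torch.cdouble)
    eig_vectors = eig_vectors.to(torch.cdouble)
    # Phases e^(-i D t) for every t, shape (n_times, dim)
    phases = torch.exp(-1j * times[:, None] * eig_values[None, :])
    # U(t) = V diag(e^(-i D t)) V^dagger, batched over the time dimension
    return torch.einsum("ij,tj,kj->tik", eig_vectors, phases, eig_vectors.conj())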