Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

CP^N-1 model #133

Open
wants to merge 33 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
33 commits
Select commit Hold shift + click to select a range
a77dc0f
sparse domain improved memory footprint
mbruno46 Jul 28, 2022
6c6b798
Merge branch 'master' of https://github.com/lehner/gpt into features-…
mbruno46 Jul 28, 2022
9a3ea84
extended functionality of divisible_by in sparse domain
mbruno46 Aug 23, 2022
425bfdb
Merge branch 'master' of https://github.com/lehner/gpt into features-…
mbruno46 Jan 15, 2023
de4c2bf
Merge branch 'features-mbruno' of github.com:mbruno46/gpt into featur…
mbruno46 Aug 29, 2023
7138082
fixing merge with lehner master
mbruno46 Aug 29, 2023
b481e17
added support for CP^N-1 model
mbruno46 Sep 1, 2023
eacc2ff
introduced step_size verbosity for symplectic integrators
mbruno46 Sep 1, 2023
ea2e239
towards inner product iVSinglet4-10-30 [missing expr template]
mbruno46 Sep 3, 2023
bbfd183
style
mbruno46 Sep 3, 2023
c947436
added support site inner product vcomplex
mbruno46 Oct 20, 2023
67b6ed1
timings
lehner May 31, 2023
6920f16
new parallel_transport_matrix
lehner May 31, 2023
5839742
added support for CP^N-1 model
mbruno46 Sep 1, 2023
fd3aed5
introduced step_size verbosity for symplectic integrators
mbruno46 Sep 1, 2023
eac6d45
towards inner product iVSinglet4-10-30 [missing expr template]
mbruno46 Sep 3, 2023
e000a8a
style
mbruno46 Sep 3, 2023
936490d
added support site inner product vcomplex
mbruno46 Oct 20, 2023
504ce22
timings
lehner May 31, 2023
a9b3248
new parallel_transport_matrix
lehner May 31, 2023
8ac4693
Merge branch 'features-mbruno' of github.com:mbruno46/gpt into featur…
mbruno46 Dec 5, 2023
7169764
added support for CP^N-1 model
mbruno46 Sep 1, 2023
deee67d
towards inner product iVSinglet4-10-30 [missing expr template]
mbruno46 Sep 3, 2023
9fececb
added support site inner product vcomplex
mbruno46 Oct 20, 2023
8fe9b1d
timings
lehner May 31, 2023
cdbad82
new parallel_transport_matrix
lehner May 31, 2023
4c92490
introduced step_size verbosity for symplectic integrators
mbruno46 Sep 1, 2023
9c2664b
style
mbruno46 Sep 3, 2023
4e4ef05
timings
lehner May 31, 2023
aef135e
new parallel_transport_matrix
lehner May 31, 2023
f2a3249
Merge branch 'features-mbruno' of github.com:mbruno46/gpt into featur…
mbruno46 Mar 31, 2024
fc724a3
merging upstream/master
mbruno46 May 15, 2024
1bbe4d7
Merge branch 'lehner-master' into features-mbruno
mbruno46 May 15, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
125 changes: 125 additions & 0 deletions applications/hmc/cpn.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,125 @@
#!/usr/bin/env python3
#
# Authors: Christoph Lehner, Mattia Bruno, Gabriele Morandi 2023
#
# HMC for 2D CP^N-1 theory
#
import gpt as g
import sys, os
import numpy

# suppress the per-step diagnostic output of the symplectic integrators
g.default.set_verbose("step_size", False)
grid = g.grid([42, 42], g.double)  # 2D 42x42 lattice, double precision
rng = g.random("hmc-cpn-model")  # fixed seed -> reproducible run

# action for conj. momenta:
g.message(f"Lattice = {grid.fdimensions}")
g.message("Actions:")
a0 = g.qcd.scalar.action.mass_term()
g.message(f" - {a0.__name__}")

# CP^N-1 action
beta = 0.70
N = 10
a1 = g.qcd.scalar.action.cpn(N, beta)
g.message(f" - {a1.__name__}")

# fields: N-component complex vector z plus one U(1) link per direction
z = g.vcomplex(grid, N)
a1.draw(z, rng)  # cpn.draw with no constraint normalizes z site-by-site
l = [g.u1(grid) for _ in range(grid.nd)]
rng.element(l)
fields = [z] + l

# conjugate momenta (cartesian/Lie-algebra partners of the fields)
mom_z = g.group.cartesian(z)
mom_l = g.group.cartesian(l)
moms = [mom_z] + mom_l


def hamiltonian():
    """Total HMC Hamiltonian: momentum terms plus the CP^N-1 action."""
    kinetic = a0(mom_z) + a0(mom_l)
    potential = a1(fields)
    return kinetic + potential


# molecular dynamics
sympl = g.algorithms.integrator.symplectic

# momentum updates driven by the cpn-action gradients w.r.t. z and the links
ip_z = sympl.update_p(mom_z, lambda: a1.gradient(fields, z))
ip_l = sympl.update_p(mom_l, lambda: a1.gradient(fields, l))


class constrained_iq(sympl.symplectic_base):
    # Combined position update: the U(1) links evolve with the standard
    # q-update while (z, mom_z) are rotated exactly on the constraint
    # surface by cpn.constrained_leap_frog.
    def __init__(self, fields, moms):
        z = fields[0]
        l = fields[1:]
        mom_z = moms[0]
        mom_l = moms[1:]

        # standard position update for the link fields
        iq_l = sympl.update_q(l, lambda: a0.gradient(mom_l, mom_l))

        def inner(eps):
            # exact constrained step for z, then the free link step
            a1.constrained_leap_frog(eps, z, mom_z)
            iq_l(eps)

        # NOTE(review): argument meaning follows sympl.symplectic_base
        # (order 1, no p-steps, one q-step, no tag, name) — confirm against
        # lib/gpt/algorithms/integrator/symplectic.py
        super().__init__(1, [], [inner], None, "constrained iq")


iq = constrained_iq(fields, moms)

# integrator: leap-frog with 50 steps per trajectory
mdint = sympl.leap_frog(50, [ip_z, ip_l], iq)
g.message(f"Integration scheme:\n{mdint}")

# metropolis accept/reject step
metro = g.algorithms.markov.metropolis(rng)

# MD units (trajectory length)
tau = 1.0
g.message(f"tau = {tau} MD units")


def hmc(tau, moms):
    """Run one HMC trajectory of length tau; return [accept_flag, dH]."""
    mom_z, mom_l = moms[0], moms[1:]

    # refresh momenta: gaussian for the links, constrained draw for z
    rng.normal_element(mom_l)
    a1.draw(mom_z, rng, z)

    accrej = metro(fields)  # snapshot of the fields for accept/reject
    h0 = hamiltonian()
    mdint(tau)
    h1 = hamiltonian()
    dh = h1 - h0
    return [accrej(h1, h0), dh]


# thermalization: 20 blocks of 10 trajectories (i*5 = percent completed)
for i in range(1, 21):
    h = []
    for _ in range(10):
        h += [hmc(tau, moms)]
    h = numpy.array(h)  # columns: [accept_flag, dH]
    g.message(f"{i*5} % of thermalization completed")
    g.message(
        f"Action = {a1(fields)}, Acceptance = {numpy.mean(h[:,0]):.2f}, |dH| = {numpy.mean(numpy.abs(h[:,1])):.4e}"
    )

# measure action
def measure():
    """Return the energy density: action per site, direction and N*beta."""
    normalization = N * beta * grid.fsites * grid.nd
    return [a1(fields) / normalization]


# production: 100 blocks of 10 trajectories, measuring after each trajectory
history = []
data = []
for i in range(100):
    for k in range(10):
        history += [hmc(tau, moms)]
        # NOTE(review): per-trajectory measurement assumed — the scraped
        # source lost indentation; confirm placement against upstream
        data += [measure()]
    g.message(f"Trajectory {i}")

history = numpy.array(history)  # columns: [accept_flag, dH]
g.message(f"Acceptance rate = {numpy.mean(history[:,0]):.2f}")
g.message(f"<|dH|> = {numpy.mean(numpy.abs(history[:,1])):.4e}")

data = numpy.array(data)
g.message(f"Energy density = {numpy.mean(data[:,0])}")
7 changes: 3 additions & 4 deletions lib/cgpt/lib/eval/mul_vlat_vlat.h
Original file line number Diff line number Diff line change
Expand Up @@ -59,9 +59,9 @@ void eval_mul_vlat_vlat(std::vector<cgpt_Lattice_base*> & dst_vl,

// VV -> S/M
if (lhs_singlet_rank == 1 && rhs_singlet_rank == 1) {
ASSERT(lhs_singlet_dim == rhs_singlet_dim);
if (lhs_unary == 0 && rhs_unary == (BIT_TRANS|BIT_CONJ)) {
// outer product -> M
ASSERT(lhs_singlet_dim == rhs_singlet_dim);
dst_vl.resize(lhs_singlet_dim*rhs_singlet_dim, 0);
for (int i=0;i<lhs_singlet_dim;i++) {
for (int j=0;j<rhs_singlet_dim;j++) {
Expand All @@ -71,9 +71,8 @@ void eval_mul_vlat_vlat(std::vector<cgpt_Lattice_base*> & dst_vl,
}
return;
} else if (lhs_unary == (BIT_TRANS|BIT_CONJ) && rhs_unary == 0) {
ERR("Not implemented");
//ERR("Not implemented");
// inner product -> S
/*ASSERT(lhs_singlet_dim == rhs_singlet_dim);
dst_vl.resize(1, 0);
bool _ac = ac;
for (int i=0;i<lhs_singlet_dim;i++) {
Expand All @@ -82,7 +81,7 @@ void eval_mul_vlat_vlat(std::vector<cgpt_Lattice_base*> & dst_vl,
_ac = true;
}
}
return;*/
return;
} else {
ERR("Invalid combination of two vectors");
}
Expand Down
2 changes: 1 addition & 1 deletion lib/cgpt/lib/expression/unary.h
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ cgpt_Lattice_base* lattice_lat(cgpt_Lattice_base* dst, bool ac, const A& lat, Co
template<typename A>
cgpt_Lattice_base* lattice_unary_lat(cgpt_Lattice_base* dst, bool ac, const A& la,int unary_expr,ComplexD coef) {
if (unary_expr == 0) {
return lattice_lat(dst, ac, la, coef);
return lattice_lat(dst, ac, closure(ToSinglet(la)), coef);
} else if (unary_expr == (BIT_SPINTRACE|BIT_COLORTRACE)) {
return lattice_expr(dst, ac, coef*ToSinglet(trace(la)));
} else if (unary_expr == BIT_SPINTRACE) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,6 @@ cgpt_Lattice_base* cgpt_lattice_mul(cgpt_Lattice_base* dst, bool ac, int unary_a
typedef vComplexD vtype;
_COMPATIBLE_MSR_(iSinglet);
_OUTER_PRODUCT_(iVSinglet10);
// _INNER_PRODUCT_(iVSinglet10);
_INNER_PRODUCT_(iVSinglet10);
ERR("Not implemented");
}
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,6 @@ cgpt_Lattice_base* cgpt_lattice_mul(cgpt_Lattice_base* dst, bool ac, int unary_a
typedef vComplexD vtype;
_COMPATIBLE_MSR_(iSinglet);
_OUTER_PRODUCT_(iVSinglet30);
// _INNER_PRODUCT_(iVSinglet30);
_INNER_PRODUCT_(iVSinglet30);
ERR("Not implemented");
}
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,6 @@ cgpt_Lattice_base* cgpt_lattice_mul(cgpt_Lattice_base* dst, bool ac, int unary_a
typedef vComplexD vtype;
_COMPATIBLE_MSR_(iSinglet);
_OUTER_PRODUCT_(iVSinglet4);
// _INNER_PRODUCT_(iVSinglet4);
_INNER_PRODUCT_(iVSinglet4);
ERR("Not implemented");
}
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,6 @@ cgpt_Lattice_base* cgpt_lattice_mul(cgpt_Lattice_base* dst, bool ac, int unary_a
typedef vComplexF vtype;
_COMPATIBLE_MSR_(iSinglet);
_OUTER_PRODUCT_(iVSinglet10);
// _INNER_PRODUCT_(iVSinglet10);
_INNER_PRODUCT_(iVSinglet10);
ERR("Not implemented");
}
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,6 @@ cgpt_Lattice_base* cgpt_lattice_mul(cgpt_Lattice_base* dst, bool ac, int unary_a
typedef vComplexF vtype;
_COMPATIBLE_MSR_(iSinglet);
_OUTER_PRODUCT_(iVSinglet30);
// _INNER_PRODUCT_(iVSinglet30);
_INNER_PRODUCT_(iVSinglet30);
ERR("Not implemented");
}
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,6 @@ cgpt_Lattice_base* cgpt_lattice_mul(cgpt_Lattice_base* dst, bool ac, int unary_a
typedef vComplexF vtype;
_COMPATIBLE_MSR_(iSinglet);
_OUTER_PRODUCT_(iVSinglet4);
// _INNER_PRODUCT_(iVSinglet4);
_INNER_PRODUCT_(iVSinglet4);
ERR("Not implemented");
}
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,6 @@ cgpt_Lattice_base* cgpt_lattice_mul(cgpt_Lattice_base* dst, bool ac, int unary_a
typedef {precision_vector} vtype;
_COMPATIBLE_MSR_(iSinglet);
_OUTER_PRODUCT_(iVSinglet{tensor_arg_1});
// _INNER_PRODUCT_(iVSinglet{tensor_arg_1});
_INNER_PRODUCT_(iVSinglet{tensor_arg_1});
ERR("Not implemented");
}
5 changes: 4 additions & 1 deletion lib/gpt/algorithms/integrator/symplectic.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,8 +81,11 @@ def __mul__(self, f):
return step(self.funcs, [c * f for c in self.c], self.n)

def __call__(self, eps):
verbose = gpt.default.is_verbose("step_size")

for i in range(self.nf):
# gpt.message(f"call eps = {eps}, {i} / {self.nf}")
if verbose:
gpt.message(f"call eps = {eps}, {i} / {self.nf}")
self.funcs[i](self.c[i] * eps**self.n)


Expand Down
2 changes: 1 addition & 1 deletion lib/gpt/default.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ def get_ivec(tag, default, ndim):
"io,bicgstab,cg,defect_correcting,cagcr,fgcr,fgmres,mr,irl,repository,arnoldi,power_iteration,"
+ "checkpointer,modes,random,split,coarse_grid,gradient_descent,adam,non_linear_cg,"
+ "coarsen,qis_map,metropolis,su2_heat_bath,u1_heat_bath,fom,chronological,minimal_residual_extrapolation,"
+ "subspace_minimal_residual"
+ "subspace_minimal_residual,step_size"
)
verbose_additional = "eval,merge,orthogonalize,copy_plan"
verbose = set()
Expand Down
1 change: 1 addition & 0 deletions lib/gpt/qcd/scalar/action/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,4 +18,5 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from gpt.qcd.scalar.action.phi4 import phi4
from gpt.qcd.scalar.action.cpn import cpn
from gpt.qcd.scalar.action.mass_term import mass_term, fourier_mass_term
122 changes: 122 additions & 0 deletions lib/gpt/qcd/scalar/action/cpn.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,122 @@
#
# GPT - Grid Python Toolkit
# Copyright (C) 2023 Christoph Lehner ([email protected], https://github.com/lehner/gpt)
# 2023 Mattia Bruno, Gabriele Morandi
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import gpt as g
from gpt.core.group import differentiable_functional
import numpy

# CP^{N-1} model

# S[z,l] = -N * beta * sum_n,mu (z_{n+mu}^dag * z_n * l_{n,mu} + z_n^dag * z_{n+mu} * l_{n,mu}^dag)
# = -2 * N * beta * sum_n,mu [Re(z_{n+mu}^dag * z_n * l_{n,mu}) - 1]
class cpn(differentiable_functional):
    """CP^{N-1} sigma model coupled to U(1) link fields.

    The field list is [z, l_0, ..., l_{nd-1}]: z is an N-component
    complex vector (normalized per site by draw), l_mu are U(1) links.
    """

    def __init__(self, N, b):
        # N: number of complex components of z; b: coupling beta
        self.N = N
        self.beta = b
        self.__name__ = f"cpn(N = {self.N}, beta = {self.beta})"

    # z = fields[0], l = fields[1:]
    def split(self, fields):
        return fields[0], fields[1:]

    def __call__(self, fields):
        """Evaluate the action on fields = [z, l_0, ..., l_{nd-1}]."""
        z, l = self.split(fields)

        # J_n = sum_mu z_{n+mu} l_{n,mu}^dag; Re <J, z> is the hopping term
        J = g.lattice(z)
        J[:] = 0.0
        for mu in range(z.grid.nd):
            J += g.cshift(z, mu, +1) * g.adj(l[mu])

        # fsites*nd offset makes S vanish on the fully aligned configuration
        action = -2 * self.N * self.beta * (g.inner_product(J, z).real - z.grid.fsites * z.grid.nd)
        return action

    @differentiable_functional.multi_field_gradient
    def gradient(self, fields, dfields):
        """Return the gradients of S w.r.t. each field listed in dfields."""

        def gradient_l(z, l, mu):
            # force on link l_mu; the imaginary part is the u(1) cartesian
            # component of the variation
            frc = g.lattice(l[0])
            frc @= (
                2
                * self.beta
                * self.N
                * g.component.imag(g.trace(z * g.adj(g.cshift(z, mu, +1))) * l[mu])
            )
            frc.otype = l[0].otype.cartesian()
            return frc

        def gradient_z(z, l):
            # parallel-transported nearest-neighbor sum entering dS/dz^dag
            J = g.lattice(z)
            J[:] = 0.0
            for mu in range(z.grid.nd):
                J += g.cshift(z, mu, +1) * g.adj(l[mu])
                J += g.cshift(z * l[mu], mu, -1)

            frc = g.lattice(z)
            frc @= -2 * self.beta * self.N * J

            # project out the component parallel to z (tangent to |z| = 1)
            frc -= g.trace(frc * g.adj(z)) * z
            return frc

        z, l = self.split(fields)
        frc = []
        for df in g.core.util.to_list(dfields):
            # k = 0 is the z field, k >= 1 maps to link direction k - 1
            k = fields.index(df)
            if k == 0:
                frc.append(gradient_z(z, l))
            else:
                frc.append(gradient_l(z, l, k - 1))
        return frc

    # https://arxiv.org/abs/1102.1852
    def constrained_leap_frog(self, eps, z, mom_z):
        """Exact step of size eps for (z, mom_z); both updated in place."""
        # TO DO: replace with g.adj(v1) * v2
        def dot(v1, v2):
            return g.trace(v2 * g.adj(v1))

        # n = |pi| site-by-site
        n = g.real(z.grid)
        n @= g.component.sqrt(g.component.real(dot(mom_z, mom_z)))

        # z' = cos(alpha) z + (1/|pi|) sin(alpha) mom_z
        # mom_z' = -|pi| sin(alpha) z + cos(alpha) mom_z
        # alpha = eps |pi|
        _z = g.lattice(z)
        _z @= z  # keep the original z; it appears in both update lines

        cos = g.real(z.grid)
        cos @= g.component.cos(eps * n)

        sin = g.real(z.grid)
        sin @= g.component.sin(eps * n)

        z @= cos * _z + g(g.component.inv(n) * sin) * mom_z
        mom_z @= -g(n * sin) * _z + cos * mom_z
        del _z, cos, sin, n

    # https://arxiv.org/abs/1102.1852
    def draw(self, field, rng, constraint=None):
        """Draw a random field.

        With constraint=None, field is the z field: draw it randomly and
        normalize per site. Otherwise field is the z momentum and
        constraint is z: draw a gaussian momentum and project it onto the
        tangent space of the constraint surface.
        """
        if constraint is None:
            z = field
            rng.element(z)
            n = g.component.real(g.trace(z * g.adj(z)))
            z @= z * g.component.inv(g.component.sqrt(n))
        else:
            mom_z = field
            z = constraint
            rng.normal_element(mom_z)
            # TO DO change to z * g(g.adj(z) * mom_z)
            mom_z @= mom_z - g(z * g.adj(z)) * mom_z
21 changes: 21 additions & 0 deletions tests/core/vcomplex.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
#!/usr/bin/env python3
#
# Authors: Christoph Lehner 2020, Mattia Bruno 2023
#
# Desc.: Illustrate core concepts and features
#
import gpt as g

# small 4D grid in double precision with a seeded RNG
L = [8, 4, 4, 4]
grid = g.grid(L, g.double)
rng = g.random("vcomplex")

def test_local_inner_product(v):
    """Check adj(v)*v agrees with trace(v*adj(v)) site-by-site."""
    direct = g(g.adj(v) * v)
    via_trace = g(g.trace(v * g.adj(v)))
    assert g.norm2(direct - via_trace) < 1e-15

# run the check for every vcomplex size with a dedicated expr template
for N in [4, 10, 30]:
    v1 = g.vcomplex(grid, N)
    # NOTE(review): v1 is never filled via rng — the identity holds for
    # arbitrary field content, but consider rng.element(v1) to avoid
    # depending on uninitialized memory; confirm upstream intent
    test_local_inner_product(v1)
    del v1
Loading