From f6047700fb5dcf30fc5bff4d405b184c1e7b3308 Mon Sep 17 00:00:00 2001
From: "David J. Gardner"
Date: Sat, 29 Jun 2024 21:35:38 -0700
Subject: [PATCH] apply formatting

---
 .../scripts/compare_error.py | 83 +-
 .../scripts/compute_error.py | 77 +-
 .../scripts/make_plots.py | 305 +--
 .../scripts/pickle_solution_output.py | 64 +-
 .../plot_nvector_performance_results.py | 345 ++--
 .../plot_nvector_performance_speedup.py | 222 ++-
 .../arkode/CXX_parallel/plot_brusselator1D.py | 63 +-
 examples/arkode/CXX_parallel/plot_heat2D_p.py | 85 +-
 examples/arkode/CXX_parhyp/plot_heat2D_p.py | 85 +-
 examples/arkode/CXX_serial/plot_heat2D.py | 53 +-
 examples/arkode/CXX_serial/plot_sol.py | 26 +-
 examples/arkode/CXX_xbraid/plot_heat2D.py | 51 +-
 .../arkode/C_manyvector/plot_brusselator1D.py | 40 +-
 .../arkode/C_openmp/plot_brusselator1D.py | 40 +-
 .../arkode/C_parallel/plot_brusselator1D.py | 63 +-
 examples/arkode/C_serial/ark_kepler_plot.py | 74 +-
 .../arkode/C_serial/plot_brusselator1D.py | 40 +-
 .../arkode/C_serial/plot_brusselator1D_FEM.py | 48 +-
 examples/arkode/C_serial/plot_heat1D.py | 20 +-
 examples/arkode/C_serial/plot_heat1D_adapt.py | 32 +-
 examples/arkode/C_serial/plot_sol.py | 26 +-
 examples/arkode/C_serial/plot_sol_log.py | 26 +-
 examples/cvode/CXX_parallel/plot_heat2D_p.py | 81 +-
 examples/cvode/CXX_parhyp/plot_heat2D_p.py | 81 +-
 examples/cvode/CXX_serial/plot_heat2D.py | 49 +-
 examples/cvode/serial/plot_cvParticle.py | 82 +-
 examples/cvode/serial/plot_cvPendulum.py | 63 +-
 examples/cvodes/serial/plot_cvsParticle.py | 82 +-
 examples/cvodes/serial/plot_cvsPendulum.py | 63 +-
 examples/utilities/plot_data_2d.py | 677 ++++---
 examples/utilities/plot_data_time_series.py | 97 +-
 test/compare_benchmarks.py | 97 +-
 test/compare_examples.py | 86 +-
 test/config_cmake.py | 1732 ++++++++++++-----
 test/notify.py | 53 +-
 test/test_install.py | 76 +-
 tools/log_example.py | 168 +-
 tools/log_example_mri.py | 70 +-
 tools/suntools/__init__.py | 1 -
 tools/suntools/csv.py | 5 +-
 40 files changed, 3345 insertions(+), 2086 deletions(-)

diff --git a/benchmarks/advection_reaction_3D/scripts/compare_error.py b/benchmarks/advection_reaction_3D/scripts/compare_error.py
index 2dc66d23fa..4dd1ff7ee1 100755
--- a/benchmarks/advection_reaction_3D/scripts/compare_error.py
+++ b/benchmarks/advection_reaction_3D/scripts/compare_error.py
@@ -15,7 +15,8 @@
 import glob
 import sys
 import matplotlib
-matplotlib.use('Agg')
+
+matplotlib.use("Agg")
 from mpl_toolkits.mplot3d import Axes3D
 import matplotlib.pyplot as plt
 import pandas as pd
@@ -25,57 +26,57 @@
 # load pickled data
 def load_data(file):
     data = np.load(file)
-    m = data['mesh']
-    t = data['t']
-    u = data['u']
-    v = data['v']
-    w = data['w']
+    m = data["mesh"]
+    t = data["t"]
+    u = data["u"]
+    v = data["v"]
+    w = data["w"]

-    hx = m[0,1] - m[0,0]
-    hy = m[1,1] - m[1,0]
-    hz = m[2,1] - m[2,0]
+    hx = m[0, 1] - m[0, 0]
+    hy = m[1, 1] - m[1, 0]
+    hz = m[2, 1] - m[2, 0]

-    return { 'm': m, 'h': (hx,hy,hz), 't': t, 'u': u, 'v': v, 'w': w }
+    return {"m": m, "h": (hx, hy, hz), "t": t, "u": u, "v": v, "w": w}


 # grid function norm
 def norm_3Dgrid(h, x, q=1):
-    hx,hy,hz = h
+    hx, hy, hz = h
     s = np.shape(x)
-    return (hx*hy*hz*np.sum(np.abs(x)**q, axis=(1,2,3)))**(1./q)
+    return (hx * hy * hz * np.sum(np.abs(x) ** q, axis=(1, 2, 3))) ** (1.0 / q)


 # load data files
-np111 = load_data('np-111/output-with-h-8.33e-02.npz')
-np211 = load_data('np-211/output-with-h-8.33e-02.npz')
-np311 = load_data('np-311/output-with-h-8.33e-02.npz')
-np131 = load_data('np-131/output-with-h-8.33e-02.npz')
-np113 = load_data('np-113/output-with-h-8.33e-02.npz') -np911 = load_data('np-911/output-with-h-8.33e-02.npz') +np111 = load_data("np-111/output-with-h-8.33e-02.npz") +np211 = load_data("np-211/output-with-h-8.33e-02.npz") +np311 = load_data("np-311/output-with-h-8.33e-02.npz") +np131 = load_data("np-131/output-with-h-8.33e-02.npz") +np113 = load_data("np-113/output-with-h-8.33e-02.npz") +np911 = load_data("np-911/output-with-h-8.33e-02.npz") # np133 = load_data('np-133/output-with-h-8.33e-02.npz') -np313 = load_data('np-313/output-with-h-8.33e-02.npz') -np331 = load_data('np-331/output-with-h-8.33e-02.npz') -np333 = load_data('np-333/output-with-h-8.33e-02.npz') +np313 = load_data("np-313/output-with-h-8.33e-02.npz") +np331 = load_data("np-331/output-with-h-8.33e-02.npz") +np333 = load_data("np-333/output-with-h-8.33e-02.npz") # np666 = load_data('np-666/output-with-h-8.33e-02.npz') -for component in ['u', 'v', 'w']: +for component in ["u", "v", "w"]: # Reference solution ref = np111[component] # Now compute E(h) = ||U(h) - \bar{U}(h)|| using the grid-function norm - E_np211 = norm_3Dgrid(np211['h'], np211[component] - ref) - E_np311 = norm_3Dgrid(np311['h'], np311[component] - ref) - E_np131 = norm_3Dgrid(np131['h'], np131[component] - ref) - E_np113 = norm_3Dgrid(np113['h'], np113[component] - ref) - E_np911 = norm_3Dgrid(np911['h'], np911[component] - ref) + E_np211 = norm_3Dgrid(np211["h"], np211[component] - ref) + E_np311 = norm_3Dgrid(np311["h"], np311[component] - ref) + E_np131 = norm_3Dgrid(np131["h"], np131[component] - ref) + E_np113 = norm_3Dgrid(np113["h"], np113[component] - ref) + E_np911 = norm_3Dgrid(np911["h"], np911[component] - ref) # E_np133 = norm_3Dgrid(np133['h'], np133[component] - ref) - E_np313 = norm_3Dgrid(np313['h'], np313[component] - ref) - E_np331 = norm_3Dgrid(np331['h'], np331[component] - ref) - E_np333 = norm_3Dgrid(np333['h'], np333[component] - ref) + E_np313 = norm_3Dgrid(np313["h"], np313[component] - ref) + E_np331 = norm_3Dgrid(np331["h"], np331[component] - ref) + E_np333 = norm_3Dgrid(np333["h"], np333[component] - ref) # E_np666 = norm_3Dgrid(np666['h'], np666[component] - ref) # Plot error across time - X, Y = np.meshgrid(np111['m'][0,:], np111['t']) + X, Y = np.meshgrid(np111["m"][0, :], np111["t"]) # fig = plt.figure() # ax = plt.subplot(311, projection='3d') # ax.plot_surface(X, Y, np.abs(np911[component][:,:,0,0] - ref[:,:,0,0])) @@ -83,17 +84,17 @@ def norm_3Dgrid(h, x, q=1): # ax.plot_surface(X, Y, np.abs(np911[component][:,0,:,0] - ref[:,0,:,0])) # ax = plt.subplot(313, projection='3d') # ax.plot_surface(X, Y, np.abs(np911[component][:,0,0,:] - ref[:,0,0,:])) - plt.plot(np111['t'], E_np211) - plt.plot(np111['t'], E_np131) - plt.plot(np111['t'], E_np113) - plt.plot(np111['t'], E_np911) + plt.plot(np111["t"], E_np211) + plt.plot(np111["t"], E_np131) + plt.plot(np111["t"], E_np113) + plt.plot(np111["t"], E_np911) # plt.plot(np111['t'], E_np133) - plt.plot(np111['t'], E_np313) - plt.plot(np111['t'], E_np331) - plt.plot(np111['t'], E_np333) + plt.plot(np111["t"], E_np313) + plt.plot(np111["t"], E_np331) + plt.plot(np111["t"], E_np333) # plt.plot(np111['t'], E_np666) # plt.legend(['2 1 1', '3 1 1', '1 3 3', '3 1 3', '3 3 1', '3 3 3', '6 6 6']) # plt.legend(['3 1 1', '1 3 1', '1 1 3', '9 1 1', '1 3 3', '3 1 3', '3 3 1']) - plt.ylabel('||E(hx,hy,hz)||') - plt.xlabel('time') - plt.savefig('compare-error-plot-%s.png' % component) + plt.ylabel("||E(hx,hy,hz)||") + plt.xlabel("time") + plt.savefig("compare-error-plot-%s.png" % component) diff 
--git a/benchmarks/advection_reaction_3D/scripts/compute_error.py b/benchmarks/advection_reaction_3D/scripts/compute_error.py index 2c01826b29..85f151ed59 100755 --- a/benchmarks/advection_reaction_3D/scripts/compute_error.py +++ b/benchmarks/advection_reaction_3D/scripts/compute_error.py @@ -15,7 +15,8 @@ import glob import sys import matplotlib -matplotlib.use('Agg') + +matplotlib.use("Agg") import matplotlib.pyplot as plt import pandas as pd import numpy as np @@ -24,65 +25,67 @@ # load pickled data def load_data(file): data = np.load(file) - m = data['mesh'] - t = data['t'] - u = data['u'] - v = data['v'] - w = data['w'] + m = data["mesh"] + t = data["t"] + u = data["u"] + v = data["v"] + w = data["w"] - hx = m[0,1] - m[0,0] - hy = m[1,1] - m[1,0] - hz = m[2,1] - m[2,0] + hx = m[0, 1] - m[0, 0] + hy = m[1, 1] - m[1, 0] + hz = m[2, 1] - m[2, 0] - return { 'm': m, 'h': (hx,hy,hz), 't': t, 'u': u, 'v': v, 'w': w } + return {"m": m, "h": (hx, hy, hz), "t": t, "u": u, "v": v, "w": w} # grid function norm def norm_3Dgrid(h, x, q=1): - hx,hy,hz = h - return (hx*hy*hz*np.sum(np.abs(x)**q, axis=(1,2,3)))**(1/q) + hx, hy, hz = h + return (hx * hy * hz * np.sum(np.abs(x) ** q, axis=(1, 2, 3))) ** (1 / q) # computer order of accuracy p def calc_order(h1, Eh1, h2, Eh2): - return np.log( Eh1/Eh2 ) / np.log( np.prod(h1)/np.prod(h2) ) + return np.log(Eh1 / Eh2) / np.log(np.prod(h1) / np.prod(h2)) # load data files -h_over_8 = load_data('middle-h/output-with-h-1.04e-02.npz') -h_over_4 = load_data('large-h/output-with-h-2.08e-02.npz') +h_over_8 = load_data("middle-h/output-with-h-1.04e-02.npz") +h_over_4 = load_data("large-h/output-with-h-2.08e-02.npz") # h_over_2 = load_data('larger-h/output-with-h-4.16e-02.npz') -h_over_1 = load_data('largest-h/output-with-h-8.33e-02.npz') +h_over_1 = load_data("largest-h/output-with-h-8.33e-02.npz") -for component in ['u', 'v', 'w']: +for component in ["u", "v", "w"]: # Restrict reference to the coarsest grid - ref = h_over_8[component][:,::8,::8,::8] + ref = h_over_8[component][:, ::8, ::8, ::8] # Now compute E(h) = ||U(h) - \bar{U}(h)|| using the grid-function norm - Eh_over_4 = norm_3Dgrid(h_over_4['h'], h_over_4[component][:,::4,::4,::4] - ref) - Eh_over_1 = norm_3Dgrid(h_over_1['h'], h_over_1[component][:,:,:,:] - ref) + Eh_over_4 = norm_3Dgrid(h_over_4["h"], h_over_4[component][:, ::4, ::4, ::4] - ref) + Eh_over_1 = norm_3Dgrid(h_over_1["h"], h_over_1[component][:, :, :, :] - ref) # Compute order p as in O(h^p) - p = calc_order(h_over_1['h'], Eh_over_1, h_over_4['h'], Eh_over_4) - print('min p for %s component: %.4f' % (component, np.min(p))) + p = calc_order(h_over_1["h"], Eh_over_1, h_over_4["h"], Eh_over_4) + print("min p for %s component: %.4f" % (component, np.min(p))) # Plot error across time plt.figure() - plt.plot(h_over_8['t'], Eh_over_4, 'r-') - plt.plot(h_over_8['t'], Eh_over_1, 'b-') - plt.ylabel('||E(hx,hy,hz)||') - plt.xlabel('time') - plt.savefig('error-in-time-plot-%s.png' % component) + plt.plot(h_over_8["t"], Eh_over_4, "r-") + plt.plot(h_over_8["t"], Eh_over_1, "b-") + plt.ylabel("||E(hx,hy,hz)||") + plt.xlabel("time") + plt.savefig("error-in-time-plot-%s.png" % component) # Plot error norm with respect to h plt.figure() - x = np.array([np.prod(h_over_4['h']), np.prod(h_over_1['h'])]) - plt.plot(x, x, 'k-') - plt.plot(x, x**2, 'k-') - plt.plot(x, [np.linalg.norm(Eh_over_4, np.Inf), np.linalg.norm(Eh_over_1, np.Inf)], 'r-') - plt.legend(['1st order', '2nd order', 'actual']) - plt.ylabel('|| ||E(hx,hy,hz)|| ||_inf') - plt.xlabel('hx * hy * hz') 
- plt.yscale('log') - plt.xscale('log') - plt.savefig('error-plot-%s.png' % component) + x = np.array([np.prod(h_over_4["h"]), np.prod(h_over_1["h"])]) + plt.plot(x, x, "k-") + plt.plot(x, x**2, "k-") + plt.plot( + x, [np.linalg.norm(Eh_over_4, np.Inf), np.linalg.norm(Eh_over_1, np.Inf)], "r-" + ) + plt.legend(["1st order", "2nd order", "actual"]) + plt.ylabel("|| ||E(hx,hy,hz)|| ||_inf") + plt.xlabel("hx * hy * hz") + plt.yscale("log") + plt.xscale("log") + plt.savefig("error-plot-%s.png" % component) diff --git a/benchmarks/advection_reaction_3D/scripts/make_plots.py b/benchmarks/advection_reaction_3D/scripts/make_plots.py index 69a0168d79..a4dfa87840 100755 --- a/benchmarks/advection_reaction_3D/scripts/make_plots.py +++ b/benchmarks/advection_reaction_3D/scripts/make_plots.py @@ -22,218 +22,265 @@ # ------------------------------------------------------------------------------ + # utility functions def parallel_coords(rank): - if (rank == 0): + if rank == 0: return [0, 0, 0] - if (rank == 1): + if rank == 1: return [0, 0, 1] - if (rank == 2): + if rank == 2: return [0, 1, 0] - if (rank == 3): + if rank == 3: return [0, 1, 1] - if (rank == 4): + if rank == 4: return [1, 0, 0] - if (rank == 5): + if rank == 5: return [1, 0, 1] - if (rank == 6): + if rank == 6: return [1, 1, 0] - if (rank == 7): + if rank == 7: return [1, 1, 1] -def xslice(u,it,ix): - return u[it,ix,:,:] -def yslice(u,it,iy): - return u[it,:,iy,:] +def xslice(u, it, ix): + return u[it, ix, :, :] + + +def yslice(u, it, iy): + return u[it, :, iy, :] + + +def zslice(u, it, iz): + return u[it, :, :, iz] -def zslice(u,it,iz): - return u[it,:,:,iz] -def xproj(u,it): - return np.average(u[it,:,:,:], axis=0) +def xproj(u, it): + return np.average(u[it, :, :, :], axis=0) -def yproj(u,it): - return np.average(u[it,:,:,:], axis=1) -def zproj(u,it): - return np.average(u[it,:,:,:], axis=2) +def yproj(u, it): + return np.average(u[it, :, :, :], axis=1) -def myplot(axis, X, Y, Z, xlabel='none', ylabel='none'): + +def zproj(u, it): + return np.average(u[it, :, :, :], axis=2) + + +def myplot(axis, X, Y, Z, xlabel="none", ylabel="none"): frame = axis.contourf(X, Y, Z) plt.colorbar(frame, ax=axis) - if (xlabel != 'none'): + if xlabel != "none": axis.set_xlabel(xlabel) - if (ylabel != 'none'): + if ylabel != "none": axis.set_ylabel(ylabel) - # read time mesh times = np.loadtxt("t.000000.txt") nt = times.size # read spatial mesh mesh = np.loadtxt("mesh.txt", dtype=float) -x = mesh[0,:] -y = mesh[1,:] -z = mesh[2,:] +x = mesh[0, :] +y = mesh[1, :] +z = mesh[2, :] nx = x.size ny = y.size nz = z.size # ensure that the run used exactly 1 or 8 MPI ranks for i in range(9): - if (exists("u.00000" + str(i) + ".txt" ) and - not exists("u.00000" + str(i+1) + ".txt" )): - nprocs = i+1 -if ((nprocs != 1) and (nprocs != 8)): + if exists("u.00000" + str(i) + ".txt") and not exists( + "u.00000" + str(i + 1) + ".txt" + ): + nprocs = i + 1 +if (nprocs != 1) and (nprocs != 8): print("make_plots.py error: run must have used either 1 or 8 MPI ranks") exit() # load data for run -if (nprocs == 1): - u = np.zeros((nt,nx,ny,nz), dtype=float) - v = np.zeros((nt,nx,ny,nz), dtype=float) - w = np.zeros((nt,nx,ny,nz), dtype=float) +if nprocs == 1: + u = np.zeros((nt, nx, ny, nz), dtype=float) + v = np.zeros((nt, nx, ny, nz), dtype=float) + w = np.zeros((nt, nx, ny, nz), dtype=float) udata = np.loadtxt("u.000000.txt") vdata = np.loadtxt("v.000000.txt") wdata = np.loadtxt("w.000000.txt") - if (nt != udata.shape[0]): + if nt != udata.shape[0]: print("make_plots.py error: 
mesh and data have incompatible sizes") exit() - if (nx*ny*nz != udata.shape[1]): + if nx * ny * nz != udata.shape[1]: print("make_plots.py error: mesh and data have incompatible sizes") exit() for it in range(nt): - u[it,:,:,:] = np.reshape(udata[it,:], (nx,ny,nz), order='C') - v[it,:,:,:] = np.reshape(vdata[it,:], (nx,ny,nz), order='C') - w[it,:,:,:] = np.reshape(wdata[it,:], (nx,ny,nz), order='C') + u[it, :, :, :] = np.reshape(udata[it, :], (nx, ny, nz), order="C") + v[it, :, :, :] = np.reshape(vdata[it, :], (nx, ny, nz), order="C") + w[it, :, :, :] = np.reshape(wdata[it, :], (nx, ny, nz), order="C") else: - u = np.zeros((nt,nx,ny,nz), dtype=float) - v = np.zeros((nt,nx,ny,nz), dtype=float) - w = np.zeros((nt,nx,ny,nz), dtype=float) - nxl = nx//2 - nyl = ny//2 - nzl = nz//2 + u = np.zeros((nt, nx, ny, nz), dtype=float) + v = np.zeros((nt, nx, ny, nz), dtype=float) + w = np.zeros((nt, nx, ny, nz), dtype=float) + nxl = nx // 2 + nyl = ny // 2 + nzl = nz // 2 for ip in range(8): udata = np.loadtxt("u.00000" + str(ip) + ".txt") vdata = np.loadtxt("v.00000" + str(ip) + ".txt") wdata = np.loadtxt("w.00000" + str(ip) + ".txt") - if (nt != udata.shape[0]): + if nt != udata.shape[0]: print("make_plots.py error: mesh and data have incompatible sizes") exit() - if (nxl*nyl*nzl != udata.shape[1]): + if nxl * nyl * nzl != udata.shape[1]: print("make_plots.py error: mesh and data have incompatible sizes") exit() coords = parallel_coords(ip) - ilo = coords[0]*nxl - ihi = (coords[0]+1)*nxl - jlo = coords[1]*nyl - jhi = (coords[1]+1)*nyl - klo = coords[2]*nzl - khi = (coords[2]+1)*nzl + ilo = coords[0] * nxl + ihi = (coords[0] + 1) * nxl + jlo = coords[1] * nyl + jhi = (coords[1] + 1) * nyl + klo = coords[2] * nzl + khi = (coords[2] + 1) * nzl for it in range(nt): - u[it,ilo:ihi,jlo:jhi,klo:khi] = np.reshape(udata[it,:], (nxl,nyl,nzl), order='C') - v[it,ilo:ihi,jlo:jhi,klo:khi] = np.reshape(vdata[it,:], (nxl,nyl,nzl), order='C') - w[it,ilo:ihi,jlo:jhi,klo:khi] = np.reshape(wdata[it,:], (nxl,nyl,nzl), order='C') + u[it, ilo:ihi, jlo:jhi, klo:khi] = np.reshape( + udata[it, :], (nxl, nyl, nzl), order="C" + ) + v[it, ilo:ihi, jlo:jhi, klo:khi] = np.reshape( + vdata[it, :], (nxl, nyl, nzl), order="C" + ) + w[it, ilo:ihi, jlo:jhi, klo:khi] = np.reshape( + wdata[it, :], (nxl, nyl, nzl), order="C" + ) # set meshgrid objects -xy0,xy1 = np.meshgrid(x, y) -yz0,yz1 = np.meshgrid(y, z) -xz0,xz1 = np.meshgrid(x, z) +xy0, xy1 = np.meshgrid(x, y) +yz0, yz1 = np.meshgrid(y, z) +xz0, xz1 = np.meshgrid(x, z) # generate plots sliceidx = 25 tslice = [0, 5, 10] -figsize = (9,7) +figsize = (9, 7) # xy slices at various times plt.figure(1) -fig, ((ax1,ax2,ax3), (ax4,ax5,ax6), (ax7,ax8,ax9)) = plt.subplots(3, 3, sharex=True, sharey=True, figsize=figsize) -myplot(ax1, xy0, xy1, zslice(u,tslice[0],sliceidx), ylabel = 'u') -myplot(ax2, xy0, xy1, zslice(u,tslice[1],sliceidx)) -myplot(ax3, xy0, xy1, zslice(u,tslice[2],sliceidx)) -myplot(ax4, xy0, xy1, zslice(v,tslice[0],sliceidx), ylabel = 'v') -myplot(ax5, xy0, xy1, zslice(v,tslice[1],sliceidx)) -myplot(ax6, xy0, xy1, zslice(v,tslice[2],sliceidx)) -myplot(ax7, xy0, xy1, zslice(w,tslice[0],sliceidx), ylabel = 'w', xlabel = 't = ' + str(times[0])) -myplot(ax8, xy0, xy1, zslice(w,tslice[1],sliceidx), xlabel = 't = ' + str(times[1])) -myplot(ax9, xy0, xy1, zslice(w,tslice[2],sliceidx), xlabel = 't = ' + str(times[2])) -plt.savefig('xy-slices.png') +fig, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots( + 3, 3, sharex=True, sharey=True, figsize=figsize +) +myplot(ax1, 
xy0, xy1, zslice(u, tslice[0], sliceidx), ylabel="u") +myplot(ax2, xy0, xy1, zslice(u, tslice[1], sliceidx)) +myplot(ax3, xy0, xy1, zslice(u, tslice[2], sliceidx)) +myplot(ax4, xy0, xy1, zslice(v, tslice[0], sliceidx), ylabel="v") +myplot(ax5, xy0, xy1, zslice(v, tslice[1], sliceidx)) +myplot(ax6, xy0, xy1, zslice(v, tslice[2], sliceidx)) +myplot( + ax7, + xy0, + xy1, + zslice(w, tslice[0], sliceidx), + ylabel="w", + xlabel="t = " + str(times[0]), +) +myplot(ax8, xy0, xy1, zslice(w, tslice[1], sliceidx), xlabel="t = " + str(times[1])) +myplot(ax9, xy0, xy1, zslice(w, tslice[2], sliceidx), xlabel="t = " + str(times[2])) +plt.savefig("xy-slices.png") # yz slices at various times plt.figure(2) -fig, ((ax1,ax2,ax3), (ax4,ax5,ax6), (ax7,ax8,ax9)) = plt.subplots(3, 3, sharex=True, sharey=True, figsize=figsize) -myplot(ax1, yz0, yz1, xslice(u,tslice[0],sliceidx), ylabel = 'u') -myplot(ax2, yz0, yz1, xslice(u,tslice[1],sliceidx)) -myplot(ax3, yz0, yz1, xslice(u,tslice[2],sliceidx)) -myplot(ax4, yz0, yz1, xslice(v,tslice[0],sliceidx), ylabel = 'v') -myplot(ax5, yz0, yz1, xslice(v,tslice[1],sliceidx)) -myplot(ax6, yz0, yz1, xslice(v,tslice[2],sliceidx)) -myplot(ax7, yz0, yz1, xslice(w,tslice[0],sliceidx), ylabel = 'w', xlabel = 't = ' + str(times[0])) -myplot(ax8, yz0, yz1, xslice(w,tslice[1],sliceidx), xlabel = 't = ' + str(times[1])) -myplot(ax9, yz0, yz1, xslice(w,tslice[2],sliceidx), xlabel = 't = ' + str(times[2])) -plt.savefig('yz-slices.png') +fig, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots( + 3, 3, sharex=True, sharey=True, figsize=figsize +) +myplot(ax1, yz0, yz1, xslice(u, tslice[0], sliceidx), ylabel="u") +myplot(ax2, yz0, yz1, xslice(u, tslice[1], sliceidx)) +myplot(ax3, yz0, yz1, xslice(u, tslice[2], sliceidx)) +myplot(ax4, yz0, yz1, xslice(v, tslice[0], sliceidx), ylabel="v") +myplot(ax5, yz0, yz1, xslice(v, tslice[1], sliceidx)) +myplot(ax6, yz0, yz1, xslice(v, tslice[2], sliceidx)) +myplot( + ax7, + yz0, + yz1, + xslice(w, tslice[0], sliceidx), + ylabel="w", + xlabel="t = " + str(times[0]), +) +myplot(ax8, yz0, yz1, xslice(w, tslice[1], sliceidx), xlabel="t = " + str(times[1])) +myplot(ax9, yz0, yz1, xslice(w, tslice[2], sliceidx), xlabel="t = " + str(times[2])) +plt.savefig("yz-slices.png") # xz slices at various times plt.figure(3) -fig, ((ax1,ax2,ax3), (ax4,ax5,ax6), (ax7,ax8,ax9)) = plt.subplots(3, 3, sharex=True, sharey=True, figsize=figsize) -myplot(ax1, xz0, xz1, yslice(u,tslice[0],sliceidx), ylabel ='u') -myplot(ax2, xz0, xz1, yslice(u,tslice[1],sliceidx)) -myplot(ax3, xz0, xz1, yslice(u,tslice[2],sliceidx)) -myplot(ax4, xz0, xz1, yslice(v,tslice[0],sliceidx), ylabel = 'v') -myplot(ax5, xz0, xz1, yslice(v,tslice[1],sliceidx)) -myplot(ax6, xz0, xz1, yslice(v,tslice[2],sliceidx)) -myplot(ax7, xz0, xz1, yslice(w,tslice[0],sliceidx), ylabel= 'w', xlabel = 't = ' + str(times[0])) -myplot(ax8, xz0, xz1, yslice(w,tslice[1],sliceidx), xlabel ='t = ' + str(times[1])) -myplot(ax9, xz0, xz1, yslice(w,tslice[2],sliceidx), xlabel = 't = ' + str(times[2])) -plt.savefig('xz-slices.png') +fig, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots( + 3, 3, sharex=True, sharey=True, figsize=figsize +) +myplot(ax1, xz0, xz1, yslice(u, tslice[0], sliceidx), ylabel="u") +myplot(ax2, xz0, xz1, yslice(u, tslice[1], sliceidx)) +myplot(ax3, xz0, xz1, yslice(u, tslice[2], sliceidx)) +myplot(ax4, xz0, xz1, yslice(v, tslice[0], sliceidx), ylabel="v") +myplot(ax5, xz0, xz1, yslice(v, tslice[1], sliceidx)) +myplot(ax6, xz0, xz1, yslice(v, tslice[2], sliceidx)) +myplot( 
+ ax7, + xz0, + xz1, + yslice(w, tslice[0], sliceidx), + ylabel="w", + xlabel="t = " + str(times[0]), +) +myplot(ax8, xz0, xz1, yslice(w, tslice[1], sliceidx), xlabel="t = " + str(times[1])) +myplot(ax9, xz0, xz1, yslice(w, tslice[2], sliceidx), xlabel="t = " + str(times[2])) +plt.savefig("xz-slices.png") # xy projection at various times plt.figure(4) -fig, ((ax1,ax2,ax3), (ax4,ax5,ax6), (ax7,ax8,ax9)) = plt.subplots(3, 3, sharex=True, sharey=True, figsize=figsize) -myplot(ax1, xy0, xy1, zproj(u,tslice[0]), ylabel = 'u') -myplot(ax2, xy0, xy1, zproj(u,tslice[1])) -myplot(ax3, xy0, xy1, zproj(u,tslice[2])) -myplot(ax4, xy0, xy1, zproj(v,tslice[0]), ylabel = 'v') -myplot(ax5, xy0, xy1, zproj(v,tslice[1])) -myplot(ax6, xy0, xy1, zproj(v,tslice[2])) -myplot(ax7, xy0, xy1, zproj(w,tslice[0]), ylabel = 'w', xlabel = 't = ' + str(times[0])) -myplot(ax8, xy0, xy1, zproj(w,tslice[1]), xlabel = 't = ' + str(times[1])) -myplot(ax9, xy0, xy1, zproj(w,tslice[2]), xlabel = 't = ' + str(times[2])) -plt.savefig('xy-projections.png') +fig, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots( + 3, 3, sharex=True, sharey=True, figsize=figsize +) +myplot(ax1, xy0, xy1, zproj(u, tslice[0]), ylabel="u") +myplot(ax2, xy0, xy1, zproj(u, tslice[1])) +myplot(ax3, xy0, xy1, zproj(u, tslice[2])) +myplot(ax4, xy0, xy1, zproj(v, tslice[0]), ylabel="v") +myplot(ax5, xy0, xy1, zproj(v, tslice[1])) +myplot(ax6, xy0, xy1, zproj(v, tslice[2])) +myplot(ax7, xy0, xy1, zproj(w, tslice[0]), ylabel="w", xlabel="t = " + str(times[0])) +myplot(ax8, xy0, xy1, zproj(w, tslice[1]), xlabel="t = " + str(times[1])) +myplot(ax9, xy0, xy1, zproj(w, tslice[2]), xlabel="t = " + str(times[2])) +plt.savefig("xy-projections.png") # yz projection at various times fig = plt.figure(5) -fig, ((ax1,ax2,ax3), (ax4,ax5,ax6), (ax7,ax8,ax9)) = plt.subplots(3, 3, sharex=True, sharey=True, figsize=figsize) -myplot(ax1, yz0, yz1, xproj(u,tslice[0]), ylabel = 'u') -myplot(ax2, yz0, yz1, xproj(u,tslice[1])) -myplot(ax3, yz0, yz1, xproj(u,tslice[2])) -myplot(ax4, yz0, yz1, xproj(v,tslice[0]), ylabel = 'v') -myplot(ax5, yz0, yz1, xproj(v,tslice[1])) -myplot(ax6, yz0, yz1, xproj(v,tslice[2])) -myplot(ax7, yz0, yz1, xproj(w,tslice[0]), ylabel = 'w', xlabel = 't = ' + str(times[0])) -myplot(ax8, yz0, yz1, xproj(w,tslice[1]), xlabel = 't = ' + str(times[1])) -myplot(ax9, yz0, yz1, xproj(w,tslice[2]), xlabel = 't = ' + str(times[2])) -plt.savefig('yz-projections.png') +fig, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots( + 3, 3, sharex=True, sharey=True, figsize=figsize +) +myplot(ax1, yz0, yz1, xproj(u, tslice[0]), ylabel="u") +myplot(ax2, yz0, yz1, xproj(u, tslice[1])) +myplot(ax3, yz0, yz1, xproj(u, tslice[2])) +myplot(ax4, yz0, yz1, xproj(v, tslice[0]), ylabel="v") +myplot(ax5, yz0, yz1, xproj(v, tslice[1])) +myplot(ax6, yz0, yz1, xproj(v, tslice[2])) +myplot(ax7, yz0, yz1, xproj(w, tslice[0]), ylabel="w", xlabel="t = " + str(times[0])) +myplot(ax8, yz0, yz1, xproj(w, tslice[1]), xlabel="t = " + str(times[1])) +myplot(ax9, yz0, yz1, xproj(w, tslice[2]), xlabel="t = " + str(times[2])) +plt.savefig("yz-projections.png") # xz projection at various times fig = plt.figure(6) -fig, ((ax1,ax2,ax3), (ax4,ax5,ax6), (ax7,ax8,ax9)) = plt.subplots(3, 3, sharex=True, sharey=True, figsize=figsize) -myplot(ax1, xz0, xz1, yproj(u,tslice[0]), ylabel = 'u') -myplot(ax2, xz0, xz1, yproj(u,tslice[1])) -myplot(ax3, xz0, xz1, yproj(u,tslice[2])) -myplot(ax4, xz0, xz1, yproj(v,tslice[0]), ylabel = 'v') -myplot(ax5, xz0, xz1, yproj(v,tslice[1])) 
-myplot(ax6, xz0, xz1, yproj(v,tslice[2])) -myplot(ax7, xz0, xz1, yproj(w,tslice[0]), ylabel = 'w', xlabel = 't = ' + str(times[0])) -myplot(ax8, xz0, xz1, yproj(w,tslice[1]), xlabel = 't = ' + str(times[1])) -myplot(ax9, xz0, xz1, yproj(w,tslice[2]), xlabel = 't = ' + str(times[2])) -plt.savefig('xz-projections.png') - -#plt.show() +fig, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots( + 3, 3, sharex=True, sharey=True, figsize=figsize +) +myplot(ax1, xz0, xz1, yproj(u, tslice[0]), ylabel="u") +myplot(ax2, xz0, xz1, yproj(u, tslice[1])) +myplot(ax3, xz0, xz1, yproj(u, tslice[2])) +myplot(ax4, xz0, xz1, yproj(v, tslice[0]), ylabel="v") +myplot(ax5, xz0, xz1, yproj(v, tslice[1])) +myplot(ax6, xz0, xz1, yproj(v, tslice[2])) +myplot(ax7, xz0, xz1, yproj(w, tslice[0]), ylabel="w", xlabel="t = " + str(times[0])) +myplot(ax8, xz0, xz1, yproj(w, tslice[1]), xlabel="t = " + str(times[1])) +myplot(ax9, xz0, xz1, yproj(w, tslice[2]), xlabel="t = " + str(times[2])) +plt.savefig("xz-projections.png") + +# plt.show() plt.close() ##### end of script ##### diff --git a/benchmarks/advection_reaction_3D/scripts/pickle_solution_output.py b/benchmarks/advection_reaction_3D/scripts/pickle_solution_output.py index 407c34921a..a51fade40f 100755 --- a/benchmarks/advection_reaction_3D/scripts/pickle_solution_output.py +++ b/benchmarks/advection_reaction_3D/scripts/pickle_solution_output.py @@ -19,39 +19,66 @@ import numpy as np # load mesh data file -mesh = np.loadtxt('mesh.txt', dtype=np.double) +mesh = np.loadtxt("mesh.txt", dtype=np.double) # X,Y,Z = np.meshgrid(mesh[0,:], mesh[1,:], mesh[2,:]) # calculate h -hx = mesh[0,1] - mesh[0,0] -hy = mesh[1,1] - mesh[1,0] -hz = mesh[2,1] - mesh[2,0] -nx = len(mesh[0,:]) -ny = len(mesh[1,:]) -nz = len(mesh[2,:]) +hx = mesh[0, 1] - mesh[0, 0] +hy = mesh[1, 1] - mesh[1, 0] +hz = mesh[2, 1] - mesh[2, 0] +nx = len(mesh[0, :]) +ny = len(mesh[1, :]) +nz = len(mesh[2, :]) print("nx, ny, nz = %d, %d, %d" % (nx, ny, nz)) print("hx, hy, hz = %g, %g, %g" % (hx, hy, hz)) # load output time file -times = np.loadtxt('t.000000.txt', dtype=np.double) +times = np.loadtxt("t.000000.txt", dtype=np.double) # load solution data files -ufiles = glob.glob('u.' + ('[0-9]'*6) + '.txt'); ufiles.sort() -vfiles = glob.glob('v.' + ('[0-9]'*6) + '.txt'); vfiles.sort() -wfiles = glob.glob('w.' + ('[0-9]'*6) + '.txt'); wfiles.sort() +ufiles = glob.glob("u." + ("[0-9]" * 6) + ".txt") +ufiles.sort() +vfiles = glob.glob("v." + ("[0-9]" * 6) + ".txt") +vfiles.sort() +wfiles = glob.glob("w." 
+ ("[0-9]" * 6) + ".txt") +wfiles.sort() udata = [] vdata = [] wdata = [] sys.stdout.write("reading 1/%d...\r" % len(ufiles)) sys.stdout.flush() -for idx in range(0,len(ufiles)): - sys.stdout.write("reading %d/%d...\r" % (idx+1,len(ufiles))) +for idx in range(0, len(ufiles)): + sys.stdout.write("reading %d/%d...\r" % (idx + 1, len(ufiles))) sys.stdout.flush() - udata.append(pd.read_csv(ufiles[idx], header=None, delimiter=' ', skipinitialspace=True, dtype=np.double)) - vdata.append(pd.read_csv(vfiles[idx], header=None, delimiter=' ', skipinitialspace=True, dtype=np.double)) - wdata.append(pd.read_csv(wfiles[idx], header=None, delimiter=' ', skipinitialspace=True, dtype=np.double)) + udata.append( + pd.read_csv( + ufiles[idx], + header=None, + delimiter=" ", + skipinitialspace=True, + dtype=np.double, + ) + ) + vdata.append( + pd.read_csv( + vfiles[idx], + header=None, + delimiter=" ", + skipinitialspace=True, + dtype=np.double, + ) + ) + wdata.append( + pd.read_csv( + wfiles[idx], + header=None, + delimiter=" ", + skipinitialspace=True, + dtype=np.double, + ) + ) sys.stdout.write("\n") sys.stdout.flush() @@ -69,5 +96,6 @@ # save data to pickle print("saving...") -np.savez_compressed('output-with-h-%.2e.npz' % hx, t=times, u=udata, v=vdata, w=wdata, mesh=mesh) - +np.savez_compressed( + "output-with-h-%.2e.npz" % hx, t=times, u=udata, v=vdata, w=wdata, mesh=mesh +) diff --git a/benchmarks/nvector/plot_nvector_performance_results.py b/benchmarks/nvector/plot_nvector_performance_results.py index 02c45665e6..55bb5ae32e 100755 --- a/benchmarks/nvector/plot_nvector_performance_results.py +++ b/benchmarks/nvector/plot_nvector_performance_results.py @@ -20,6 +20,7 @@ # indicates if timing was enabled. # ----------------------------------------------------------------------------- + def main(): import argparse @@ -35,42 +36,60 @@ def main(): import matplotlib.ticker as mtick parser = argparse.ArgumentParser( - description='Plot data from NVector performance tests') - - parser.add_argument('op', type=str, - help='Which NVector operation to plot') - - parser.add_argument('datadir', type=str, - help='Directory where test output files are located') - - parser.add_argument('--timevelem', dest='timevelem', action='store_true', - help='Turn on plots for time vs number of elements') - - parser.add_argument('--noheatmap', dest='heatmap', action='store_false', - help='Turn off heatmap plots') - - parser.add_argument('--loglog', dest='loglog', action='store_true', - help='Generate loglog plots for time vs number of elements') - - parser.add_argument('--show', dest='show', action='store_true', - help='Display plots rather than saving to file') - - parser.add_argument('--debug', dest='debug', action='store_true', - help='Turn on debugging output') + description="Plot data from NVector performance tests" + ) + + parser.add_argument("op", type=str, help="Which NVector operation to plot") + + parser.add_argument( + "datadir", type=str, help="Directory where test output files are located" + ) + + parser.add_argument( + "--timevelem", + dest="timevelem", + action="store_true", + help="Turn on plots for time vs number of elements", + ) + + parser.add_argument( + "--noheatmap", + dest="heatmap", + action="store_false", + help="Turn off heatmap plots", + ) + + parser.add_argument( + "--loglog", + dest="loglog", + action="store_true", + help="Generate loglog plots for time vs number of elements", + ) + + parser.add_argument( + "--show", + dest="show", + action="store_true", + help="Display plots rather than saving to 
file", + ) + + parser.add_argument( + "--debug", dest="debug", action="store_true", help="Turn on debugging output" + ) # parse command line args args = parser.parse_args() - if (args.debug): + if args.debug: print(args) # check for test data directory - if (not os.path.isdir(args.datadir)): - print("ERROR:",args.datadir,"does not exist") + if not os.path.isdir(args.datadir): + print("ERROR:", args.datadir, "does not exist") sys.exit() # sort output files - output = sorted(glob.glob(args.datadir+'/output*.txt')) + output = sorted(glob.glob(args.datadir + "/output*.txt")) # if (args.debug): # print("output files") @@ -80,8 +99,8 @@ def main(): # figure out vector sizes, number of vectors, and number of sums nelem = [] - nvec = [] - nsum = [] + nvec = [] + nsum = [] ntest = [] # parse file names to get input parameters @@ -95,32 +114,32 @@ def main(): ns = int(split_fout[3]) nt = int(split_fout[4]) - if (not ne in nelem): + if not ne in nelem: nelem.append(ne) - if (not nv in nvec): + if not nv in nvec: nvec.append(nv) - if (not ns in nsum): + if not ns in nsum: nsum.append(ns) - if (not nt in ntest): + if not nt in ntest: ntest.append(nt) - if (len(ntest) != 1): + if len(ntest) != 1: print("Warning: Unequal numbers of tests") - if (args.debug): - print("nelem:",nelem, len(nelem)) - print("nvec: ",nvec, len(nvec)) - print("nsum: ",nsum, len(nsum)) - print("ntest:",ntest, len(ntest)) + if args.debug: + print("nelem:", nelem, len(nelem)) + print("nvec: ", nvec, len(nvec)) + print("nsum: ", nsum, len(nsum)) + print("ntest:", ntest, len(ntest)) # allocate numpy arrays for timing data - avg_fused = np.zeros([len(nvec), len(nelem)]) + avg_fused = np.zeros([len(nvec), len(nelem)]) sdev_fused = np.zeros([len(nvec), len(nelem)]) - avg_unfused = np.zeros([len(nvec), len(nelem)]) + avg_unfused = np.zeros([len(nvec), len(nelem)]) sdev_unfused = np.zeros([len(nvec), len(nelem)]) avg_ratio = np.zeros([len(nvec), len(nelem)]) @@ -131,8 +150,8 @@ def main(): # read output files for f in output: - if (args.debug): - print("Reading:",f) + if args.debug: + print("Reading:", f) # get test inputs from file name split_fout = f.split("/")[-1] @@ -149,15 +168,15 @@ def main(): split_line = shlex.split(line) # skip blank lines - if (not split_line): + if not split_line: continue # tests finished, stop reading file - if (split_line[0] == "Finished"): + if split_line[0] == "Finished": break # check if the operation is the one we want and get data - if (args.op == split_line[0]): + if args.op == split_line[0]: i = nvec.index(nv) j = nelem.index(ne) @@ -165,15 +184,15 @@ def main(): # NVEC[i][j] = nv # NELM[i][j] = ne - avg_fused[i][j] = float(split_line[1]) + avg_fused[i][j] = float(split_line[1]) sdev_fused[i][j] = float(split_line[2]) - avg_unfused[i][j] = float(split_line[5]) + avg_unfused[i][j] = float(split_line[5]) sdev_unfused[i][j] = float(split_line[6]) avg_ratio[i][j] = avg_fused[i][j] / avg_unfused[i][j] - if (args.debug): + if args.debug: print(avg_fused) print(avg_unfused) print(avg_ratio) @@ -185,35 +204,37 @@ def main(): # -------------------------------------------------------------------------- # allocate arrays for the upper and lower bounds of the confidence interval - lower_fused = np.zeros([len(nvec), len(nelem)]) - upper_fused = np.zeros([len(nvec), len(nelem)]) + lower_fused = np.zeros([len(nvec), len(nelem)]) + upper_fused = np.zeros([len(nvec), len(nelem)]) lower_unfused = np.zeros([len(nvec), len(nelem)]) upper_unfused = np.zeros([len(nvec), len(nelem)]) # critical value for 99% confidence 
interval - if (ntest[0] < 30): + if ntest[0] < 30: # student's t distribution - cv = st.t.interval(0.99, ntest[0]-1)[1] + cv = st.t.interval(0.99, ntest[0] - 1)[1] else: # normal distribution cv = st.norm.ppf(0.995) # confidence intervals - cdev_fused = cv * sdev_fused / np.sqrt(ntest[0]) + cdev_fused = cv * sdev_fused / np.sqrt(ntest[0]) lower_fused = avg_fused - cdev_fused upper_fused = avg_fused + cdev_fused - cdev_unfused = cv * sdev_unfused / np.sqrt(ntest[0]) + cdev_unfused = cv * sdev_unfused / np.sqrt(ntest[0]) lower_unfused = avg_unfused - cdev_unfused upper_unfused = avg_unfused + cdev_unfused # check if the fused average times are within the unfused confidence interval - fused_in = np.where(np.logical_and(avg_fused < upper_unfused, - avg_fused > lower_unfused)) + fused_in = np.where( + np.logical_and(avg_fused < upper_unfused, avg_fused > lower_unfused) + ) # check if the unfused average times are within the fused confidence interval - unfused_in = np.where(np.logical_and(avg_unfused < upper_fused, - avg_unfused > lower_fused)) + unfused_in = np.where( + np.logical_and(avg_unfused < upper_fused, avg_unfused > lower_fused) + ) # get which numbers of vectors and elements for fused tests are in the # confidence interval of the unfused times @@ -226,7 +247,7 @@ def main(): ef[i] = np.log2(nelem[fused_in[1][i]]) df[i] = 1 - if (args.debug): + if args.debug: print(vf) print(ef) @@ -241,7 +262,7 @@ def main(): eu[i] = np.log2(nelem[unfused_in[1][i]]) du[i] = 1 - if (args.debug): + if args.debug: print(vu) print(eu) @@ -266,20 +287,20 @@ def main(): # print(NELM) # print(avg_ratio) for i in reversed(range(len(nvec))): - print('%2d' % int(i+1), str(avg_ratio[i]).replace('\n', '')) + print("%2d" % int(i + 1), str(avg_ratio[i]).replace("\n", "")) print # -------------------------------------------------------------------------- # Heat Map # -------------------------------------------------------------------------- - if (args.heatmap): + if args.heatmap: - x = np.arange(len(nelem)+1)-0.5 # x = log2(number of elements) = 0,1,2,... - y = np.arange(len(nvec)+1)+1.5 # y = number of vectors = 2,3,4,... + x = np.arange(len(nelem) + 1) - 0.5 # x = log2(number of elements) = 0,1,2,... + y = np.arange(len(nvec) + 1) + 1.5 # y = number of vectors = 2,3,4,... # y = np.arange(len(nvec)+1)+0.5 # y = number of vectors = 1,2,3,... 
X, Y = np.meshgrid(x, y) - if (args.debug): + if args.debug: print(x) print(y) @@ -287,67 +308,79 @@ def main(): rmax = np.amax(avg_ratio) rmin = np.amin(avg_ratio) - ext = 'neither' - if (rmin > 1): - cmap='Reds' - norm = mpl.colors.Normalize(vmin=rmin, vmax=min(rmax,2)) - v = np.linspace(rmin, min(rmax,2), 10, endpoint=True) - if (rmax > 2): - ext = 'max' + ext = "neither" + if rmin > 1: + cmap = "Reds" + norm = mpl.colors.Normalize(vmin=rmin, vmax=min(rmax, 2)) + v = np.linspace(rmin, min(rmax, 2), 10, endpoint=True) + if rmax > 2: + ext = "max" else: - cmap='seismic' - if (rmax-1 > 1): + cmap = "seismic" + if rmax - 1 > 1: rrange = 1 - ext = 'max' + ext = "max" else: - rrange = max(abs(rmax-1),abs(rmin-1)) + rrange = max(abs(rmax - 1), abs(rmin - 1)) - v1 = np.linspace(1-rrange, 1, 5, endpoint=True) - v2 = np.linspace(1, 1+rrange, 5, endpoint=True) - v = np.append(v1,v2[1:]) - norm = mpl.colors.Normalize(vmin=1-rrange, vmax=1+rrange) + v1 = np.linspace(1 - rrange, 1, 5, endpoint=True) + v2 = np.linspace(1, 1 + rrange, 5, endpoint=True) + v = np.append(v1, v2[1:]) + norm = mpl.colors.Normalize(vmin=1 - rrange, vmax=1 + rrange) # plot heatmap plt.pcolormesh(X, Y, avg_ratio, cmap=cmap, norm=norm) clb = plt.colorbar(ticks=v, extend=ext) - clb.ax.set_title('Max = {0:.2f}\nMin = {1:.2f}'.format(rmax,rmin)) + clb.ax.set_title("Max = {0:.2f}\nMin = {1:.2f}".format(rmax, rmin)) # aff markers to indicate if the average time falls in a confidence interval - plt.scatter(ef,vf,s=40,marker='^',c=df,label='fused') - plt.scatter(eu,vu,s=40,marker='v',c=du,label='unfused') + plt.scatter(ef, vf, s=40, marker="^", c=df, label="fused") + plt.scatter(eu, vu, s=40, marker="v", c=du, label="unfused") plt.legend(loc=9, bbox_to_anchor=(0.5, -0.1), ncol=2) # add legend for scatter plot art = [] - lgd = plt.legend(loc='lower right', bbox_to_anchor=(1.34, -0.17)) + lgd = plt.legend(loc="lower right", bbox_to_anchor=(1.34, -0.17)) art.append(lgd) # add labels and title plt.xticks(np.log2(nelem)) plt.yticks(nvec) - plt.xlabel('log2(num elements)') - plt.ylabel('num vectors') - plt.title('avg fused time / avg unfused time \n'+args.op) + plt.xlabel("log2(num elements)") + plt.ylabel("num vectors") + plt.title("avg fused time / avg unfused time \n" + args.op) # display or save figure - if (args.show): + if args.show: plt.show() else: - plt.savefig(args.op+'-heatmap.pdf', - additional_artists=art, - bbox_inches="tight") + plt.savefig( + args.op + "-heatmap.pdf", additional_artists=art, bbox_inches="tight" + ) plt.close() # -------------------------------------------------------------------------- # Time vs Number of Elements Plots # -------------------------------------------------------------------------- - if (args.timevelem): - - colors = ['#000000','#a6cee3','#1f78b4','#b2df8a','#33a02c', - '#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6', - '#6a3d9a','#ffff99','#b15928'] - - hatch = [ '/','\\','-','+','x','o','O','.','*'] + if args.timevelem: + + colors = [ + "#000000", + "#a6cee3", + "#1f78b4", + "#b2df8a", + "#33a02c", + "#fb9a99", + "#e31a1c", + "#fdbf6f", + "#ff7f00", + "#cab2d6", + "#6a3d9a", + "#ffff99", + "#b15928", + ] + + hatch = ["/", "\\", "-", "+", "x", "o", "O", ".", "*"] # -------------------------------------------------------------------------- # Combined Number of Vectors Plots @@ -359,38 +392,45 @@ def main(): i = nvec.index(nv) - if (args.loglog): - ax.loglog(nelem, avg_fused[i], - color=colors[i], linestyle='-', label=nv) - ax.loglog(nelem, avg_unfused[i], - color=colors[i], linestyle='--', 
label=None) + if args.loglog: + ax.loglog(nelem, avg_fused[i], color=colors[i], linestyle="-", label=nv) + ax.loglog( + nelem, avg_unfused[i], color=colors[i], linestyle="--", label=None + ) else: - ax.plot(nelem, avg_fused[i], - color=colors[i], linestyle='-', label=nv) - ax.plot(nelem, avg_unfused[i], - color=colors[i], linestyle='--', label=None) + ax.plot(nelem, avg_fused[i], color=colors[i], linestyle="-", label=nv) + ax.plot( + nelem, avg_unfused[i], color=colors[i], linestyle="--", label=None + ) # plot confidence interval - ax.fill_between(nelem, lower_fused[i], upper_fused[i], - color=colors[i], alpha=0.3) - ax.fill_between(nelem, lower_unfused[i], upper_unfused[i], - color=colors[i], hatch='.', alpha=0.3) + ax.fill_between( + nelem, lower_fused[i], upper_fused[i], color=colors[i], alpha=0.3 + ) + ax.fill_between( + nelem, + lower_unfused[i], + upper_unfused[i], + color=colors[i], + hatch=".", + alpha=0.3, + ) ax.legend() ax.grid() - plt.title('Average Time Fused vs Unfused \n'+args.op) - plt.xlabel('vector length') - plt.ylabel('time (s)') + plt.title("Average Time Fused vs Unfused \n" + args.op) + plt.xlabel("vector length") + plt.ylabel("time (s)") - if (args.show): + if args.show: plt.show() else: - if (args.loglog): - fname=args.op+'-nvec-all-loglog.pdf' + if args.loglog: + fname = args.op + "-nvec-all-loglog.pdf" else: - fname=args.op+'-nvec-all.pdf' - plt.ticklabel_format(axis='both',style='sci') + fname = args.op + "-nvec-all.pdf" + plt.ticklabel_format(axis="both", style="sci") plt.savefig(fname) plt.close() @@ -400,49 +440,70 @@ def main(): for nv in nvec: fig = plt.figure() - ax = fig.add_subplot(111) + ax = fig.add_subplot(111) idx = nvec.index(nv) # plot run times - if (args.loglog): - ax.loglog(nelem, avg_fused[idx], - color='red', linestyle='-', label='Fused') - ax.loglog(nelem, avg_unfused[idx], - color='blue', linestyle='--', label='Unfused') + if args.loglog: + ax.loglog( + nelem, avg_fused[idx], color="red", linestyle="-", label="Fused" + ) + ax.loglog( + nelem, + avg_unfused[idx], + color="blue", + linestyle="--", + label="Unfused", + ) else: - ax.plot(nelem, avg_fused[idx], - color='red', linestyle='-', label='Fused') - ax.plot(nelem, avg_unfused[idx], - color='blue', linestyle='--', label='Unfused') + ax.plot( + nelem, avg_fused[idx], color="red", linestyle="-", label="Fused" + ) + ax.plot( + nelem, + avg_unfused[idx], + color="blue", + linestyle="--", + label="Unfused", + ) # plot confidence intervals - ax.fill_between(nelem, lower_fused[idx], upper_fused[idx], - color='red', alpha=0.2) - ax.fill_between(nelem, lower_unfused[idx], upper_unfused[idx], - color='blue', hatch='.', alpha=0.2) + ax.fill_between( + nelem, lower_fused[idx], upper_fused[idx], color="red", alpha=0.2 + ) + ax.fill_between( + nelem, + lower_unfused[idx], + upper_unfused[idx], + color="blue", + hatch=".", + alpha=0.2, + ) ax.legend() ax.grid() - plt.title('Average Time Fused vs Unfused with '+str(nv)+' vectors\n'+args.op) - plt.xlabel('vector length') - ax.set_ylabel('time (s)') + plt.title( + "Average Time Fused vs Unfused with " + str(nv) + " vectors\n" + args.op + ) + plt.xlabel("vector length") + ax.set_ylabel("time (s)") - if (args.show): + if args.show: plt.show() else: - if (args.loglog): - fname=args.op+'-nvec-'+str(nv)+'-loglog.pdf' + if args.loglog: + fname = args.op + "-nvec-" + str(nv) + "-loglog.pdf" else: - fname=args.op+'-nvec-'+str(nv)+'.pdf' - plt.ticklabel_format(axis='both',style='sci') + fname = args.op + "-nvec-" + str(nv) + ".pdf" + plt.ticklabel_format(axis="both", 
style="sci") plt.savefig(fname) plt.close() + # =============================================================================== if __name__ == "__main__": main() # EOF - diff --git a/benchmarks/nvector/plot_nvector_performance_speedup.py b/benchmarks/nvector/plot_nvector_performance_speedup.py index fb421f5573..623d716c01 100755 --- a/benchmarks/nvector/plot_nvector_performance_speedup.py +++ b/benchmarks/nvector/plot_nvector_performance_speedup.py @@ -20,6 +20,7 @@ # indicates if timing was enabled. # ----------------------------------------------------------------------------- + def main(): import argparse @@ -35,46 +36,61 @@ def main(): import matplotlib.ticker as mtick parser = argparse.ArgumentParser( - description='Plot data from NVector performance tests') - - parser.add_argument('op', type=str, - help='Which NVector operation to plot') - - parser.add_argument('datadir', type=str, - help='Directory where test output files are located') - - parser.add_argument('--noplots', dest='noplots', action='store_true', - help='Turn on plots for time vs number of elements') - - parser.add_argument('--logx', dest='logx', action='store_true', - help='Generate plots for speedup with log scale for the x axis (number of elements') - - parser.add_argument('--fused', dest='fused', action='store_true', - help='Operation is a fused op') - - parser.add_argument('--show', dest='show', action='store_true', - help='Display plots rather than saving to file') - - parser.add_argument('--debug', dest='debug', action='store_true', - help='Turn on debugging output') + description="Plot data from NVector performance tests" + ) + + parser.add_argument("op", type=str, help="Which NVector operation to plot") + + parser.add_argument( + "datadir", type=str, help="Directory where test output files are located" + ) + + parser.add_argument( + "--noplots", + dest="noplots", + action="store_true", + help="Turn on plots for time vs number of elements", + ) + + parser.add_argument( + "--logx", + dest="logx", + action="store_true", + help="Generate plots for speedup with log scale for the x axis (number of elements", + ) + + parser.add_argument( + "--fused", dest="fused", action="store_true", help="Operation is a fused op" + ) + + parser.add_argument( + "--show", + dest="show", + action="store_true", + help="Display plots rather than saving to file", + ) + + parser.add_argument( + "--debug", dest="debug", action="store_true", help="Turn on debugging output" + ) # parse command line args args = parser.parse_args() - if (args.debug): + if args.debug: print(args) # check for test data directory - if (not os.path.isdir(args.datadir)): - print("ERROR:",args.datadir,"does not exist") + if not os.path.isdir(args.datadir): + print("ERROR:", args.datadir, "does not exist") sys.exit() # sort output files - output_baseline = sorted(glob.glob(args.datadir+'/output*-old.log')) - output_new = sorted(glob.glob(args.datadir+'/output*-new.log')) + output_baseline = sorted(glob.glob(args.datadir + "/output*-old.log")) + output_new = sorted(glob.glob(args.datadir + "/output*-new.log")) output = output_baseline + output_new - if (args.debug): + if args.debug: print("output files") print(len(output)) for i in range(len(output)): @@ -82,8 +98,8 @@ def main(): # figure out vector sizes, number of vectors, and number of sums nelem = [] - nvec = [] - nsum = [] + nvec = [] + nsum = [] ntest = [] # parse file names to get input parameters @@ -97,40 +113,40 @@ def main(): ns = int(split_fout[3]) nt = int(split_fout[4]) - if (not ne in nelem): + if not ne 
in nelem: nelem.append(ne) - if (not nv in nvec): + if not nv in nvec: nvec.append(nv) - if (not ns in nsum): + if not ns in nsum: nsum.append(ns) - if (not nt in ntest): + if not nt in ntest: ntest.append(nt) - if (len(ntest) != 1): + if len(ntest) != 1: print("Warning: Unequal numbers of tests") nelem.sort() - if (args.debug): - print("nelem:",nelem, len(nelem)) - print("nvec: ",nvec, len(nvec)) - print("nsum: ",nsum, len(nsum)) - print("ntest:",ntest, len(ntest)) + if args.debug: + print("nelem:", nelem, len(nelem)) + print("nvec: ", nvec, len(nvec)) + print("nsum: ", nsum, len(nsum)) + print("ntest:", ntest, len(ntest)) # allocate numpy arrays for timing data - avg_denom = np.zeros([len(nvec), len(nelem)]) + avg_denom = np.zeros([len(nvec), len(nelem)]) sdev_denom = np.zeros([len(nvec), len(nelem)]) - avg_numer = np.zeros([len(nvec), len(nelem)]) + avg_numer = np.zeros([len(nvec), len(nelem)]) sdev_numer = np.zeros([len(nvec), len(nelem)]) avg_ratio = np.zeros([len(nvec), len(nelem)]) # read 'baseline' files for f in output_baseline: - if (args.debug): - print("Reading:",f) + if args.debug: + print("Reading:", f) # get test inputs from file name split_fout = f.split("/")[-1] split_fout = split_fout.split("_") @@ -142,22 +158,22 @@ def main(): # split line into list split_line = shlex.split(line) # skip blank lines - if (not split_line): + if not split_line: continue # tests finished, stop reading file - if (split_line[0] == "Finished"): + if split_line[0] == "Finished": break # check if the operation is the one we want and get data - if (args.op == split_line[0]): + if args.op == split_line[0]: i = nvec.index(nv) j = nelem.index(ne) - avg_numer[i][j] = float(split_line[1]) + avg_numer[i][j] = float(split_line[1]) sdev_numer[i][j] = float(split_line[2]) # read output files for f in output_new: - if (args.debug): - print("Reading:",f) + if args.debug: + print("Reading:", f) # get test inputs from file name split_fout = f.split("/")[-1] split_fout = split_fout.split("_") @@ -169,16 +185,16 @@ def main(): # split line into list split_line = shlex.split(line) # skip blank lines - if (not split_line): + if not split_line: continue # tests finished, stop reading file - if (split_line[0] == "Finished"): + if split_line[0] == "Finished": break # check if the operation is the one we want and get data - if (args.op == split_line[0]): + if args.op == split_line[0]: i = nvec.index(nv) j = nelem.index(ne) - avg_denom[i][j] = float(split_line[1]) + avg_denom[i][j] = float(split_line[1]) sdev_denom[i][j] = float(split_line[2]) avg_ratio[i][j] = avg_numer[i][j] / avg_denom[i][j] @@ -187,35 +203,37 @@ def main(): # -------------------------------------------------------------------------- # allocate arrays for the upper and lower bounds of the confidence interval - lower_denom = np.zeros([len(nvec), len(nelem)]) - upper_denom = np.zeros([len(nvec), len(nelem)]) + lower_denom = np.zeros([len(nvec), len(nelem)]) + upper_denom = np.zeros([len(nvec), len(nelem)]) lower_numer = np.zeros([len(nvec), len(nelem)]) upper_numer = np.zeros([len(nvec), len(nelem)]) # critical value for 99% confidence interval - if (ntest[0] < 30): + if ntest[0] < 30: # student's t distribution - cv = st.t.interval(0.99, ntest[0]-1)[1] + cv = st.t.interval(0.99, ntest[0] - 1)[1] else: # normal distribution cv = st.norm.ppf(0.995) # confidence intervals - cdev_denom = cv * sdev_denom / np.sqrt(ntest[0]) + cdev_denom = cv * sdev_denom / np.sqrt(ntest[0]) lower_denom = avg_denom - cdev_denom upper_denom = avg_denom + cdev_denom - 
cdev_numer = cv * sdev_numer / np.sqrt(ntest[0]) + cdev_numer = cv * sdev_numer / np.sqrt(ntest[0]) lower_numer = avg_numer - cdev_numer upper_numer = avg_numer + cdev_numer # check if the new average times are within the baseline confidence interval - denom_in = np.where(np.logical_and(avg_denom < upper_numer, - avg_denom > lower_numer)) + denom_in = np.where( + np.logical_and(avg_denom < upper_numer, avg_denom > lower_numer) + ) # check if the baseline average times are within the new confidence interval - numer_in = np.where(np.logical_and(avg_numer < upper_denom, - avg_numer > lower_denom)) + numer_in = np.where( + np.logical_and(avg_numer < upper_denom, avg_numer > lower_denom) + ) # get which numbers of vectors and elements for new tests are in the # confidence interval of the baseline times @@ -228,9 +246,9 @@ def main(): ef[i] = np.log2(nelem[denom_in[1][i]]) df[i] = 1 - if (args.debug): - print('vf:', vf) - print('ef:', ef) + if args.debug: + print("vf:", vf) + print("ef:", ef) # get which numbers of vectors and elements for baseline tests are in the # confidence interval of the new times @@ -243,9 +261,9 @@ def main(): eu[i] = np.log2(nelem[numer_in[1][i]]) du[i] = 1 - if (args.debug): - print('vu:', vu) - print('eu:', eu) + if args.debug: + print("vu:", vu) + print("eu:", eu) # -------------------------------------------------------------------------- # Output ratios @@ -256,29 +274,41 @@ def main(): print("avg. new") for i in reversed(range(len(nvec))): - print('%2d' % int(i+1), str(avg_denom[i]).replace('\n', '')) + print("%2d" % int(i + 1), str(avg_denom[i]).replace("\n", "")) print() print("avg. baseline") for i in reversed(range(len(nvec))): - print('%2d' % int(i+1), str(avg_numer[i]).replace('\n', '')) + print("%2d" % int(i + 1), str(avg_numer[i]).replace("\n", "")) print() print("avg. ratio (speedup)") for i in reversed(range(len(nvec))): - print('%2d' % int(i+1), str(avg_ratio[i]).replace('\n', '')) + print("%2d" % int(i + 1), str(avg_ratio[i]).replace("\n", "")) print() # -------------------------------------------------------------------------- # Speedup v. Number of Elements Plots # -------------------------------------------------------------------------- - if (not args.noplots): - - colors = ['#000000','#a6cee3','#1f78b4','#b2df8a','#33a02c', - '#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6', - '#6a3d9a','#ffff99','#b15928'] - - hatch = [ '/','\\','-','+','x','o','O','.','*'] + if not args.noplots: + + colors = [ + "#000000", + "#a6cee3", + "#1f78b4", + "#b2df8a", + "#33a02c", + "#fb9a99", + "#e31a1c", + "#fdbf6f", + "#ff7f00", + "#cab2d6", + "#6a3d9a", + "#ffff99", + "#b15928", + ] + + hatch = ["/", "\\", "-", "+", "x", "o", "O", ".", "*"] # -------------------------------------------------------------------------- # Combined Number of Vectors Plots @@ -287,19 +317,17 @@ def main(): ax = fig.add_subplot(111) if args.fused: - indices = range(0,len(nvec)) + indices = range(0, len(nvec)) else: - indices = range(len(nvec)-1,len(nvec)) + indices = range(len(nvec) - 1, len(nvec)) for i in indices: - lab = 'num. vecs %d' % nvec[i] - if (args.logx): - ax.plot(nelem, avg_ratio[i], - color=colors[i], linestyle='-', label=lab) - ax.set_xscale('log') + lab = "num. 
vecs %d" % nvec[i] + if args.logx: + ax.plot(nelem, avg_ratio[i], color=colors[i], linestyle="-", label=lab) + ax.set_xscale("log") else: - ax.plot(nelem, avg_ratio[i], - color=colors[i], linestyle='-', label=lab) + ax.plot(nelem, avg_ratio[i], color=colors[i], linestyle="-", label=lab) # # plot confidence interval # ax.fill_between(nelem, lower_denom[i], upper_denom[i], # color=colors[i], alpha=0.3) @@ -309,18 +337,18 @@ def main(): ax.legend() ax.grid() - plt.title('Average Speedup \n'+args.op) - plt.xlabel('vector length') - plt.ylabel('speedup (baseline/new)') + plt.title("Average Speedup \n" + args.op) + plt.xlabel("vector length") + plt.ylabel("speedup (baseline/new)") - if (args.show): + if args.show: plt.show() else: - if (args.logx): - fname=args.op+'-nvec-all-logx.pdf' + if args.logx: + fname = args.op + "-nvec-all-logx.pdf" else: - fname=args.op+'-nvec-all.pdf' - plt.ticklabel_format(axis='both',style='sci') + fname = args.op + "-nvec-all.pdf" + plt.ticklabel_format(axis="both", style="sci") plt.savefig(fname) plt.close() diff --git a/examples/arkode/CXX_parallel/plot_brusselator1D.py b/examples/arkode/CXX_parallel/plot_brusselator1D.py index 2bcc7d1af7..087577ff0e 100755 --- a/examples/arkode/CXX_parallel/plot_brusselator1D.py +++ b/examples/arkode/CXX_parallel/plot_brusselator1D.py @@ -22,33 +22,36 @@ import numpy as np # load mesh data file -mesh = np.loadtxt('mesh.txt', dtype=np.double) +mesh = np.loadtxt("mesh.txt", dtype=np.double) # load output time file -times = np.loadtxt('t.000000.txt', dtype=np.double) +times = np.loadtxt("t.000000.txt", dtype=np.double) # load solution data files -ufiles = glob.glob('u.' + ('[0-9]'*6) + '.txt'); ufiles.sort() -vfiles = glob.glob('v.' + ('[0-9]'*6) + '.txt'); vfiles.sort() -wfiles = glob.glob('w.' + ('[0-9]'*6) + '.txt'); wfiles.sort() +ufiles = glob.glob("u." + ("[0-9]" * 6) + ".txt") +ufiles.sort() +vfiles = glob.glob("v." + ("[0-9]" * 6) + ".txt") +vfiles.sort() +wfiles = glob.glob("w." + ("[0-9]" * 6) + ".txt") +wfiles.sort() udata = np.loadtxt(ufiles[0], dtype=np.double) vdata = np.loadtxt(vfiles[0], dtype=np.double) wdata = np.loadtxt(wfiles[0], dtype=np.double) -for idx in range(1,len(ufiles)): +for idx in range(1, len(ufiles)): udata = np.hstack((udata, np.loadtxt(ufiles[idx], dtype=np.double))) vdata = np.hstack((vdata, np.loadtxt(vfiles[idx], dtype=np.double))) wdata = np.hstack((wdata, np.loadtxt(wfiles[idx], dtype=np.double))) # determine number of time steps, mesh size -nt,nx = np.shape(udata) +nt, nx = np.shape(udata) # determine min/max values -umin = 0.9*udata.min() -umax = 1.1*udata.max() -vmin = 0.9*vdata.min() -vmax = 1.1*vdata.max() -wmin = 0.9*wdata.min() -wmax = 1.1*wdata.max() +umin = 0.9 * udata.min() +umax = 1.1 * udata.max() +vmin = 0.9 * vdata.min() +vmax = 1.1 * vdata.max() +wmin = 0.9 * wdata.min() +wmax = 1.1 * wdata.max() xmax = mesh.max() minval = np.array([umin, vmin, wmin]).min() maxval = np.array([umax, vmax, wmax]).max() @@ -57,39 +60,39 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'solution.' + repr(tstep).zfill(3) + '.png' - tstr = repr(tstep) + pname = "solution." 
+ repr(tstep).zfill(3) + ".png" + tstr = repr(tstep) nxstr = repr(nx) # plot current solution and save to disk plt.figure(1) - plt.plot(mesh,udata[tstep,:],label="u") - plt.plot(mesh,vdata[tstep,:],label="v") - plt.plot(mesh,wdata[tstep,:],label="w") - plt.xlabel('x') - plt.ylabel('solution') - plt.title('Solutions at output ' + tstr + ', mesh = ' + nxstr) + plt.plot(mesh, udata[tstep, :], label="u") + plt.plot(mesh, vdata[tstep, :], label="v") + plt.plot(mesh, wdata[tstep, :], label="w") + plt.xlabel("x") + plt.ylabel("solution") + plt.title("Solutions at output " + tstr + ", mesh = " + nxstr) plt.axis((0.0, xmax, minval, maxval)) plt.grid() - plt.legend(loc='upper right', shadow=True) + plt.legend(loc="upper right", shadow=True) plt.savefig(pname) plt.close() # set string constants for output plots, current time, mesh size -pname = 'solution_at_x0.png' +pname = "solution_at_x0.png" xstr = repr(mesh[0]) # plot current solution and save to disk plt.figure(1) -plt.plot(times,udata[:,0],label="u") -plt.plot(times,vdata[:,0],label="v") -plt.plot(times,wdata[:,0],label="w") -plt.xlabel('t') -plt.ylabel('solution') -plt.title('Solutions at output at x = '+xstr) +plt.plot(times, udata[:, 0], label="u") +plt.plot(times, vdata[:, 0], label="v") +plt.plot(times, wdata[:, 0], label="w") +plt.xlabel("t") +plt.ylabel("solution") +plt.title("Solutions at output at x = " + xstr) plt.axis((times[0], times[-1], minval, maxval)) plt.grid() -plt.legend(loc='upper right', shadow=True) +plt.legend(loc="upper right", shadow=True) plt.savefig(pname) plt.close() diff --git a/examples/arkode/CXX_parallel/plot_heat2D_p.py b/examples/arkode/CXX_parallel/plot_heat2D_p.py index 7b7f83d929..0a99dfbc5e 100755 --- a/examples/arkode/CXX_parallel/plot_heat2D_p.py +++ b/examples/arkode/CXX_parallel/plot_heat2D_p.py @@ -28,7 +28,7 @@ # ------------------------------------------------------------------------------ # read MPI root process problem info file -infofile = 'heat2d_info.00000.txt' +infofile = "heat2d_info.00000.txt" with open(infofile) as fn: @@ -59,7 +59,7 @@ continue # total number of MPI processes - if "np"in line: + if "np" in line: nprocs = int(text[1]) continue @@ -71,11 +71,11 @@ # ------------------------------------------------------------------------------ # load subdomain information, store in table -subdomains = np.zeros((nprocs,4), dtype=np.int) +subdomains = np.zeros((nprocs, 4), dtype=np.int) for i in range(nprocs): - infofile = 'heat2d_info.' + repr(i).zfill(5) + '.txt' + infofile = "heat2d_info." 
+ repr(i).zfill(5) + ".txt" with open(infofile) as fn: @@ -87,62 +87,64 @@ # x-direction starting index if "is" in line: - subdomains[i,0] = float(text[1]) + subdomains[i, 0] = float(text[1]) continue # x-direction ending index if "ie" in line: - subdomains[i,1] = float(text[1]) + subdomains[i, 1] = float(text[1]) continue # y-direction starting index if "js" in line: - subdomains[i,2] = float(text[1]) + subdomains[i, 2] = float(text[1]) continue # y-direction ending index if "je" in line: - subdomains[i,3] = float(text[1]) + subdomains[i, 3] = float(text[1]) continue # ------------------------------------------------------------------------------ # check if the error was output -fname = 'heat2d_error.00000.txt' +fname = "heat2d_error.00000.txt" if os.path.isfile(fname): - plottype = ['solution', 'error'] + plottype = ["solution", "error"] else: - plottype = ['solution'] + plottype = ["solution"] for pt in plottype: # fill array with data - time = np.zeros(nt) + time = np.zeros(nt) result = np.zeros((nt, ny, nx)) for i in range(nprocs): - datafile = 'heat2d_' + pt + '.' + repr(i).zfill(5) + '.txt' + datafile = "heat2d_" + pt + "." + repr(i).zfill(5) + ".txt" # load data data = np.loadtxt(datafile, dtype=np.double) - if (np.shape(data)[0] != nt): - sys.exit('error: subdomain ' + i + ' has an incorrect number of time steps') + if np.shape(data)[0] != nt: + sys.exit("error: subdomain " + i + " has an incorrect number of time steps") # subdomain indices - istart = subdomains[i,0] - iend = subdomains[i,1] - jstart = subdomains[i,2] - jend = subdomains[i,3] - nxl = iend - istart + 1 - nyl = jend - jstart + 1 + istart = subdomains[i, 0] + iend = subdomains[i, 1] + jstart = subdomains[i, 2] + jend = subdomains[i, 3] + nxl = iend - istart + 1 + nyl = jend - jstart + 1 # extract data for i in range(nt): - time[i] = data[i,0] - result[i,jstart:jend+1,istart:iend+1] = np.reshape(data[i,1:], (nyl,nxl)) + time[i] = data[i, 0] + result[i, jstart : jend + 1, istart : iend + 1] = np.reshape( + data[i, 1:], (nyl, nxl) + ) # determine extents of plots maxtemp = 1.1 * result.max() @@ -151,7 +153,7 @@ # set x and y meshgrid objects xspan = np.linspace(0.0, xu, nx) yspan = np.linspace(0.0, yu, ny) - X,Y = np.meshgrid(xspan, yspan) + X, Y = np.meshgrid(xspan, yspan) nxstr = repr(nx) nystr = repr(ny) @@ -160,24 +162,33 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'heat2d_surf_' + pt + '.' + repr(tstep).zfill(3) + '.png' - tstr = str(time[tstep]) + pname = "heat2d_surf_" + pt + "." 
+ repr(tstep).zfill(3) + ".png" + tstr = str(time[tstep]) # plot surface and save to disk fig = plt.figure(1) - ax = fig.add_subplot(111, projection='3d') - - ax.plot_surface(X, Y, result[tstep,:,:], rstride=1, cstride=1, - cmap=cm.jet, linewidth=0, antialiased=True, shade=True) - - ax.set_xlabel('x') - ax.set_ylabel('y') + ax = fig.add_subplot(111, projection="3d") + + ax.plot_surface( + X, + Y, + result[tstep, :, :], + rstride=1, + cstride=1, + cmap=cm.jet, + linewidth=0, + antialiased=True, + shade=True, + ) + + ax.set_xlabel("x") + ax.set_ylabel("y") ax.set_zlim((mintemp, maxtemp)) - ax.view_init(20,45) - if (pt == 'solution'): - title('u(x,y) at t = ' + tstr) + ax.view_init(20, 45) + if pt == "solution": + title("u(x,y) at t = " + tstr) else: - title('error(x,y) at t = ' + tstr) + title("error(x,y) at t = " + tstr) savefig(pname) plt.close() diff --git a/examples/arkode/CXX_parhyp/plot_heat2D_p.py b/examples/arkode/CXX_parhyp/plot_heat2D_p.py index 7b7f83d929..0a99dfbc5e 100755 --- a/examples/arkode/CXX_parhyp/plot_heat2D_p.py +++ b/examples/arkode/CXX_parhyp/plot_heat2D_p.py @@ -28,7 +28,7 @@ # ------------------------------------------------------------------------------ # read MPI root process problem info file -infofile = 'heat2d_info.00000.txt' +infofile = "heat2d_info.00000.txt" with open(infofile) as fn: @@ -59,7 +59,7 @@ continue # total number of MPI processes - if "np"in line: + if "np" in line: nprocs = int(text[1]) continue @@ -71,11 +71,11 @@ # ------------------------------------------------------------------------------ # load subdomain information, store in table -subdomains = np.zeros((nprocs,4), dtype=np.int) +subdomains = np.zeros((nprocs, 4), dtype=np.int) for i in range(nprocs): - infofile = 'heat2d_info.' + repr(i).zfill(5) + '.txt' + infofile = "heat2d_info." + repr(i).zfill(5) + ".txt" with open(infofile) as fn: @@ -87,62 +87,64 @@ # x-direction starting index if "is" in line: - subdomains[i,0] = float(text[1]) + subdomains[i, 0] = float(text[1]) continue # x-direction ending index if "ie" in line: - subdomains[i,1] = float(text[1]) + subdomains[i, 1] = float(text[1]) continue # y-direction starting index if "js" in line: - subdomains[i,2] = float(text[1]) + subdomains[i, 2] = float(text[1]) continue # y-direction ending index if "je" in line: - subdomains[i,3] = float(text[1]) + subdomains[i, 3] = float(text[1]) continue # ------------------------------------------------------------------------------ # check if the error was output -fname = 'heat2d_error.00000.txt' +fname = "heat2d_error.00000.txt" if os.path.isfile(fname): - plottype = ['solution', 'error'] + plottype = ["solution", "error"] else: - plottype = ['solution'] + plottype = ["solution"] for pt in plottype: # fill array with data - time = np.zeros(nt) + time = np.zeros(nt) result = np.zeros((nt, ny, nx)) for i in range(nprocs): - datafile = 'heat2d_' + pt + '.' + repr(i).zfill(5) + '.txt' + datafile = "heat2d_" + pt + "." 
+ repr(i).zfill(5) + ".txt" # load data data = np.loadtxt(datafile, dtype=np.double) - if (np.shape(data)[0] != nt): - sys.exit('error: subdomain ' + i + ' has an incorrect number of time steps') + if np.shape(data)[0] != nt: + sys.exit("error: subdomain " + i + " has an incorrect number of time steps") # subdomain indices - istart = subdomains[i,0] - iend = subdomains[i,1] - jstart = subdomains[i,2] - jend = subdomains[i,3] - nxl = iend - istart + 1 - nyl = jend - jstart + 1 + istart = subdomains[i, 0] + iend = subdomains[i, 1] + jstart = subdomains[i, 2] + jend = subdomains[i, 3] + nxl = iend - istart + 1 + nyl = jend - jstart + 1 # extract data for i in range(nt): - time[i] = data[i,0] - result[i,jstart:jend+1,istart:iend+1] = np.reshape(data[i,1:], (nyl,nxl)) + time[i] = data[i, 0] + result[i, jstart : jend + 1, istart : iend + 1] = np.reshape( + data[i, 1:], (nyl, nxl) + ) # determine extents of plots maxtemp = 1.1 * result.max() @@ -151,7 +153,7 @@ # set x and y meshgrid objects xspan = np.linspace(0.0, xu, nx) yspan = np.linspace(0.0, yu, ny) - X,Y = np.meshgrid(xspan, yspan) + X, Y = np.meshgrid(xspan, yspan) nxstr = repr(nx) nystr = repr(ny) @@ -160,24 +162,33 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'heat2d_surf_' + pt + '.' + repr(tstep).zfill(3) + '.png' - tstr = str(time[tstep]) + pname = "heat2d_surf_" + pt + "." + repr(tstep).zfill(3) + ".png" + tstr = str(time[tstep]) # plot surface and save to disk fig = plt.figure(1) - ax = fig.add_subplot(111, projection='3d') - - ax.plot_surface(X, Y, result[tstep,:,:], rstride=1, cstride=1, - cmap=cm.jet, linewidth=0, antialiased=True, shade=True) - - ax.set_xlabel('x') - ax.set_ylabel('y') + ax = fig.add_subplot(111, projection="3d") + + ax.plot_surface( + X, + Y, + result[tstep, :, :], + rstride=1, + cstride=1, + cmap=cm.jet, + linewidth=0, + antialiased=True, + shade=True, + ) + + ax.set_xlabel("x") + ax.set_ylabel("y") ax.set_zlim((mintemp, maxtemp)) - ax.view_init(20,45) - if (pt == 'solution'): - title('u(x,y) at t = ' + tstr) + ax.view_init(20, 45) + if pt == "solution": + title("u(x,y) at t = " + tstr) else: - title('error(x,y) at t = ' + tstr) + title("error(x,y) at t = " + tstr) savefig(pname) plt.close() diff --git a/examples/arkode/CXX_serial/plot_heat2D.py b/examples/arkode/CXX_serial/plot_heat2D.py index 6c97cdc112..c494bc06de 100755 --- a/examples/arkode/CXX_serial/plot_heat2D.py +++ b/examples/arkode/CXX_serial/plot_heat2D.py @@ -28,7 +28,7 @@ # ------------------------------------------------------------------------------ # read problem info file -infofile = 'heat2d_info.txt' +infofile = "heat2d_info.txt" with open(infofile) as fn: @@ -66,26 +66,26 @@ # ------------------------------------------------------------------------------ # check if the error was output -fname = 'heat2d_error.txt' +fname = "heat2d_error.txt" if os.path.isfile(fname): - plottype = ['solution', 'error'] + plottype = ["solution", "error"] else: - plottype = ['solution'] + plottype = ["solution"] for pt in plottype: # fill array with data - time = np.zeros(nt) + time = np.zeros(nt) result = np.zeros((nt, ny, nx)) # load data - data = np.loadtxt('heat2d_' + pt + '.txt', dtype=np.double) + data = np.loadtxt("heat2d_" + pt + ".txt", dtype=np.double) # extract data for i in range(nt): - time[i] = data[i,0] - result[i,0:ny+1,0:nx+1] = np.reshape(data[i,1:], (ny,nx)) + time[i] = data[i, 0] + result[i, 0 : ny + 1, 0 : nx + 1] = np.reshape(data[i, 1:], (ny, nx)) # determine extents of plots 
maxtemp = 1.1 * result.max() @@ -94,7 +94,7 @@ # set x and y meshgrid objects xspan = np.linspace(0.0, xu, nx) yspan = np.linspace(0.0, yu, ny) - X,Y = np.meshgrid(xspan, yspan) + X, Y = np.meshgrid(xspan, yspan) nxstr = repr(nx) nystr = repr(ny) @@ -103,24 +103,33 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'heat2d_surf_' + pt + '.' + repr(tstep).zfill(3) + '.png' - tstr = str(time[tstep]) + pname = "heat2d_surf_" + pt + "." + repr(tstep).zfill(3) + ".png" + tstr = str(time[tstep]) # plot surface and save to disk fig = plt.figure(1) - ax = fig.add_subplot(111, projection='3d') - - ax.plot_surface(X, Y, result[tstep,:,:], rstride=1, cstride=1, - cmap=cm.jet, linewidth=0, antialiased=True, shade=True) - - ax.set_xlabel('x') - ax.set_ylabel('y') + ax = fig.add_subplot(111, projection="3d") + + ax.plot_surface( + X, + Y, + result[tstep, :, :], + rstride=1, + cstride=1, + cmap=cm.jet, + linewidth=0, + antialiased=True, + shade=True, + ) + + ax.set_xlabel("x") + ax.set_ylabel("y") ax.set_zlim((mintemp, maxtemp)) - ax.view_init(20,45) - if (pt == 'solution'): - title('u(x,y) at t = ' + tstr) + ax.view_init(20, 45) + if pt == "solution": + title("u(x,y) at t = " + tstr) else: - title('error(x,y) at t = ' + tstr) + title("error(x,y) at t = " + tstr) savefig(pname) plt.close() diff --git a/examples/arkode/CXX_serial/plot_sol.py b/examples/arkode/CXX_serial/plot_sol.py index ab463fac6c..fe3d875340 100755 --- a/examples/arkode/CXX_serial/plot_sol.py +++ b/examples/arkode/CXX_serial/plot_sol.py @@ -20,16 +20,16 @@ import numpy as np # load solution data file -data = np.loadtxt('solution.txt', dtype=np.double) +data = np.loadtxt("solution.txt", dtype=np.double) # determine number of time steps, number of fields -nt,nv = np.shape(data) +nt, nv = np.shape(data) # extract time array -times = data[:,0] +times = data[:, 0] # parse comment line to determine solution names -f = open('solution.txt', 'r') +f = open("solution.txt", "r") commentline = f.readline() commentsplit = commentline.split() names = commentsplit[2:] @@ -38,18 +38,16 @@ plt.figure() # add curves to figure -for i in range(nv-1): - plt.plot(times,data[:,i+1],label=names[i]) -plt.xlabel('t') -if (nv > 2): - plt.ylabel('solutions') +for i in range(nv - 1): + plt.plot(times, data[:, i + 1], label=names[i]) +plt.xlabel("t") +if nv > 2: + plt.ylabel("solutions") else: - plt.ylabel('solution') -plt.legend(loc='upper right', shadow=True) + plt.ylabel("solution") +plt.legend(loc="upper right", shadow=True) plt.grid() -plt.savefig('solution.png') - - +plt.savefig("solution.png") ##### end of script ##### diff --git a/examples/arkode/CXX_xbraid/plot_heat2D.py b/examples/arkode/CXX_xbraid/plot_heat2D.py index 72aaa2adea..f24592bfdc 100755 --- a/examples/arkode/CXX_xbraid/plot_heat2D.py +++ b/examples/arkode/CXX_xbraid/plot_heat2D.py @@ -28,7 +28,7 @@ # ------------------------------------------------------------------------------ # read problem info file -infofile = 'heat2d_info.txt' +infofile = "heat2d_info.txt" with open(infofile) as fn: @@ -66,17 +66,17 @@ # ------------------------------------------------------------------------------ # check if the error was output -fname = 'heat2d_error.000000.txt' +fname = "heat2d_error.000000.txt" if os.path.isfile(fname): - plottype = ['solution', 'error'] + plottype = ["solution", "error"] else: - plottype = ['solution'] + plottype = ["solution"] for pt in plottype: # fill array with data - time = np.zeros(nt) + time = np.zeros(nt) result = 
np.zeros((nt, ny, nx)) tindex = range(0, nt) @@ -86,14 +86,14 @@ for t in tindex: # output file name - datafile = 'heat2d_' + pt + '.' + repr(t).zfill(6) + '.txt' + datafile = "heat2d_" + pt + "." + repr(t).zfill(6) + ".txt" # load data data = np.loadtxt(datafile, dtype=np.double) # extract data time[i] = data[0] - result[i,0:ny+1,0:nx+1] = np.reshape(data[1:], (ny,nx)) + result[i, 0 : ny + 1, 0 : nx + 1] = np.reshape(data[1:], (ny, nx)) i += 1 # determine extents of plots @@ -103,7 +103,7 @@ # set x and y meshgrid objects xspan = np.linspace(0.0, xu, nx) yspan = np.linspace(0.0, yu, ny) - X,Y = np.meshgrid(xspan, yspan) + X, Y = np.meshgrid(xspan, yspan) nxstr = repr(nx) nystr = repr(ny) @@ -112,24 +112,33 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'heat2d_surf_' + pt + '.' + repr(tstep).zfill(6) + '.png' - tstr = str(time[tstep]) + pname = "heat2d_surf_" + pt + "." + repr(tstep).zfill(6) + ".png" + tstr = str(time[tstep]) # plot surface and save to disk fig = plt.figure(1) - ax = fig.add_subplot(111, projection='3d') - - ax.plot_surface(X, Y, result[tstep,:,:], rstride=1, cstride=1, - cmap=cm.jet, linewidth=0, antialiased=True, shade=True) - - ax.set_xlabel('x') - ax.set_ylabel('y') + ax = fig.add_subplot(111, projection="3d") + + ax.plot_surface( + X, + Y, + result[tstep, :, :], + rstride=1, + cstride=1, + cmap=cm.jet, + linewidth=0, + antialiased=True, + shade=True, + ) + + ax.set_xlabel("x") + ax.set_ylabel("y") ax.set_zlim((mintemp, maxtemp)) - ax.view_init(20,45) - if (pt == 'solution'): - title('u(x,y) at t = ' + tstr) + ax.view_init(20, 45) + if pt == "solution": + title("u(x,y) at t = " + tstr) else: - title('error(x,y) at t = ' + tstr) + title("error(x,y) at t = " + tstr) savefig(pname) plt.close() diff --git a/examples/arkode/C_manyvector/plot_brusselator1D.py b/examples/arkode/C_manyvector/plot_brusselator1D.py index 3cc29051e5..72a3402c4a 100755 --- a/examples/arkode/C_manyvector/plot_brusselator1D.py +++ b/examples/arkode/C_manyvector/plot_brusselator1D.py @@ -20,23 +20,23 @@ import numpy as np # load mesh data file -mesh = np.loadtxt('bruss_mesh.txt', dtype=np.double) +mesh = np.loadtxt("bruss_mesh.txt", dtype=np.double) # load solution data files -udata = np.loadtxt('bruss_u.txt', dtype=np.double) -vdata = np.loadtxt('bruss_v.txt', dtype=np.double) -wdata = np.loadtxt('bruss_w.txt', dtype=np.double) +udata = np.loadtxt("bruss_u.txt", dtype=np.double) +vdata = np.loadtxt("bruss_v.txt", dtype=np.double) +wdata = np.loadtxt("bruss_w.txt", dtype=np.double) # determine number of time steps, mesh size -nt,nx = np.shape(udata) +nt, nx = np.shape(udata) # determine min/max values -umin = 0.9*udata.min() -umax = 1.1*udata.max() -vmin = 0.9*vdata.min() -vmax = 1.1*vdata.max() -wmin = 0.9*wdata.min() -wmax = 1.1*wdata.max() +umin = 0.9 * udata.min() +umax = 1.1 * udata.max() +vmin = 0.9 * vdata.min() +vmax = 1.1 * vdata.max() +wmin = 0.9 * wdata.min() +wmax = 1.1 * wdata.max() minval = np.array([umin, vmin, wmin]).min() maxval = np.array([umax, vmax, wmax]).max() @@ -44,21 +44,21 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'brusselator1D.' + repr(tstep).zfill(3) + '.png' - tstr = repr(tstep) + pname = "brusselator1D." 
+ repr(tstep).zfill(3) + ".png" + tstr = repr(tstep) nxstr = repr(nx) # plot current solution and save to disk plt.figure(1) - plt.plot(mesh,udata[tstep,:],label="u") - plt.plot(mesh,vdata[tstep,:],label="v") - plt.plot(mesh,wdata[tstep,:],label="w") - plt.xlabel('x') - plt.ylabel('solution') - plt.title('Solutions at output ' + tstr + ', mesh = ' + nxstr) + plt.plot(mesh, udata[tstep, :], label="u") + plt.plot(mesh, vdata[tstep, :], label="v") + plt.plot(mesh, wdata[tstep, :], label="w") + plt.xlabel("x") + plt.ylabel("solution") + plt.title("Solutions at output " + tstr + ", mesh = " + nxstr) plt.axis((0.0, 1.0, minval, maxval)) plt.grid() - plt.legend(loc='upper right', shadow=True) + plt.legend(loc="upper right", shadow=True) plt.savefig(pname) plt.close() diff --git a/examples/arkode/C_openmp/plot_brusselator1D.py b/examples/arkode/C_openmp/plot_brusselator1D.py index 3cc29051e5..72a3402c4a 100755 --- a/examples/arkode/C_openmp/plot_brusselator1D.py +++ b/examples/arkode/C_openmp/plot_brusselator1D.py @@ -20,23 +20,23 @@ import numpy as np # load mesh data file -mesh = np.loadtxt('bruss_mesh.txt', dtype=np.double) +mesh = np.loadtxt("bruss_mesh.txt", dtype=np.double) # load solution data files -udata = np.loadtxt('bruss_u.txt', dtype=np.double) -vdata = np.loadtxt('bruss_v.txt', dtype=np.double) -wdata = np.loadtxt('bruss_w.txt', dtype=np.double) +udata = np.loadtxt("bruss_u.txt", dtype=np.double) +vdata = np.loadtxt("bruss_v.txt", dtype=np.double) +wdata = np.loadtxt("bruss_w.txt", dtype=np.double) # determine number of time steps, mesh size -nt,nx = np.shape(udata) +nt, nx = np.shape(udata) # determine min/max values -umin = 0.9*udata.min() -umax = 1.1*udata.max() -vmin = 0.9*vdata.min() -vmax = 1.1*vdata.max() -wmin = 0.9*wdata.min() -wmax = 1.1*wdata.max() +umin = 0.9 * udata.min() +umax = 1.1 * udata.max() +vmin = 0.9 * vdata.min() +vmax = 1.1 * vdata.max() +wmin = 0.9 * wdata.min() +wmax = 1.1 * wdata.max() minval = np.array([umin, vmin, wmin]).min() maxval = np.array([umax, vmax, wmax]).max() @@ -44,21 +44,21 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'brusselator1D.' + repr(tstep).zfill(3) + '.png' - tstr = repr(tstep) + pname = "brusselator1D." 
+ repr(tstep).zfill(3) + ".png" + tstr = repr(tstep) nxstr = repr(nx) # plot current solution and save to disk plt.figure(1) - plt.plot(mesh,udata[tstep,:],label="u") - plt.plot(mesh,vdata[tstep,:],label="v") - plt.plot(mesh,wdata[tstep,:],label="w") - plt.xlabel('x') - plt.ylabel('solution') - plt.title('Solutions at output ' + tstr + ', mesh = ' + nxstr) + plt.plot(mesh, udata[tstep, :], label="u") + plt.plot(mesh, vdata[tstep, :], label="v") + plt.plot(mesh, wdata[tstep, :], label="w") + plt.xlabel("x") + plt.ylabel("solution") + plt.title("Solutions at output " + tstr + ", mesh = " + nxstr) plt.axis((0.0, 1.0, minval, maxval)) plt.grid() - plt.legend(loc='upper right', shadow=True) + plt.legend(loc="upper right", shadow=True) plt.savefig(pname) plt.close() diff --git a/examples/arkode/C_parallel/plot_brusselator1D.py b/examples/arkode/C_parallel/plot_brusselator1D.py index 2bcc7d1af7..087577ff0e 100755 --- a/examples/arkode/C_parallel/plot_brusselator1D.py +++ b/examples/arkode/C_parallel/plot_brusselator1D.py @@ -22,33 +22,36 @@ import numpy as np # load mesh data file -mesh = np.loadtxt('mesh.txt', dtype=np.double) +mesh = np.loadtxt("mesh.txt", dtype=np.double) # load output time file -times = np.loadtxt('t.000000.txt', dtype=np.double) +times = np.loadtxt("t.000000.txt", dtype=np.double) # load solution data files -ufiles = glob.glob('u.' + ('[0-9]'*6) + '.txt'); ufiles.sort() -vfiles = glob.glob('v.' + ('[0-9]'*6) + '.txt'); vfiles.sort() -wfiles = glob.glob('w.' + ('[0-9]'*6) + '.txt'); wfiles.sort() +ufiles = glob.glob("u." + ("[0-9]" * 6) + ".txt") +ufiles.sort() +vfiles = glob.glob("v." + ("[0-9]" * 6) + ".txt") +vfiles.sort() +wfiles = glob.glob("w." + ("[0-9]" * 6) + ".txt") +wfiles.sort() udata = np.loadtxt(ufiles[0], dtype=np.double) vdata = np.loadtxt(vfiles[0], dtype=np.double) wdata = np.loadtxt(wfiles[0], dtype=np.double) -for idx in range(1,len(ufiles)): +for idx in range(1, len(ufiles)): udata = np.hstack((udata, np.loadtxt(ufiles[idx], dtype=np.double))) vdata = np.hstack((vdata, np.loadtxt(vfiles[idx], dtype=np.double))) wdata = np.hstack((wdata, np.loadtxt(wfiles[idx], dtype=np.double))) # determine number of time steps, mesh size -nt,nx = np.shape(udata) +nt, nx = np.shape(udata) # determine min/max values -umin = 0.9*udata.min() -umax = 1.1*udata.max() -vmin = 0.9*vdata.min() -vmax = 1.1*vdata.max() -wmin = 0.9*wdata.min() -wmax = 1.1*wdata.max() +umin = 0.9 * udata.min() +umax = 1.1 * udata.max() +vmin = 0.9 * vdata.min() +vmax = 1.1 * vdata.max() +wmin = 0.9 * wdata.min() +wmax = 1.1 * wdata.max() xmax = mesh.max() minval = np.array([umin, vmin, wmin]).min() maxval = np.array([umax, vmax, wmax]).max() @@ -57,39 +60,39 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'solution.' + repr(tstep).zfill(3) + '.png' - tstr = repr(tstep) + pname = "solution." 
+ repr(tstep).zfill(3) + ".png" + tstr = repr(tstep) nxstr = repr(nx) # plot current solution and save to disk plt.figure(1) - plt.plot(mesh,udata[tstep,:],label="u") - plt.plot(mesh,vdata[tstep,:],label="v") - plt.plot(mesh,wdata[tstep,:],label="w") - plt.xlabel('x') - plt.ylabel('solution') - plt.title('Solutions at output ' + tstr + ', mesh = ' + nxstr) + plt.plot(mesh, udata[tstep, :], label="u") + plt.plot(mesh, vdata[tstep, :], label="v") + plt.plot(mesh, wdata[tstep, :], label="w") + plt.xlabel("x") + plt.ylabel("solution") + plt.title("Solutions at output " + tstr + ", mesh = " + nxstr) plt.axis((0.0, xmax, minval, maxval)) plt.grid() - plt.legend(loc='upper right', shadow=True) + plt.legend(loc="upper right", shadow=True) plt.savefig(pname) plt.close() # set string constants for output plots, current time, mesh size -pname = 'solution_at_x0.png' +pname = "solution_at_x0.png" xstr = repr(mesh[0]) # plot current solution and save to disk plt.figure(1) -plt.plot(times,udata[:,0],label="u") -plt.plot(times,vdata[:,0],label="v") -plt.plot(times,wdata[:,0],label="w") -plt.xlabel('t') -plt.ylabel('solution') -plt.title('Solutions at output at x = '+xstr) +plt.plot(times, udata[:, 0], label="u") +plt.plot(times, vdata[:, 0], label="v") +plt.plot(times, wdata[:, 0], label="w") +plt.xlabel("t") +plt.ylabel("solution") +plt.title("Solutions at output at x = " + xstr) plt.axis((times[0], times[-1], minval, maxval)) plt.grid() -plt.legend(loc='upper right', shadow=True) +plt.legend(loc="upper right", shadow=True) plt.savefig(pname) plt.close() diff --git a/examples/arkode/C_serial/ark_kepler_plot.py b/examples/arkode/C_serial/ark_kepler_plot.py index 2d499d850e..7a50dc3ebb 100755 --- a/examples/arkode/C_serial/ark_kepler_plot.py +++ b/examples/arkode/C_serial/ark_kepler_plot.py @@ -19,70 +19,72 @@ import matplotlib.pyplot as plt import argparse -parser = argparse.ArgumentParser(description='Script for plotting the energy, angular momentum, and phase space solution for ark_kepler.c') -parser.add_argument('output_times', help='file with the output times') -parser.add_argument('solution', help='file with the solution') -parser.add_argument('conserved_quantities', help='file with conserved quantities') +parser = argparse.ArgumentParser( + description="Script for plotting the energy, angular momentum, and phase space solution for ark_kepler.c" +) +parser.add_argument("output_times", help="file with the output times") +parser.add_argument("solution", help="file with the solution") +parser.add_argument("conserved_quantities", help="file with conserved quantities") args = parser.parse_args() t = np.loadtxt(args.output_times, dtype=np.float64) y = np.loadtxt(args.solution, dtype=np.float64) -y = np.reshape(y, (y.shape[0]//4, 4)) +y = np.reshape(y, (y.shape[0] // 4, 4)) plt.figure(dpi=200) -plt.plot(y[:,0], y[:,1]) -plt.savefig('ark_kepler_phase.png') +plt.plot(y[:, 0], y[:, 1]) +plt.savefig("ark_kepler_phase.png") plt.close() -conserved = np.loadtxt(args.conserved_quantities, delimiter=',', dtype=np.float64) -energy = conserved[:,0] -energy_0 = conserved[0,0] -L = conserved[:,1] -L_0 = conserved[0,1] +conserved = np.loadtxt(args.conserved_quantities, delimiter=",", dtype=np.float64) +energy = conserved[:, 0] +energy_0 = conserved[0, 0] +L = conserved[:, 1] +L_0 = conserved[0, 1] plt.figure(dpi=200) -plt.title('Energy') +plt.title("Energy") plt.plot(t, np.abs(energy)) -plt.ylabel('H(t,p,q)') -plt.xlabel('<--- t --->') -plt.xscale('log') -plt.savefig('ark_kepler_energy.png') +plt.ylabel("H(t,p,q)") 
+plt.xlabel("<--- t --->") +plt.xscale("log") +plt.savefig("ark_kepler_energy.png") plt.close() plt.figure(dpi=200) -plt.title('Momentum') +plt.title("Momentum") plt.plot(t, L) -plt.ylabel('L(t,p,q)') -plt.xlabel('<--- t --->') -plt.xscale('log') -plt.savefig('ark_kepler_momentum.png') +plt.ylabel("L(t,p,q)") +plt.xlabel("<--- t --->") +plt.xscale("log") +plt.savefig("ark_kepler_momentum.png") plt.close() # # Time plot. # plt.figure(dpi=200) -plt.plot(t, y[:,0], linewidth = 2) -plt.plot(t, y[:,1], linewidth = 2) -plt.plot(t, y[:,2], linewidth = 2) -plt.plot(t, y[:,3], linewidth = 2) +plt.plot(t, y[:, 0], linewidth=2) +plt.plot(t, y[:, 1], linewidth=2) +plt.plot(t, y[:, 2], linewidth=2) +plt.plot(t, y[:, 3], linewidth=2) plt.grid(True) -plt.legend(['P', 'P\'', 'Q', 'Q\'']) -plt.xlabel('<--- t --->') -plt.ylabel('<--- y(1:4) --->') -plt.title('Solution in Time') -plt.savefig('ark_kepler_plot.png') +plt.legend(["P", "P'", "Q", "Q'"]) +plt.xlabel("<--- t --->") +plt.ylabel("<--- y(1:4) --->") +plt.title("Solution in Time") +plt.savefig("ark_kepler_plot.png") plt.close() # # Phase plot. # plt.figure(dpi=200) -plt.plot(y[:,0], y[:,1], linewidth=0.1) +plt.plot(y[:, 0], y[:, 1], linewidth=0.1) plt.grid(True) -plt.xlabel('<--- y1 --->') -plt.ylabel('<--- y2 --->') -plt.title('Phase Plot') -plt.savefig('ark_kepler_phase.png') +plt.xlabel("<--- y1 --->") +plt.ylabel("<--- y2 --->") +plt.title("Phase Plot") +plt.savefig("ark_kepler_phase.png") plt.close() diff --git a/examples/arkode/C_serial/plot_brusselator1D.py b/examples/arkode/C_serial/plot_brusselator1D.py index 3cc29051e5..72a3402c4a 100755 --- a/examples/arkode/C_serial/plot_brusselator1D.py +++ b/examples/arkode/C_serial/plot_brusselator1D.py @@ -20,23 +20,23 @@ import numpy as np # load mesh data file -mesh = np.loadtxt('bruss_mesh.txt', dtype=np.double) +mesh = np.loadtxt("bruss_mesh.txt", dtype=np.double) # load solution data files -udata = np.loadtxt('bruss_u.txt', dtype=np.double) -vdata = np.loadtxt('bruss_v.txt', dtype=np.double) -wdata = np.loadtxt('bruss_w.txt', dtype=np.double) +udata = np.loadtxt("bruss_u.txt", dtype=np.double) +vdata = np.loadtxt("bruss_v.txt", dtype=np.double) +wdata = np.loadtxt("bruss_w.txt", dtype=np.double) # determine number of time steps, mesh size -nt,nx = np.shape(udata) +nt, nx = np.shape(udata) # determine min/max values -umin = 0.9*udata.min() -umax = 1.1*udata.max() -vmin = 0.9*vdata.min() -vmax = 1.1*vdata.max() -wmin = 0.9*wdata.min() -wmax = 1.1*wdata.max() +umin = 0.9 * udata.min() +umax = 1.1 * udata.max() +vmin = 0.9 * vdata.min() +vmax = 1.1 * vdata.max() +wmin = 0.9 * wdata.min() +wmax = 1.1 * wdata.max() minval = np.array([umin, vmin, wmin]).min() maxval = np.array([umax, vmax, wmax]).max() @@ -44,21 +44,21 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'brusselator1D.' + repr(tstep).zfill(3) + '.png' - tstr = repr(tstep) + pname = "brusselator1D." 
+ repr(tstep).zfill(3) + ".png" + tstr = repr(tstep) nxstr = repr(nx) # plot current solution and save to disk plt.figure(1) - plt.plot(mesh,udata[tstep,:],label="u") - plt.plot(mesh,vdata[tstep,:],label="v") - plt.plot(mesh,wdata[tstep,:],label="w") - plt.xlabel('x') - plt.ylabel('solution') - plt.title('Solutions at output ' + tstr + ', mesh = ' + nxstr) + plt.plot(mesh, udata[tstep, :], label="u") + plt.plot(mesh, vdata[tstep, :], label="v") + plt.plot(mesh, wdata[tstep, :], label="w") + plt.xlabel("x") + plt.ylabel("solution") + plt.title("Solutions at output " + tstr + ", mesh = " + nxstr) plt.axis((0.0, 1.0, minval, maxval)) plt.grid() - plt.legend(loc='upper right', shadow=True) + plt.legend(loc="upper right", shadow=True) plt.savefig(pname) plt.close() diff --git a/examples/arkode/C_serial/plot_brusselator1D_FEM.py b/examples/arkode/C_serial/plot_brusselator1D_FEM.py index d47bf2b40e..61bbf4f069 100755 --- a/examples/arkode/C_serial/plot_brusselator1D_FEM.py +++ b/examples/arkode/C_serial/plot_brusselator1D_FEM.py @@ -20,52 +20,52 @@ import numpy as np # load mesh data file -mesh = np.loadtxt('bruss_FEM_mesh.txt', dtype=np.double) +mesh = np.loadtxt("bruss_FEM_mesh.txt", dtype=np.double) # load solution data files -udata = np.loadtxt('bruss_FEM_u.txt', dtype=np.double) -vdata = np.loadtxt('bruss_FEM_v.txt', dtype=np.double) -wdata = np.loadtxt('bruss_FEM_w.txt', dtype=np.double) +udata = np.loadtxt("bruss_FEM_u.txt", dtype=np.double) +vdata = np.loadtxt("bruss_FEM_v.txt", dtype=np.double) +wdata = np.loadtxt("bruss_FEM_w.txt", dtype=np.double) # determine number of time steps, mesh size -nt,nx = np.shape(udata) +nt, nx = np.shape(udata) # determine min/max values -umin = 0.9*udata.min() -umax = 1.1*udata.max() -vmin = 0.9*vdata.min() -vmax = 1.1*vdata.max() -wmin = 0.9*wdata.min() -wmax = 1.1*wdata.max() +umin = 0.9 * udata.min() +umax = 1.1 * udata.max() +vmin = 0.9 * vdata.min() +vmax = 1.1 * vdata.max() +wmin = 0.9 * wdata.min() +wmax = 1.1 * wdata.max() minval = np.array([umin, vmin, wmin]).min() maxval = np.array([umax, vmax, wmax]).max() # plot the mesh plt.figure(1) -plt.plot(mesh,0.0*mesh,'o') -plt.xlabel('x') -plt.title('FEM mesh') -plt.savefig('brusselator1D_FEM_mesh.png') +plt.plot(mesh, 0.0 * mesh, "o") +plt.xlabel("x") +plt.title("FEM mesh") +plt.savefig("brusselator1D_FEM_mesh.png") # generate plots of results for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'brusselator1D_FEM.' + repr(tstep).zfill(3) + '.png' - tstr = repr(tstep) + pname = "brusselator1D_FEM." 
+ repr(tstep).zfill(3) + ".png" + tstr = repr(tstep) nxstr = repr(nx) # plot current solution and save to disk plt.figure(1) - plt.plot(mesh,udata[tstep,:],label="u") - plt.plot(mesh,vdata[tstep,:],label="v") - plt.plot(mesh,wdata[tstep,:],label="w") - plt.xlabel('x') - plt.ylabel('solution') - plt.title('Solutions at output ' + tstr + ', mesh = ' + nxstr) + plt.plot(mesh, udata[tstep, :], label="u") + plt.plot(mesh, vdata[tstep, :], label="v") + plt.plot(mesh, wdata[tstep, :], label="w") + plt.xlabel("x") + plt.ylabel("solution") + plt.title("Solutions at output " + tstr + ", mesh = " + nxstr) plt.axis((0.0, 1.0, minval, maxval)) plt.grid() - plt.legend(loc='upper right', shadow=True) + plt.legend(loc="upper right", shadow=True) plt.savefig(pname) plt.close() diff --git a/examples/arkode/C_serial/plot_heat1D.py b/examples/arkode/C_serial/plot_heat1D.py index d1c8e2bfdf..7b7b0e3fd2 100755 --- a/examples/arkode/C_serial/plot_heat1D.py +++ b/examples/arkode/C_serial/plot_heat1D.py @@ -20,31 +20,31 @@ import numpy as np # load mesh data file -mesh = np.loadtxt('heat_mesh.txt', dtype=np.double) +mesh = np.loadtxt("heat_mesh.txt", dtype=np.double) # load solution data file -data = np.loadtxt('heat1D.txt', dtype=np.double) +data = np.loadtxt("heat1D.txt", dtype=np.double) # determine number of time steps, mesh size -nt,nx = np.shape(data) +nt, nx = np.shape(data) # determine maximum temperature -maxtemp = 1.1*data.max() +maxtemp = 1.1 * data.max() # generate plots of results for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'heat1d.' + repr(tstep).zfill(3) + '.png' - tstr = repr(tstep) + pname = "heat1d." + repr(tstep).zfill(3) + ".png" + tstr = repr(tstep) nxstr = repr(nx) # plot current solution and save to disk plt.figure(1) - plt.plot(mesh,data[tstep,:]) - plt.xlabel('x') - plt.ylabel('solution') - plt.title('u(x) at output ' + tstr + ', mesh = ' + nxstr) + plt.plot(mesh, data[tstep, :]) + plt.xlabel("x") + plt.ylabel("solution") + plt.title("u(x) at output " + tstr + ", mesh = " + nxstr) plt.axis((0.0, 1.0, 0.0, maxtemp)) plt.grid() plt.savefig(pname) diff --git a/examples/arkode/C_serial/plot_heat1D_adapt.py b/examples/arkode/C_serial/plot_heat1D_adapt.py index fa813fff04..ab361a968e 100755 --- a/examples/arkode/C_serial/plot_heat1D_adapt.py +++ b/examples/arkode/C_serial/plot_heat1D_adapt.py @@ -20,39 +20,41 @@ import numpy as np # load mesh data file as list of NumPy arrays -inp = open('heat_mesh.txt').readlines() +inp = open("heat_mesh.txt").readlines() mesh = [] for line in inp: mesh.append(np.array(str.split(line), dtype=np.double)) # load solution data file as list of NumPy arrays -inp = open('heat1D.txt').readlines() +inp = open("heat1D.txt").readlines() data = [] for line in inp: data.append(np.array(str.split(line), dtype=np.double)) # determine number of time steps -nt = len(mesh) +nt = len(mesh) nt2 = len(data) -if (nt != nt2): - sys.exit('plot_heat1D_adapt.py error: data and mesh files have different numbers of time steps') +if nt != nt2: + sys.exit( + "plot_heat1D_adapt.py error: data and mesh files have different numbers of time steps" + ) # determine minimum/maximum temperature mintemp = 0.0 maxtemp = 0.0 for tstep in range(nt): mx = data[tstep].max() - if (mx > maxtemp): + if mx > maxtemp: maxtemp = mx mn = data[tstep].min() - if (mn < mintemp): + if mn < mintemp: mintemp = mn -if (maxtemp > 0.0): +if maxtemp > 0.0: maxtemp *= 1.1 else: maxtemp *= 0.9 -if (mintemp > 0.0): +if mintemp > 0.0: mintemp *= 0.9 else: mintemp *= 1.1 
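# ------------------------------------------------------------------------------
# A minimal stand-alone sketch of the ragged-file handling used by
# plot_heat1D_adapt.py above: because the adaptive run may write a different
# number of values on each row of heat_mesh.txt and heat1D.txt, the script reads
# one NumPy array per line instead of calling np.loadtxt on the whole file.
# The helper name below is an assumption for illustration only; the file names
# are the ones the script reads.
import numpy as np

def read_ragged(filename):
    """Return a list with one 1D array per non-empty row of a text file."""
    rows = []
    with open(filename) as f:
        for line in f:
            if line.strip():
                rows.append(np.array(line.split(), dtype=np.double))
    return rows

# mesh = read_ragged("heat_mesh.txt")  # one mesh per output time
# data = read_ragged("heat1D.txt")     # one solution vector per output time
# The script checks len(mesh) == len(data) before plotting each step.
# ------------------------------------------------------------------------------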
@@ -62,16 +64,16 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'heat1d.' + repr(tstep).zfill(3) + '.png' - tstr = repr(tstep) + pname = "heat1d." + repr(tstep).zfill(3) + ".png" + tstr = repr(tstep) nxstr = repr(len(data[tstep])) # plot current solution and save to disk plt.figure(1) - plt.plot(mesh[tstep],data[tstep],'-o') - plt.xlabel('x') - plt.ylabel('solution') - plt.title('u(x) at output ' + tstr + ', mesh = ' + nxstr) + plt.plot(mesh[tstep], data[tstep], "-o") + plt.xlabel("x") + plt.ylabel("solution") + plt.title("u(x) at output " + tstr + ", mesh = " + nxstr) plt.axis((0.0, 1.0, mintemp, maxtemp)) plt.grid() plt.savefig(pname) diff --git a/examples/arkode/C_serial/plot_sol.py b/examples/arkode/C_serial/plot_sol.py index af783fb053..813f35bec2 100755 --- a/examples/arkode/C_serial/plot_sol.py +++ b/examples/arkode/C_serial/plot_sol.py @@ -20,16 +20,16 @@ import numpy as np # load solution data file -data = np.loadtxt('solution.txt', dtype=np.double) +data = np.loadtxt("solution.txt", dtype=np.double) # determine number of time steps, number of fields -nt,nv = np.shape(data) +nt, nv = np.shape(data) # extract time array -times = data[:,0] +times = data[:, 0] # parse comment line to determine solution names -f = open('solution.txt', 'r') +f = open("solution.txt", "r") commentline = f.readline() commentsplit = commentline.split() names = commentsplit[2:] @@ -38,18 +38,16 @@ plt.figure() # add curves to figure -for i in range(nv-1): - plt.plot(times,data[:,i+1],label=names[i]) -plt.xlabel('t') -if (nv > 2): - plt.ylabel('solutions') +for i in range(nv - 1): + plt.plot(times, data[:, i + 1], label=names[i]) +plt.xlabel("t") +if nv > 2: + plt.ylabel("solutions") else: - plt.ylabel('solution') -plt.legend(loc='upper right', shadow=True) + plt.ylabel("solution") +plt.legend(loc="upper right", shadow=True) plt.grid() -plt.savefig('solution.png') - - +plt.savefig("solution.png") ##### end of script ##### diff --git a/examples/arkode/C_serial/plot_sol_log.py b/examples/arkode/C_serial/plot_sol_log.py index ca27f9eb59..2437cce448 100755 --- a/examples/arkode/C_serial/plot_sol_log.py +++ b/examples/arkode/C_serial/plot_sol_log.py @@ -20,16 +20,16 @@ import numpy as np # load solution data file -data = np.loadtxt('solution.txt', dtype=np.double) +data = np.loadtxt("solution.txt", dtype=np.double) # determine number of time steps, number of fields -nt,nv = np.shape(data) +nt, nv = np.shape(data) # extract time array -times = data[:,0] +times = data[:, 0] # parse comment line to determine solution names -f = open('solution.txt', 'r') +f = open("solution.txt", "r") commentline = f.readline() commentsplit = commentline.split() names = commentsplit[2:] @@ -38,18 +38,16 @@ plt.figure() # add curves to figure -for i in range(nv-1): - plt.loglog(times,data[:,i+1],label=names[i]) -plt.xlabel('t') -if (nv > 2): - plt.ylabel('solutions') +for i in range(nv - 1): + plt.loglog(times, data[:, i + 1], label=names[i]) +plt.xlabel("t") +if nv > 2: + plt.ylabel("solutions") else: - plt.ylabel('solution') -plt.legend(loc='upper right', shadow=True) + plt.ylabel("solution") +plt.legend(loc="upper right", shadow=True) plt.grid() -plt.savefig('solution.png') - - +plt.savefig("solution.png") ##### end of script ##### diff --git a/examples/cvode/CXX_parallel/plot_heat2D_p.py b/examples/cvode/CXX_parallel/plot_heat2D_p.py index 5e5357873a..9f320f609b 100755 --- a/examples/cvode/CXX_parallel/plot_heat2D_p.py +++ b/examples/cvode/CXX_parallel/plot_heat2D_p.py @@ 
-28,7 +28,7 @@ # ------------------------------------------------------------------------------ # read MPI root process problem info file -infofile = 'heat2d_info.00000.txt' +infofile = "heat2d_info.00000.txt" with open(infofile) as fn: @@ -59,7 +59,7 @@ continue # total number of MPI processes - if "np"in line: + if "np" in line: nprocs = int(text[1]) continue @@ -71,11 +71,11 @@ # ------------------------------------------------------------------------------ # load subdomain information, store in table -subdomains = np.zeros((nprocs,4), dtype=np.int) +subdomains = np.zeros((nprocs, 4), dtype=np.int) for i in range(nprocs): - infofile = 'heat2d_info.' + repr(i).zfill(5) + '.txt' + infofile = "heat2d_info." + repr(i).zfill(5) + ".txt" with open(infofile) as fn: @@ -87,56 +87,58 @@ # x-direction starting index if "is" in line: - subdomains[i,0] = float(text[1]) + subdomains[i, 0] = float(text[1]) continue # x-direction ending index if "ie" in line: - subdomains[i,1] = float(text[1]) + subdomains[i, 1] = float(text[1]) continue # y-direction starting index if "js" in line: - subdomains[i,2] = float(text[1]) + subdomains[i, 2] = float(text[1]) continue # y-direction ending index if "je" in line: - subdomains[i,3] = float(text[1]) + subdomains[i, 3] = float(text[1]) continue # ------------------------------------------------------------------------------ -plottype = ['solution', 'error'] +plottype = ["solution", "error"] for pt in plottype: # fill array with data - time = np.zeros(nt) + time = np.zeros(nt) result = np.zeros((nt, ny, nx)) for i in range(nprocs): - datafile = 'heat2d_' + pt + '.' + repr(i).zfill(5) + '.txt' + datafile = "heat2d_" + pt + "." + repr(i).zfill(5) + ".txt" # load data data = np.loadtxt(datafile, dtype=np.double) - if (np.shape(data)[0] != nt): - sys.exit('error: subdomain ' + i + ' has an incorrect number of time steps') + if np.shape(data)[0] != nt: + sys.exit("error: subdomain " + i + " has an incorrect number of time steps") # subdomain indices - istart = subdomains[i,0] - iend = subdomains[i,1] - jstart = subdomains[i,2] - jend = subdomains[i,3] - nxl = iend - istart + 1 - nyl = jend - jstart + 1 + istart = subdomains[i, 0] + iend = subdomains[i, 1] + jstart = subdomains[i, 2] + jend = subdomains[i, 3] + nxl = iend - istart + 1 + nyl = jend - jstart + 1 # extract data for i in range(nt): - time[i] = data[i,0] - result[i,jstart:jend+1,istart:iend+1] = np.reshape(data[i,1:], (nyl,nxl)) + time[i] = data[i, 0] + result[i, jstart : jend + 1, istart : iend + 1] = np.reshape( + data[i, 1:], (nyl, nxl) + ) # determine extents of plots maxtemp = 1.1 * result.max() @@ -145,7 +147,7 @@ # set x and y meshgrid objects xspan = np.linspace(0.0, xu, nx) yspan = np.linspace(0.0, yu, ny) - X,Y = np.meshgrid(xspan, yspan) + X, Y = np.meshgrid(xspan, yspan) nxstr = repr(nx) nystr = repr(ny) @@ -154,24 +156,33 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'heat2d_surf_' + pt + '.' + repr(tstep).zfill(3) + '.png' - tstr = str(time[tstep]) + pname = "heat2d_surf_" + pt + "." 
+ repr(tstep).zfill(3) + ".png" + tstr = str(time[tstep]) # plot surface and save to disk fig = plt.figure(1) - ax = fig.add_subplot(111, projection='3d') - - ax.plot_surface(X, Y, result[tstep,:,:], rstride=1, cstride=1, - cmap=cm.jet, linewidth=0, antialiased=True, shade=True) - - ax.set_xlabel('x') - ax.set_ylabel('y') + ax = fig.add_subplot(111, projection="3d") + + ax.plot_surface( + X, + Y, + result[tstep, :, :], + rstride=1, + cstride=1, + cmap=cm.jet, + linewidth=0, + antialiased=True, + shade=True, + ) + + ax.set_xlabel("x") + ax.set_ylabel("y") ax.set_zlim((mintemp, maxtemp)) - ax.view_init(20,45) - if (pt == 'solution'): - title('u(x,y) at t = ' + tstr) + ax.view_init(20, 45) + if pt == "solution": + title("u(x,y) at t = " + tstr) else: - title('error(x,y) at t = ' + tstr) + title("error(x,y) at t = " + tstr) savefig(pname) plt.close() diff --git a/examples/cvode/CXX_parhyp/plot_heat2D_p.py b/examples/cvode/CXX_parhyp/plot_heat2D_p.py index 58673d17b2..f567f1c621 100755 --- a/examples/cvode/CXX_parhyp/plot_heat2D_p.py +++ b/examples/cvode/CXX_parhyp/plot_heat2D_p.py @@ -28,7 +28,7 @@ # ------------------------------------------------------------------------------ # read MPI root process problem info file -infofile = 'heat2d_info.00000.txt' +infofile = "heat2d_info.00000.txt" with open(infofile) as fn: @@ -59,7 +59,7 @@ continue # total number of MPI processes - if "np"in line: + if "np" in line: nprocs = int(text[1]) continue @@ -71,11 +71,11 @@ # ------------------------------------------------------------------------------ # load subdomain information, store in table -subdomains = np.zeros((nprocs,4), dtype=np.int) +subdomains = np.zeros((nprocs, 4), dtype=np.int) for i in range(nprocs): - infofile = 'heat2d_info.' + repr(i).zfill(5) + '.txt' + infofile = "heat2d_info." + repr(i).zfill(5) + ".txt" with open(infofile) as fn: @@ -87,56 +87,58 @@ # x-direction starting index if "is" in line: - subdomains[i,0] = float(text[1]) + subdomains[i, 0] = float(text[1]) continue # x-direction ending index if "ie" in line: - subdomains[i,1] = float(text[1]) + subdomains[i, 1] = float(text[1]) continue # y-direction starting index if "js" in line: - subdomains[i,2] = float(text[1]) + subdomains[i, 2] = float(text[1]) continue # y-direction ending index if "je" in line: - subdomains[i,3] = float(text[1]) + subdomains[i, 3] = float(text[1]) continue # ------------------------------------------------------------------------------ -plottype = ['solution', 'error'] +plottype = ["solution", "error"] for pt in plottype: # fill array with data - time = np.zeros(nt) + time = np.zeros(nt) result = np.zeros((nt, ny, nx)) for i in range(nprocs): - datafile = 'heat2d_' + pt + '.' + repr(i).zfill(5) + '.txt' + datafile = "heat2d_" + pt + "." 
+ repr(i).zfill(5) + ".txt" # load data data = np.loadtxt(datafile, dtype=np.double) - if (np.shape(data)[0] != nt): - sys.exit('error: subdomain ' + i + ' has an incorrect number of time steps') + if np.shape(data)[0] != nt: + sys.exit("error: subdomain " + i + " has an incorrect number of time steps") # subdomain indices - istart = subdomains[i,0] - iend = subdomains[i,1] - jstart = subdomains[i,2] - jend = subdomains[i,3] - nxl = iend - istart + 1 - nyl = jend - jstart + 1 + istart = subdomains[i, 0] + iend = subdomains[i, 1] + jstart = subdomains[i, 2] + jend = subdomains[i, 3] + nxl = iend - istart + 1 + nyl = jend - jstart + 1 # extract data for i in range(nt): - time[i] = data[i,0] - result[i,jstart:jend+1,istart:iend+1] = np.reshape(data[i,1:], (nyl,nxl)) + time[i] = data[i, 0] + result[i, jstart : jend + 1, istart : iend + 1] = np.reshape( + data[i, 1:], (nyl, nxl) + ) # determine extents of plots maxtemp = 1.1 * result.max() @@ -145,7 +147,7 @@ # set x and y meshgrid objects xspan = np.linspace(0.0, xu, nx) yspan = np.linspace(0.0, yu, ny) - X,Y = np.meshgrid(xspan, yspan) + X, Y = np.meshgrid(xspan, yspan) nxstr = repr(nx) nystr = repr(ny) @@ -154,24 +156,33 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'heat2d_surf_' + pt + '.' + repr(tstep).zfill(3) + '.png' - tstr = str(time[tstep]) + pname = "heat2d_surf_" + pt + "." + repr(tstep).zfill(3) + ".png" + tstr = str(time[tstep]) # plot surface and save to disk fig = plt.figure(1) - ax = fig.add_subplot(111, projection='3d') - - ax.plot_surface(X, Y, result[tstep,:,:], rstride=1, cstride=1, - cmap=cm.jet, linewidth=0, antialiased=True, shade=True) - - ax.set_xlabel('x') - ax.set_ylabel('y') + ax = fig.add_subplot(111, projection="3d") + + ax.plot_surface( + X, + Y, + result[tstep, :, :], + rstride=1, + cstride=1, + cmap=cm.jet, + linewidth=0, + antialiased=True, + shade=True, + ) + + ax.set_xlabel("x") + ax.set_ylabel("y") ax.set_zlim((mintemp, maxtemp)) - ax.view_init(20,45) - if (pt == 'solution'): - title('u(x,y) at t = ' + tstr) + ax.view_init(20, 45) + if pt == "solution": + title("u(x,y) at t = " + tstr) else: - title('error(x,y) at t = ' + tstr) + title("error(x,y) at t = " + tstr) savefig(pname) plt.close() diff --git a/examples/cvode/CXX_serial/plot_heat2D.py b/examples/cvode/CXX_serial/plot_heat2D.py index bbadc4de32..90ceba7d96 100755 --- a/examples/cvode/CXX_serial/plot_heat2D.py +++ b/examples/cvode/CXX_serial/plot_heat2D.py @@ -28,7 +28,7 @@ # ------------------------------------------------------------------------------ # read problem info file -infofile = 'heat2d_info.txt' +infofile = "heat2d_info.txt" with open(infofile) as fn: @@ -65,21 +65,21 @@ # ------------------------------------------------------------------------------ -plottype = ['solution', 'error'] +plottype = ["solution", "error"] for pt in plottype: # fill array with data - time = np.zeros(nt) + time = np.zeros(nt) result = np.zeros((nt, ny, nx)) # load data - data = np.loadtxt('heat2d_' + pt + '.txt', dtype=np.double) + data = np.loadtxt("heat2d_" + pt + ".txt", dtype=np.double) # extract data for i in range(nt): - time[i] = data[i,0] - result[i,0:ny+1,0:nx+1] = np.reshape(data[i,1:], (ny,nx)) + time[i] = data[i, 0] + result[i, 0 : ny + 1, 0 : nx + 1] = np.reshape(data[i, 1:], (ny, nx)) # determine extents of plots maxtemp = 1.1 * result.max() @@ -88,7 +88,7 @@ # set x and y meshgrid objects xspan = np.linspace(0.0, xu, nx) yspan = np.linspace(0.0, yu, ny) - X,Y = np.meshgrid(xspan, yspan) + X, 
Y = np.meshgrid(xspan, yspan) nxstr = repr(nx) nystr = repr(ny) @@ -97,24 +97,33 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'heat2d_surf_' + pt + '.' + repr(tstep).zfill(3) + '.png' - tstr = str(time[tstep]) + pname = "heat2d_surf_" + pt + "." + repr(tstep).zfill(3) + ".png" + tstr = str(time[tstep]) # plot surface and save to disk fig = plt.figure(1) - ax = fig.add_subplot(111, projection='3d') - - ax.plot_surface(X, Y, result[tstep,:,:], rstride=1, cstride=1, - cmap=cm.jet, linewidth=0, antialiased=True, shade=True) - - ax.set_xlabel('x') - ax.set_ylabel('y') + ax = fig.add_subplot(111, projection="3d") + + ax.plot_surface( + X, + Y, + result[tstep, :, :], + rstride=1, + cstride=1, + cmap=cm.jet, + linewidth=0, + antialiased=True, + shade=True, + ) + + ax.set_xlabel("x") + ax.set_ylabel("y") ax.set_zlim((mintemp, maxtemp)) - ax.view_init(20,45) - if (pt == 'solution'): - title('u(x,y) at t = ' + tstr) + ax.view_init(20, 45) + if pt == "solution": + title("u(x,y) at t = " + tstr) else: - title('error(x,y) at t = ' + tstr) + title("error(x,y) at t = " + tstr) savefig(pname) plt.close() diff --git a/examples/cvode/serial/plot_cvParticle.py b/examples/cvode/serial/plot_cvParticle.py index 6557000a0a..f686312a78 100755 --- a/examples/cvode/serial/plot_cvParticle.py +++ b/examples/cvode/serial/plot_cvParticle.py @@ -21,20 +21,26 @@ import matplotlib.pyplot as plt # command line options -parser = argparse.ArgumentParser(description='Plots cvPraticle_dns output') -parser.add_argument('--sfile', type=str, - default='cvParticle_solution.txt', - help='solution output file to read') -parser.add_argument('--efile', type=str, - default='cvParticle_error.txt', - help='error output file to read') -parser.add_argument('--alpha', type=float, nargs=1, - default=1.0, - help='set a non-default alpha value') -parser.add_argument('--slim', type=float, nargs=2, - help='x and y limits for solution plot') -parser.add_argument('--eylim', type=float, nargs=2, - help='y limits for error plot') +parser = argparse.ArgumentParser(description="Plots cvPraticle_dns output") +parser.add_argument( + "--sfile", + type=str, + default="cvParticle_solution.txt", + help="solution output file to read", +) +parser.add_argument( + "--efile", + type=str, + default="cvParticle_error.txt", + help="error output file to read", +) +parser.add_argument( + "--alpha", type=float, nargs=1, default=1.0, help="set a non-default alpha value" +) +parser.add_argument( + "--slim", type=float, nargs=2, help="x and y limits for solution plot" +) +parser.add_argument("--eylim", type=float, nargs=2, help="y limits for error plot") # parse inputs args = parser.parse_args() @@ -48,23 +54,23 @@ y = data[:, 2] # unit circle -tt = np.linspace(0,np.pi*2,10000) +tt = np.linspace(0, np.pi * 2, 10000) xt = np.cos(tt) yt = np.sin(tt) # plot solution fig, ax = plt.subplots() -plt.plot(xt, yt, color='black', linestyle='--') -plt.scatter(x, y, color='red') +plt.plot(xt, yt, color="black", linestyle="--") +plt.scatter(x, y, color="red") -if (args.slim): +if args.slim: plt.xlim((args.slim[0], args.slim[1])) plt.ylim((args.slim[0], args.slim[1])) -plt.xlabel('x') -plt.ylabel('y') -plt.title('Solution') -ax.set_aspect('equal') +plt.xlabel("x") +plt.ylabel("y") +plt.title("Solution") +ax.set_aspect("equal") # true solution xt = np.cos(args.alpha * t) @@ -72,15 +78,15 @@ # plot solution fig, ax = plt.subplots() -plt.plot(t, x, linestyle='-', label='x') -plt.plot(t, xt, linestyle='--', label='x true') 
-plt.plot(t, y, linestyle='-', label='y') -plt.plot(t, yt, linestyle='--', label='y true') +plt.plot(t, x, linestyle="-", label="x") +plt.plot(t, xt, linestyle="--", label="x true") +plt.plot(t, y, linestyle="-", label="y") +plt.plot(t, yt, linestyle="--", label="y true") -plt.xlabel('t') -plt.ylabel('position') -plt.title('Particle Position Over Time') -plt.legend(loc='lower right') +plt.xlabel("t") +plt.ylabel("position") +plt.title("Particle Position Over Time") +plt.legend(loc="lower right") # read error output file data = np.loadtxt(args.efile, dtype=np.double) @@ -93,17 +99,17 @@ # plot solution fig, ax = plt.subplots() -plt.semilogy(t, xerr, label='x err') -plt.semilogy(t, yerr, label='y err') -plt.semilogy(t, cerr, label='c err') +plt.semilogy(t, xerr, label="x err") +plt.semilogy(t, yerr, label="y err") +plt.semilogy(t, cerr, label="c err") -if (args.eylim): +if args.eylim: plt.ylim((args.eylim[0], args.eylim[1])) -plt.xlabel('time') -plt.ylabel('error') -plt.legend(loc='lower right') -plt.title('Error in position and constraint') +plt.xlabel("time") +plt.ylabel("error") +plt.legend(loc="lower right") +plt.title("Error in position and constraint") plt.grid() # display plots diff --git a/examples/cvode/serial/plot_cvPendulum.py b/examples/cvode/serial/plot_cvPendulum.py index 07314f2936..c855c70d6d 100755 --- a/examples/cvode/serial/plot_cvPendulum.py +++ b/examples/cvode/serial/plot_cvPendulum.py @@ -21,9 +21,8 @@ import matplotlib.pyplot as plt # command line options -parser = argparse.ArgumentParser(description='Plots cvPendulum_dns output') -parser.add_argument('sfile', type=str, - help='solution output file to read') +parser = argparse.ArgumentParser(description="Plots cvPendulum_dns output") +parser.add_argument("sfile", type=str, help="solution output file to read") # parse inputs args = parser.parse_args() @@ -32,9 +31,9 @@ data = np.loadtxt(args.sfile, dtype=np.double) # extract times, positions, and velocities -t = data[:, 0] -x = data[:, 1] -y = data[:, 2] +t = data[:, 0] +x = data[:, 1] +y = data[:, 2] vx = data[:, 3] vy = data[:, 4] @@ -42,50 +41,50 @@ ref = np.loadtxt("cvPendulum_dns_ref.txt", dtype=np.double) # extract positions and velocities -xr = ref[:, 1] -yr = ref[:, 2] +xr = ref[:, 1] +yr = ref[:, 2] vxr = ref[:, 3] vyr = ref[:, 4] # lower half of unit circle -tt = np.linspace(np.pi, 2*np.pi, 10000) +tt = np.linspace(np.pi, 2 * np.pi, 10000) xt = np.cos(tt) yt = np.sin(tt) # plot solution in xy plane fig, ax = plt.subplots() -ax.axhline(y=0, color='black', linestyle='--', label=None) -ax.axvline(x=0, color='black', linestyle='--', label=None) -plt.plot(xt, yt, color='black', linestyle='--', label=None) -plt.scatter(x, y, color='red', label='comp') -plt.scatter(xr, yr, color='blue', label='ref') - -plt.xlabel('x') -plt.ylabel('y') -plt.title('Pendulum') -ax.set_aspect('equal') -plt.legend(loc='lower right') +ax.axhline(y=0, color="black", linestyle="--", label=None) +ax.axvline(x=0, color="black", linestyle="--", label=None) +plt.plot(xt, yt, color="black", linestyle="--", label=None) +plt.scatter(x, y, color="red", label="comp") +plt.scatter(xr, yr, color="blue", label="ref") + +plt.xlabel("x") +plt.ylabel("y") +plt.title("Pendulum") +ax.set_aspect("equal") +plt.legend(loc="lower right") # plot position over time fig, ax = plt.subplots() -ax.axhline(y=0, color='black', linestyle='--') -plt.plot(t, x, label='x') -plt.plot(t, y, label='y') +ax.axhline(y=0, color="black", linestyle="--") +plt.plot(t, x, label="x") +plt.plot(t, y, label="y") -plt.xlabel('t') 
-plt.ylabel('position') -plt.title('Pendulum Position') +plt.xlabel("t") +plt.ylabel("position") +plt.title("Pendulum Position") plt.legend() # plot velocity over time fig, ax = plt.subplots() -ax.axhline(y=0, color='black', linestyle='--') -plt.plot(t, vx, label='$v_x$') -plt.plot(t, vy, label='$v_y$') +ax.axhline(y=0, color="black", linestyle="--") +plt.plot(t, vx, label="$v_x$") +plt.plot(t, vy, label="$v_y$") -plt.xlabel('t') -plt.ylabel('velocity') -plt.title('Pendulum Velocity') +plt.xlabel("t") +plt.ylabel("velocity") +plt.title("Pendulum Velocity") plt.legend() # display plots diff --git a/examples/cvodes/serial/plot_cvsParticle.py b/examples/cvodes/serial/plot_cvsParticle.py index fb0c66da1c..72e8736388 100755 --- a/examples/cvodes/serial/plot_cvsParticle.py +++ b/examples/cvodes/serial/plot_cvsParticle.py @@ -21,20 +21,26 @@ import matplotlib.pyplot as plt # command line options -parser = argparse.ArgumentParser(description='Plots cvsPraticle_dns output') -parser.add_argument('--sfile', type=str, - default='cvsParticle_solution.txt', - help='solution output file to read') -parser.add_argument('--efile', type=str, - default='cvsParticle_error.txt', - help='error output file to read') -parser.add_argument('--alpha', type=float, nargs=1, - default=1.0, - help='set a non-default alpha value') -parser.add_argument('--slim', type=float, nargs=2, - help='x and y limits for solution plot') -parser.add_argument('--eylim', type=float, nargs=2, - help='y limits for error plot') +parser = argparse.ArgumentParser(description="Plots cvsPraticle_dns output") +parser.add_argument( + "--sfile", + type=str, + default="cvsParticle_solution.txt", + help="solution output file to read", +) +parser.add_argument( + "--efile", + type=str, + default="cvsParticle_error.txt", + help="error output file to read", +) +parser.add_argument( + "--alpha", type=float, nargs=1, default=1.0, help="set a non-default alpha value" +) +parser.add_argument( + "--slim", type=float, nargs=2, help="x and y limits for solution plot" +) +parser.add_argument("--eylim", type=float, nargs=2, help="y limits for error plot") # parse inputs args = parser.parse_args() @@ -48,23 +54,23 @@ y = data[:, 2] # unit circle -tt = np.linspace(0,np.pi*2,10000) +tt = np.linspace(0, np.pi * 2, 10000) xt = np.cos(tt) yt = np.sin(tt) # plot solution fig, ax = plt.subplots() -plt.plot(xt, yt, color='black', linestyle='--') -plt.scatter(x, y, color='red') +plt.plot(xt, yt, color="black", linestyle="--") +plt.scatter(x, y, color="red") -if (args.slim): +if args.slim: plt.xlim((args.slim[0], args.slim[1])) plt.ylim((args.slim[0], args.slim[1])) -plt.xlabel('x') -plt.ylabel('y') -plt.title('Solution') -ax.set_aspect('equal') +plt.xlabel("x") +plt.ylabel("y") +plt.title("Solution") +ax.set_aspect("equal") # true solution xt = np.cos(args.alpha * t) @@ -72,15 +78,15 @@ # plot solution fig, ax = plt.subplots() -plt.plot(t, x, linestyle='-', label='x') -plt.plot(t, xt, linestyle='--', label='x true') -plt.plot(t, y, linestyle='-', label='y') -plt.plot(t, yt, linestyle='--', label='y true') +plt.plot(t, x, linestyle="-", label="x") +plt.plot(t, xt, linestyle="--", label="x true") +plt.plot(t, y, linestyle="-", label="y") +plt.plot(t, yt, linestyle="--", label="y true") -plt.xlabel('t') -plt.ylabel('position') -plt.title('Particle Position Over Time') -plt.legend(loc='lower right') +plt.xlabel("t") +plt.ylabel("position") +plt.title("Particle Position Over Time") +plt.legend(loc="lower right") # read error output file data = np.loadtxt(args.efile, 
dtype=np.double) @@ -93,17 +99,17 @@ # plot solution fig, ax = plt.subplots() -plt.semilogy(t, xerr, label='x err') -plt.semilogy(t, yerr, label='y err') -plt.semilogy(t, cerr, label='c err') +plt.semilogy(t, xerr, label="x err") +plt.semilogy(t, yerr, label="y err") +plt.semilogy(t, cerr, label="c err") -if (args.eylim): +if args.eylim: plt.ylim((args.eylim[0], args.eylim[1])) -plt.xlabel('time') -plt.ylabel('error') -plt.legend(loc='lower right') -plt.title('Error in position and constraint') +plt.xlabel("time") +plt.ylabel("error") +plt.legend(loc="lower right") +plt.title("Error in position and constraint") plt.grid() # display plots diff --git a/examples/cvodes/serial/plot_cvsPendulum.py b/examples/cvodes/serial/plot_cvsPendulum.py index 0376a755bb..87408f4634 100755 --- a/examples/cvodes/serial/plot_cvsPendulum.py +++ b/examples/cvodes/serial/plot_cvsPendulum.py @@ -21,9 +21,8 @@ import matplotlib.pyplot as plt # command line options -parser = argparse.ArgumentParser(description='Plots cvsPendulum_dns output') -parser.add_argument('sfile', type=str, - help='solution output file to read') +parser = argparse.ArgumentParser(description="Plots cvsPendulum_dns output") +parser.add_argument("sfile", type=str, help="solution output file to read") # parse inputs args = parser.parse_args() @@ -32,9 +31,9 @@ data = np.loadtxt(args.sfile, dtype=np.double) # extract times, positions, and velocities -t = data[:, 0] -x = data[:, 1] -y = data[:, 2] +t = data[:, 0] +x = data[:, 1] +y = data[:, 2] vx = data[:, 3] vy = data[:, 4] @@ -42,50 +41,50 @@ ref = np.loadtxt("cvsPendulum_dns_ref.txt", dtype=np.double) # extract positions and velocities -xr = ref[:, 1] -yr = ref[:, 2] +xr = ref[:, 1] +yr = ref[:, 2] vxr = ref[:, 3] vyr = ref[:, 4] # lower half of unit circle -tt = np.linspace(np.pi, 2*np.pi, 10000) +tt = np.linspace(np.pi, 2 * np.pi, 10000) xt = np.cos(tt) yt = np.sin(tt) # plot solution in xy plane fig, ax = plt.subplots() -ax.axhline(y=0, color='black', linestyle='--', label=None) -ax.axvline(x=0, color='black', linestyle='--', label=None) -plt.plot(xt, yt, color='black', linestyle='--', label=None) -plt.scatter(x, y, color='red', label='comp') -plt.scatter(xr, yr, color='blue', label='ref') - -plt.xlabel('x') -plt.ylabel('y') -plt.title('Pendulum') -ax.set_aspect('equal') -plt.legend(loc='lower right') +ax.axhline(y=0, color="black", linestyle="--", label=None) +ax.axvline(x=0, color="black", linestyle="--", label=None) +plt.plot(xt, yt, color="black", linestyle="--", label=None) +plt.scatter(x, y, color="red", label="comp") +plt.scatter(xr, yr, color="blue", label="ref") + +plt.xlabel("x") +plt.ylabel("y") +plt.title("Pendulum") +ax.set_aspect("equal") +plt.legend(loc="lower right") # plot position over time fig, ax = plt.subplots() -ax.axhline(y=0, color='black', linestyle='--') -plt.plot(t, x, label='x') -plt.plot(t, y, label='y') +ax.axhline(y=0, color="black", linestyle="--") +plt.plot(t, x, label="x") +plt.plot(t, y, label="y") -plt.xlabel('t') -plt.ylabel('position') -plt.title('Pendulum Position') +plt.xlabel("t") +plt.ylabel("position") +plt.title("Pendulum Position") plt.legend() # plot velocity over time fig, ax = plt.subplots() -ax.axhline(y=0, color='black', linestyle='--') -plt.plot(t, vx, label='$v_x$') -plt.plot(t, vy, label='$v_y$') +ax.axhline(y=0, color="black", linestyle="--") +plt.plot(t, vx, label="$v_x$") +plt.plot(t, vy, label="$v_y$") -plt.xlabel('t') -plt.ylabel('velocity') -plt.title('Pendulum Velocity') +plt.xlabel("t") +plt.ylabel("velocity") +plt.title("Pendulum 
Velocity") plt.legend() # display plots diff --git a/examples/utilities/plot_data_2d.py b/examples/utilities/plot_data_2d.py index 0303e252ff..ecf15ab7e6 100755 --- a/examples/utilities/plot_data_2d.py +++ b/examples/utilities/plot_data_2d.py @@ -78,101 +78,111 @@ def main(): import sys import argparse - parser = argparse.ArgumentParser(description='''Plot 2D data files''') + parser = argparse.ArgumentParser(description="""Plot 2D data files""") # List of input data files - parser.add_argument('datafiles', type=str, nargs='+', - help='Data files to plot') + parser.add_argument("datafiles", type=str, nargs="+", help="Data files to plot") # Plot type options - group = parser.add_argument_group('Plot Options', - '''Options to specify the type of plot to - generate and what data to plot''') + group = parser.add_argument_group( + "Plot Options", + """Options to specify the type of plot to + generate and what data to plot""", + ) - group.add_argument('--plottype', type=str, - choices=['surface', 'surface-ani', - 'contour', 'contour-ani', - 'slice', 'point'], - default='surface', - help='''Set the plot type''') + group.add_argument( + "--plottype", + type=str, + choices=["surface", "surface-ani", "contour", "contour-ani", "slice", "point"], + default="surface", + help="""Set the plot type""", + ) - group.add_argument('--plotvars', type=int, nargs='+', - help='''Variable indices to plot''') + group.add_argument( + "--plotvars", type=int, nargs="+", help="""Variable indices to plot""" + ) - group.add_argument('--plottimes', type=int, nargs='+', - help='''Time indices to plot''') + group.add_argument( + "--plottimes", type=int, nargs="+", help="""Time indices to plot""" + ) # Slice plot options - group = parser.add_argument_group('Slice Plot Options', - '''Options specific to the slice plot - type''') + group = parser.add_argument_group( + "Slice Plot Options", + """Options specific to the slice plot + type""", + ) - group.add_argument('--slicetype', type=str, default='var', - choices=['var', 'time'], - help='''The slice plot type''') + group.add_argument( + "--slicetype", + type=str, + default="var", + choices=["var", "time"], + help="""The slice plot type""", + ) mxgroup = group.add_mutually_exclusive_group() - mxgroup.add_argument('--yslice', type=int, default=-1, - help='''y index to plot''') + mxgroup.add_argument("--yslice", type=int, default=-1, help="""y index to plot""") - mxgroup.add_argument('--xslice', type=int, default=-1, - help='''x index to plot''') + mxgroup.add_argument("--xslice", type=int, default=-1, help="""x index to plot""") # Point plot options - group = parser.add_argument_group('Point Plot Options', - '''Options specific to the point plot - type''') + group = parser.add_argument_group( + "Point Plot Options", + """Options specific to the point plot + type""", + ) - group.add_argument('--point', type=int, nargs=2, default=[0, 0], - help='''x and y index to plot''') + group.add_argument( + "--point", type=int, nargs=2, default=[0, 0], help="""x and y index to plot""" + ) # Output options - group = parser.add_argument_group('Output Options', - '''Options for saving plots''') + group = parser.add_argument_group("Output Options", """Options for saving plots""") - group.add_argument('--save', action='store_true', - help='''Save figure to file''') + group.add_argument("--save", action="store_true", help="""Save figure to file""") - group.add_argument('--prefix', type=str, - help='''File name prefix for saving the figure''') + group.add_argument( + "--prefix", type=str, 
help="""File name prefix for saving the figure""" + ) - group.add_argument('--merge', action='store_true', - help='''Merge PDF output files into a single file''') + group.add_argument( + "--merge", + action="store_true", + help="""Merge PDF output files into a single file""", + ) # Figure options - group = parser.add_argument_group('Figure Options', - '''Options to specify various figure - properties''') + group = parser.add_argument_group( + "Figure Options", + """Options to specify various figure + properties""", + ) - group.add_argument('--labels', type=str, nargs='+', - help='''Data labels for the plot legend''') + group.add_argument( + "--labels", type=str, nargs="+", help="""Data labels for the plot legend""" + ) - group.add_argument('--title', type=str, - help='''Plot title''') + group.add_argument("--title", type=str, help="""Plot title""") - group.add_argument('--xlabel', type=str, - help='''x-axis label''') + group.add_argument("--xlabel", type=str, help="""x-axis label""") - group.add_argument('--ylabel', type=str, - help='''y-axis label''') + group.add_argument("--ylabel", type=str, help="""y-axis label""") - group.add_argument('--zlabel', type=str, - help='''z-axis label''') + group.add_argument("--zlabel", type=str, help="""z-axis label""") - group.add_argument('--grid', action='store_true', - help='''Add grid to plot''') + group.add_argument("--grid", action="store_true", help="""Add grid to plot""") # Debugging options - parser.add_argument('--debug', action='store_true', - help='Enable debugging') + parser.add_argument("--debug", action="store_true", help="Enable debugging") # parse command line args args = parser.parse_args() @@ -190,52 +200,53 @@ def main(): plot_settings(args, info, time, xvals, yvals, zdata) # Create plots - if args.plottype == 'surface': + if args.plottype == "surface": plot_surface(args, info, time, xvals, yvals, zdata) - if args.plottype == 'surface-ani': + if args.plottype == "surface-ani": plot_surface_ani(args, info, time, xvals, yvals, zdata) - if args.plottype == 'contour': + if args.plottype == "contour": plot_contour(args, info, time, xvals, yvals, zdata) - if args.plottype == 'contour-ani': + if args.plottype == "contour-ani": plot_contour_ani(args, info, time, xvals, yvals, zdata) - if args.plottype == 'slice': + if args.plottype == "slice": # slice data - if (args.yslice > -1) and (args.yslice < info['ny']): + if (args.yslice > -1) and (args.yslice < info["ny"]): svals = xvals sdata = zdata[:, args.yslice, :, :] if args.xlabel: hlabel = args.xlabel else: - hlabel = 'x' + hlabel = "x" suffix = " at y = {:.4f}".format(yvals[args.yslice]) - elif (args.xslice > -1) and (args.xslice < info['nx']): + elif (args.xslice > -1) and (args.xslice < info["nx"]): svals = yvals sdata = zdata[:, :, args.xslice, :] if args.ylabel: hlabel = args.ylabel else: - hlabel = 'y' + hlabel = "y" suffix = " at x = {:.4f}".format(xvals[args.xslice]) else: print("ERROR: invalid xslice or yslice option") sys.exit() - if args.slicetype == 'var': + if args.slicetype == "var": plot_slice_vars(args, info, time, svals, sdata, hlabel, suffix) else: plot_slice_time(args, info, time, svals, sdata, hlabel, suffix) - if args.plottype == 'point': + if args.plottype == "point": # point data pdata = zdata[:, args.point[1], args.point[0], :] - suffix = " at x = {:.4f}, y = {:.4f}".format(xvals[args.point[0]], - yvals[args.point[1]]) + suffix = " at x = {:.4f}, y = {:.4f}".format( + xvals[args.point[0]], yvals[args.point[1]] + ) plot_point(args, info, time, pdata, suffix) @@ -264,8 
+275,19 @@ def read_header(args): import numpy as np # initialize dictionary of header info variables to None - keys = ['title', 'varnames', 'nprocs', 'nvar', 'nt', 'nx', 'xl', 'xu', - 'ny', 'yl', 'yu'] + keys = [ + "title", + "varnames", + "nprocs", + "nvar", + "nt", + "nx", + "xl", + "xu", + "ny", + "yl", + "yu", + ] info = dict() for k in keys: @@ -290,62 +312,62 @@ def read_header(args): # plot title if "title" in line: - info['title'] = " ".join(text[2:]) + info["title"] = " ".join(text[2:]) continue # plot variable names if "vars" in line: - info['varnames'] = text[2:] + info["varnames"] = text[2:] continue # total number of processes if "nprocs" in line: - info['nprocs'] = int(text[2]) + info["nprocs"] = int(text[2]) continue # number of variables (at each spatial node) if "nvar" in line: - info['nvar'] = int(text[2]) + info["nvar"] = int(text[2]) continue # number of output times if "nt" in line: - info['nt'] = int(text[2]) + info["nt"] = int(text[2]) continue # the global number of nodes in the x-direction, the x lower bound # (west) and the x upper bound (east) if "nx" in line: - info['nx'] = int(text[2]) + info["nx"] = int(text[2]) continue if "xl" in line: - info['xl'] = float(text[2]) + info["xl"] = float(text[2]) continue if "xu" in line: - info['xu'] = float(text[2]) + info["xu"] = float(text[2]) continue # the global number of nodes in the y-direction, the y lower bound # (south) and the y upper bound (north) if "ny" in line: - info['ny'] = int(text[2]) + info["ny"] = int(text[2]) continue if "yl" in line: - info['yl'] = float(text[2]) + info["yl"] = float(text[2]) continue if "yu" in line: - info['yu'] = float(text[2]) + info["yu"] = float(text[2]) continue # load data to deduce values and perform sanity checks data = np.loadtxt(args.datafiles[0], dtype=np.double) # try to fill in missing values - if info['nvar'] is None: - info['nvar'] = 1 + if info["nvar"] is None: + info["nvar"] = 1 print("WARNING: nvar not provided. Using nvar = 1") - if info['nt'] is None or info['nx'] is None or info['ny'] is None: + if info["nt"] is None or info["nx"] is None or info["ny"] is None: # check if data exists if data.ndim != 2: @@ -353,72 +375,76 @@ def read_header(args): sys.exit() # number of output times - if info['nt'] is None: - info['nt'] = np.shape(data)[0] + if info["nt"] is None: + info["nt"] = np.shape(data)[0] # number of spatial nodes - if info['nx'] is None or info['ny'] is None: + if info["nx"] is None or info["ny"] is None: col = np.shape(data)[1] - 1 # exclude output times - if info['nx'] is None and info['ny'] is not None: - info['nx'] = col // (info['nvar'] * info['ny']) - elif info['nx'] is not None and info['ny'] is None: - info['ny'] = col // (info['nvar'] * info['nx']) + if info["nx"] is None and info["ny"] is not None: + info["nx"] = col // (info["nvar"] * info["ny"]) + elif info["nx"] is not None and info["ny"] is None: + info["ny"] = col // (info["nvar"] * info["nx"]) else: - info['nx'] = int(np.sqrt(col // info['nvar'])) - info['ny'] = info['nx'] - print("WARNING: nx and ny not provided. Using nx = ny =", - info['nx']) + info["nx"] = int(np.sqrt(col // info["nvar"])) + info["ny"] = info["nx"] + print("WARNING: nx and ny not provided. 
Using nx = ny =", info["nx"]) # sanity checks - if info['nt'] != np.shape(data)[0]: - print("ERROR: nt != nrows", info['nt'], np.shape(data)[0]) + if info["nt"] != np.shape(data)[0]: + print("ERROR: nt != nrows", info["nt"], np.shape(data)[0]) sys.exit() - if (info['nvar'] * info['nx'] * info['ny']) != (np.shape(data)[1] - 1): + if (info["nvar"] * info["nx"] * info["ny"]) != (np.shape(data)[1] - 1): print("ERROR: nvar * nx * ny != ncols - 1") sys.exit() # check x-dimension lower and upper bounds - if info['xl'] is None: + if info["xl"] is None: print("WARNING: xl not provided, using xl = 0") - info['xl'] = 0.0 + info["xl"] = 0.0 - if info['xu'] is None: + if info["xu"] is None: print("WARNING: xu not provided, using xu = 1") - info['xu'] = 1.0 + info["xu"] = 1.0 # check y-dimension lower and upper bounds - if info['yl'] is None: + if info["yl"] is None: print("WARNING: yl not provided, using yl = 0") - info['yl'] = 0.0 + info["yl"] = 0.0 - if info['yu'] is None: + if info["yu"] is None: print("WARNING: yu not provided, using yu = 1") - info['yu'] = 1.0 + info["yu"] = 1.0 # check number of processes - if info['nprocs'] is None: - info['nprocs'] = len(args.datafiles) - print("WARNING: nprocs not provided, using nprocs =", info['nprocs']) + if info["nprocs"] is None: + info["nprocs"] = len(args.datafiles) + print("WARNING: nprocs not provided, using nprocs =", info["nprocs"]) # check if all the expected input files were provided - if len(args.datafiles) != info['nprocs']: - print("ERROR: number of data files (", len(args.datafiles), - ") does not match number of processes (", info['nprocs'], ")") + if len(args.datafiles) != info["nprocs"]: + print( + "ERROR: number of data files (", + len(args.datafiles), + ") does not match number of processes (", + info["nprocs"], + ")", + ) sys.exit() if args.debug: - print('title = ', info['title']) - print('varnames = ', info['varnames']) - print('nprocs = ', info['nprocs']) - print('nvar = ', info['nvar']) - print('nt = ', info['nt']) - print('nx = ', info['nx']) - print('xl = ', info['xl']) - print('xu = ', info['xu']) - print('ny = ', info['ny']) - print('yl = ', info['yl']) - print('yu = ', info['yu']) + print("title = ", info["title"]) + print("varnames = ", info["varnames"]) + print("nprocs = ", info["nprocs"]) + print("nvar = ", info["nvar"]) + print("nt = ", info["nt"]) + print("nx = ", info["nx"]) + print("xl = ", info["xl"]) + print("xu = ", info["xu"]) + print("ny = ", info["ny"]) + print("yl = ", info["yl"]) + print("yu = ", info["yu"]) return info @@ -435,14 +461,14 @@ def read_subdomains(args, info): import numpy as np # load subdomain information, store in table - subdomains = np.zeros((info['nprocs'], 4), dtype=int) + subdomains = np.zeros((info["nprocs"], 4), dtype=int) # get the spatial subdomain owned by each process - if info['nprocs'] == 1: + if info["nprocs"] == 1: subdomains[0, 0] = 0 - subdomains[0, 1] = info['nx'] - 1 + subdomains[0, 1] = info["nx"] - 1 subdomains[0, 2] = 0 - subdomains[0, 3] = info['ny'] - 1 + subdomains[0, 3] = info["ny"] - 1 else: for idx, datafile in enumerate(args.datafiles): @@ -490,8 +516,7 @@ def read_subdomains(args, info): # check if subdomain indices were found if not (found_is and found_ie and found_js and found_je): - print("ERROR: could not find subdomain indices in", - datafile) + print("ERROR: could not find subdomain indices in", datafile) sys.exit() return subdomains @@ -507,10 +532,10 @@ def read_data(args, info, subdomains): import numpy as np # initialize data arrays - time = 
np.zeros(info['nt']) - xvals = np.linspace(info['xl'], info['xu'], info['nx']) - yvals = np.linspace(info['yl'], info['yu'], info['ny']) - zdata = np.zeros((info['nt'], info['ny'], info['nx'], info['nvar'])) + time = np.zeros(info["nt"]) + xvals = np.linspace(info["xl"], info["xu"], info["nx"]) + yvals = np.linspace(info["yl"], info["yu"], info["ny"]) + zdata = np.zeros((info["nt"], info["ny"], info["nx"], info["nvar"])) # extract data for idx, datafile in enumerate(args.datafiles): @@ -524,10 +549,17 @@ def read_data(args, info, subdomains): if args.debug: print(np.shape(data)) - if np.shape(data)[0] != info['nt']: - print("WARNING: subdomain", str(idx), "has an incorrect number of" - "output times (", np.shape(data)[0], "vs", info['nt'], ")") - info['nt'] = np.shape(data)[0] + if np.shape(data)[0] != info["nt"]: + print( + "WARNING: subdomain", + str(idx), + "has an incorrect number of" "output times (", + np.shape(data)[0], + "vs", + info["nt"], + ")", + ) + info["nt"] = np.shape(data)[0] # x-subdomain indices istart = subdomains[idx, 0] @@ -547,10 +579,11 @@ def read_data(args, info, subdomains): # reshape and save data time[:] = data[:, 0] - for v in range(info['nvar']): - for i in range(info['nt']): - zdata[i, jstart:jend+1, istart:iend+1, v] = \ - np.reshape(data[i, 1+v::info['nvar']], (nyl, nxl)) + for v in range(info["nvar"]): + for i in range(info["nt"]): + zdata[i, jstart : jend + 1, istart : iend + 1, v] = np.reshape( + data[i, 1 + v :: info["nvar"]], (nyl, nxl) + ) return time, xvals, yvals, zdata @@ -565,40 +598,40 @@ def plot_settings(args, info, time, xvals, yvals, zdata): import numpy as np # determine extents of plots - info['zmin'] = np.zeros(info['nvar']) - info['zmax'] = np.zeros(info['nvar']) + info["zmin"] = np.zeros(info["nvar"]) + info["zmax"] = np.zeros(info["nvar"]) - for v in range(info['nvar']): - info['zmin'][v] = np.amin(zdata[:, :, :, v]) - info['zmax'][v] = np.amax(zdata[:, :, :, v]) + for v in range(info["nvar"]): + info["zmin"][v] = np.amin(zdata[:, :, :, v]) + info["zmax"][v] = np.amax(zdata[:, :, :, v]) if args.debug: - print("z max = ", info['zmax']) - print("z min = ", info['zmin']) + print("z max = ", info["zmax"]) + print("z min = ", info["zmin"]) # which variables to plot if args.plotvars: - info['pltvars'] = args.plotvars + info["pltvars"] = args.plotvars else: - info['pltvars'] = range(info['nvar']) + info["pltvars"] = range(info["nvar"]) # which times to plot if args.plottimes: - info['plttimes'] = args.plottimes + info["plttimes"] = args.plottimes else: - info['plttimes'] = range(info['nt']) + info["plttimes"] = range(info["nt"]) # x-axis label if args.xlabel: - info['xlabel'] = args.xlabel + info["xlabel"] = args.xlabel else: - info['xlabel'] = 'x' + info["xlabel"] = "x" # y-axis label if args.ylabel: - info['ylabel'] = args.ylabel + info["ylabel"] = args.ylabel else: - info['ylabel'] = 'y' + info["ylabel"] = "y" # ----------------------------------------------------------------------------- @@ -638,50 +671,58 @@ def plot_surface(args, info, time, xvals, yvals, zdata): X, Y = np.meshgrid(xvals, yvals) # generate plots - for v in info['pltvars']: + for v in info["pltvars"]: if args.merge: mergefiles = list() - for t in info['plttimes']: + for t in info["plttimes"]: # create figure and axes fig = plt.figure() - ax = fig.add_subplot(111, projection='3d') - - ax.plot_surface(X, Y, zdata[t, :, :, v], rstride=1, cstride=1, - cmap=cm.jet, linewidth=0, antialiased=True, - shade=True) + ax = fig.add_subplot(111, projection="3d") + + ax.plot_surface( + 
X, + Y, + zdata[t, :, :, v], + rstride=1, + cstride=1, + cmap=cm.jet, + linewidth=0, + antialiased=True, + shade=True, + ) # set axis limits - ax.set_xlim([info['xl'], info['xu']]) - ax.set_ylim([info['yl'], info['yu']]) - ax.set_zlim(info['zmin'][v], info['zmax'][v]) + ax.set_xlim([info["xl"], info["xu"]]) + ax.set_ylim([info["yl"], info["yu"]]) + ax.set_zlim(info["zmin"][v], info["zmax"][v]) # initial perspective ax.view_init(20, -120) # add axis labels - plt.xlabel(info['xlabel']) - plt.ylabel(info['ylabel']) + plt.xlabel(info["xlabel"]) + plt.ylabel(info["ylabel"]) # add z-axis label if args.zlabel: ax.set_zlabel(args.zlabel) - elif info['varnames']: - ax.set_zlabel(info['varnames'][v]) + elif info["varnames"]: + ax.set_zlabel(info["varnames"][v]) else: - ax.set_zlabel('z') + ax.set_zlabel("z") # add title tstr = str(time[t]) if args.title: title = args.title - elif info['title']: - title = info['title'] + elif info["title"]: + title = info["title"] else: - title = 'Solution' - plt.title(title + '\nt = ' + tstr) + title = "Solution" + plt.title(title + "\nt = " + tstr) # add grid if args.grid: @@ -690,15 +731,15 @@ def plot_surface(args, info, time, xvals, yvals, zdata): # save plot to file if args.save: if args.prefix: - fname = args.prefix + '_fig_surface_' + fname = args.prefix + "_fig_surface_" else: - fname = 'fig_surface_' - if info['varnames']: - fname += info['varnames'][v] + fname = "fig_surface_" + if info["varnames"]: + fname += info["varnames"][v] else: - fname += 'var_' + repr(v).zfill(3) - fname += '_t_' + repr(t).zfill(3) + '.pdf' - plt.savefig(fname, bbox_inches='tight') + fname += "var_" + repr(v).zfill(3) + fname += "_t_" + repr(t).zfill(3) + ".pdf" + plt.savefig(fname, bbox_inches="tight") if args.merge: mergefiles.append(fname) else: @@ -707,14 +748,14 @@ def plot_surface(args, info, time, xvals, yvals, zdata): if args.merge: if args.prefix: - fname = args.prefix + '_fig_surface_' + fname = args.prefix + "_fig_surface_" else: - fname = 'fig_surface_' - if info['varnames']: - fname += info['varnames'][v] + fname = "fig_surface_" + if info["varnames"]: + fname += info["varnames"][v] else: - fname += 'var_' + repr(v).zfill(3) - fname += '.pdf' + fname += "var_" + repr(v).zfill(3) + fname += ".pdf" merge_pdf(mergefiles, fname) @@ -732,38 +773,47 @@ def plot_surface_ani(args, info, time, xvals, yvals, zdata): def update_plot(frame_number, zarray, v, plot): plot[0].remove() - plot[0] = ax.plot_surface(X, Y, zarray[frame_number, :, :, v], - cmap=cm.jet) + plot[0] = ax.plot_surface(X, Y, zarray[frame_number, :, :, v], cmap=cm.jet) tstr = str(time[frame_number]) if args.title: title = args.title - elif info['title']: - title = info['title'] + elif info["title"]: + title = info["title"] else: - title = 'Solution' - plt.title(title + '\nt = ' + tstr) + title = "Solution" + plt.title(title + "\nt = " + tstr) - return plot, + return (plot,) # set x and y meshgrid objects X, Y = np.meshgrid(xvals, yvals) # generate plots - for v in info['pltvars']: + for v in info["pltvars"]: # create figure and axes fig = plt.figure() - ax = plt.axes(projection='3d') - - plot = [ax.plot_surface(X, Y, zdata[0, :, :, v], rstride=1, cstride=1, - cmap=cm.jet, linewidth=0, antialiased=True, - shade=True)] + ax = plt.axes(projection="3d") + + plot = [ + ax.plot_surface( + X, + Y, + zdata[0, :, :, v], + rstride=1, + cstride=1, + cmap=cm.jet, + linewidth=0, + antialiased=True, + shade=True, + ) + ] # set axis limits - ax.set_xlim([info['xl'], info['xu']]) - ax.set_ylim([info['yl'], info['yu']]) - 
ax.set_zlim([info['zmin'][v], info['zmax'][v]]) + ax.set_xlim([info["xl"], info["xu"]]) + ax.set_ylim([info["yl"], info["yu"]]) + ax.set_zlim([info["zmin"][v], info["zmax"][v]]) # initial perspective ax.view_init(20, -120) @@ -772,45 +822,45 @@ def update_plot(frame_number, zarray, v, plot): if args.xlabel: plt.xlabel(args.xlabel) else: - ax.set_xlabel('x') + ax.set_xlabel("x") # add y-axis label if args.ylabel: plt.ylabel(args.ylabel) else: - ax.set_ylabel('y') + ax.set_ylabel("y") # add z-axis label if args.zlabel: ax.set_zlabel(args.zlabel) - elif info['varnames']: - ax.set_zlabel(info['varnames'][v]) + elif info["varnames"]: + ax.set_zlabel(info["varnames"][v]) else: - ax.set_zlabel('z') + ax.set_zlabel("z") # add grid if args.grid: plt.grid() - fps = 2 # frame per sec + fps = 2 # frame per sec frn = len(time) # number of frames in the animation # create animation - ani = animation.FuncAnimation(fig, update_plot, frn, - fargs=(zdata, v, plot), - interval=1000/fps) + ani = animation.FuncAnimation( + fig, update_plot, frn, fargs=(zdata, v, plot), interval=1000 / fps + ) # save animation to file if args.save: if args.prefix: - fname = args.prefix + '_ani_surface_' + fname = args.prefix + "_ani_surface_" else: - fname = 'ani_surface_' - if info['varnames']: - fname += info['varnames'][v] + fname = "ani_surface_" + if info["varnames"]: + fname += info["varnames"][v] else: - fname += 'var_' + repr(v).zfill(3) - ani.save(fname + '.mp4', dpi=200, fps=fps) + fname += "var_" + repr(v).zfill(3) + ani.save(fname + ".mp4", dpi=200, fps=fps) else: plt.show() plt.close() @@ -830,36 +880,37 @@ def plot_contour(args, info, time, xvals, yvals, zdata): X, Y = np.meshgrid(xvals, yvals) # generate plots - for v in info['pltvars']: + for v in info["pltvars"]: - levels = np.linspace(info['zmin'][v], info['zmax'][v], 100) - ticks = np.linspace(info['zmin'][v], info['zmax'][v], 10) + levels = np.linspace(info["zmin"][v], info["zmax"][v], 100) + ticks = np.linspace(info["zmin"][v], info["zmax"][v], 10) - for t in info['plttimes']: + for t in info["plttimes"]: # create figure and axes fig, ax = plt.subplots() - cf = ax.contourf(X, Y, zdata[t, :, :, v], levels=levels, - cmap="coolwarm", extend="both") + cf = ax.contourf( + X, Y, zdata[t, :, :, v], levels=levels, cmap="coolwarm", extend="both" + ) fig.colorbar(cf, ax=ax, fraction=0.046, pad=0.04, ticks=ticks) # set axis limits - ax.set_xlim([info['xl'], info['xu']]) - ax.set_ylim([info['yl'], info['yu']]) + ax.set_xlim([info["xl"], info["xu"]]) + ax.set_ylim([info["yl"], info["yu"]]) # add axis labels - plt.xlabel(info['xlabel']) - plt.ylabel(info['ylabel']) + plt.xlabel(info["xlabel"]) + plt.ylabel(info["ylabel"]) # add title tstr = str(time[t]) if args.title: - plt.title(args.title + ' at t = ' + tstr) - elif info['title']: - plt.title(info['title'] + ' at t = ' + tstr) + plt.title(args.title + " at t = " + tstr) + elif info["title"]: + plt.title(info["title"] + " at t = " + tstr) else: - plt.title('Solution at t = ' + tstr) + plt.title("Solution at t = " + tstr) # add grid if args.grid: @@ -868,15 +919,15 @@ def plot_contour(args, info, time, xvals, yvals, zdata): # save plot to file if args.save: if args.prefix: - fname = args.prefix + '_fig_contour_' + fname = args.prefix + "_fig_contour_" else: - fname = 'fig_contour_' - if info['varnames']: - fname += info['varnames'][v] + fname = "fig_contour_" + if info["varnames"]: + fname += info["varnames"][v] else: - fname += 'var_' + repr(v).zfill(3) - fname += '_t_' + repr(t).zfill(3) + '.pdf' - plt.savefig(fname, 
bbox_inches='tight') + fname += "var_" + repr(v).zfill(3) + fname += "_t_" + repr(t).zfill(3) + ".pdf" + plt.savefig(fname, bbox_inches="tight") else: plt.show() plt.close() @@ -894,67 +945,76 @@ def plot_contour_ani(args, info, time, xvals, yvals, zdata): import matplotlib.animation as animation def update_plot(frame_number, zarray, v, plot): - plot[0] = ax.contourf(X, Y, zdata[frame_number, :, :, v], - levels=levels, cmap="coolwarm", extend="both") + plot[0] = ax.contourf( + X, + Y, + zdata[frame_number, :, :, v], + levels=levels, + cmap="coolwarm", + extend="both", + ) tstr = str(time[frame_number]) if args.title: title = args.title - elif info['title']: - title = info['title'] + elif info["title"]: + title = info["title"] else: - title = 'Solution' - plt.title(title + '\nt = ' + tstr) + title = "Solution" + plt.title(title + "\nt = " + tstr) - return plot, + return (plot,) # set x and y meshgrid objects X, Y = np.meshgrid(xvals, yvals) # generate plots - for v in info['pltvars']: + for v in info["pltvars"]: - levels = np.linspace(info['zmin'][v], info['zmax'][v], 100) - ticks = np.linspace(info['zmin'][v], info['zmax'][v], 10) + levels = np.linspace(info["zmin"][v], info["zmax"][v], 100) + ticks = np.linspace(info["zmin"][v], info["zmax"][v], 10) # create figure and axes fig, ax = plt.subplots() - plot = [ax.contourf(X, Y, zdata[0, :, :, v], levels=levels, - cmap="coolwarm", extend="both")] + plot = [ + ax.contourf( + X, Y, zdata[0, :, :, v], levels=levels, cmap="coolwarm", extend="both" + ) + ] fig.colorbar(plot[0], ax=ax, fraction=0.046, pad=0.04, ticks=ticks) # set axis limits - ax.set_xlim([info['xl'], info['xu']]) - ax.set_ylim([info['yl'], info['yu']]) + ax.set_xlim([info["xl"], info["xu"]]) + ax.set_ylim([info["yl"], info["yu"]]) # add axis labels - plt.xlabel(info['xlabel']) - plt.ylabel(info['ylabel']) + plt.xlabel(info["xlabel"]) + plt.ylabel(info["ylabel"]) # add grid if args.grid: plt.grid() - fps = 2 # frame per sec + fps = 2 # frame per sec frn = len(time) # number of frames in the animation # create animation - ani = animation.FuncAnimation(fig, update_plot, frn, - fargs=(zdata, v, plot), - interval=1000/fps) + ani = animation.FuncAnimation( + fig, update_plot, frn, fargs=(zdata, v, plot), interval=1000 / fps + ) # save animation to file if args.save: if args.prefix: - fname = args.prefix + '_ani_contour_' + fname = args.prefix + "_ani_contour_" else: - fname = 'ani_contour_' - if info['varnames']: - fname += info['varnames'][v] + fname = "ani_contour_" + if info["varnames"]: + fname += info["varnames"][v] else: - fname += 'var_' + repr(v).zfill(3) - ani.save(fname + '.mp4', dpi=200, fps=fps) + fname += "var_" + repr(v).zfill(3) + ani.save(fname + ".mp4", dpi=200, fps=fps) else: plt.show() plt.close() @@ -971,10 +1031,10 @@ def plot_slice_vars(args, info, time, svals, sdata, hlabel, suffix): import matplotlib.pyplot as plt # determine extents of slice plot - smin = np.zeros(info['nvar']) - smax = np.zeros(info['nvar']) + smin = np.zeros(info["nvar"]) + smax = np.zeros(info["nvar"]) - for v in range(info['nvar']): + for v in range(info["nvar"]): smin[v] = np.amin(sdata[:, :, v]) smax[v] = np.amax(sdata[:, :, v]) @@ -989,13 +1049,13 @@ def plot_slice_vars(args, info, time, svals, sdata, hlabel, suffix): label = ["%.2f" % t for t in time] # create plot for each variable - for v in info['pltvars']: + for v in info["pltvars"]: # create figure and axes fig, ax = plt.subplots() # add each output time to the plot - for t in info['plttimes']: + for t in info["plttimes"]: 
ax.plot(svals, sdata[t, :, v], label=label[t]) # set axis limits @@ -1012,19 +1072,19 @@ def plot_slice_vars(args, info, time, svals, sdata, hlabel, suffix): if args.zlabel: ax.set_ylabel(args.zlabel) else: - if info['varnames']: - ax.set_ylabel(info['varnames'][v]) + if info["varnames"]: + ax.set_ylabel(info["varnames"][v]) else: - ax.set_ylabel('variable ' + repr(v)) + ax.set_ylabel("variable " + repr(v)) # add title if args.title: plt.title(args.title + suffix) - elif info['title']: - plt.title(info['title'] + suffix) + elif info["title"]: + plt.title(info["title"] + suffix) else: - if info['varnames']: - plt.title("Evolution of " + info['varnames'][v] + suffix) + if info["varnames"]: + plt.title("Evolution of " + info["varnames"][v] + suffix) else: plt.title("Evolution of variable " + repr(v) + suffix) @@ -1035,14 +1095,14 @@ def plot_slice_vars(args, info, time, svals, sdata, hlabel, suffix): # save plot to file if args.save: if args.prefix: - fname = args.prefix + '_fig_slice_' + fname = args.prefix + "_fig_slice_" else: - fname = 'fig_slice_' - if info['varnames']: - fname += info['varnames'][v] + fname = "fig_slice_" + if info["varnames"]: + fname += info["varnames"][v] else: - fname += 'var_' + repr(v).zfill(3) - plt.savefig(fname + '.pdf', bbox_inches='tight') + fname += "var_" + repr(v).zfill(3) + plt.savefig(fname + ".pdf", bbox_inches="tight") else: plt.show() plt.close() @@ -1069,19 +1129,19 @@ def plot_slice_time(args, info, time, svals, sdata, hlabel, suffix): # set labels for the plot legend if args.labels: label = args.labels - elif info['varnames']: - label = info['varnames'] + elif info["varnames"]: + label = info["varnames"] else: - label = [None] * info['nvar'] + label = [None] * info["nvar"] # create plot for each variable - for t in info['plttimes']: + for t in info["plttimes"]: # create figure and axes fig, ax = plt.subplots() # add each output time to the plot - for v in info['pltvars']: + for v in info["pltvars"]: ax.plot(svals, sdata[t, :, v], label=label[v]) # set axis limits @@ -1101,11 +1161,11 @@ def plot_slice_time(args, info, time, svals, sdata, hlabel, suffix): # add title tstr = str(time[t]) if args.title: - plt.title(args.title + suffix + ' and t = ' + tstr) - elif info['title']: - plt.title(info['title'] + suffix + ' and t = ' + tstr) + plt.title(args.title + suffix + " and t = " + tstr) + elif info["title"]: + plt.title(info["title"] + suffix + " and t = " + tstr) else: - plt.title("Evolution" + suffix + ' and t = ' + tstr) + plt.title("Evolution" + suffix + " and t = " + tstr) # add grid if args.grid: @@ -1114,11 +1174,11 @@ def plot_slice_time(args, info, time, svals, sdata, hlabel, suffix): # save plot to file if args.save: if args.prefix: - fname = args.prefix + '_fig_slice_t_' + fname = args.prefix + "_fig_slice_t_" else: - fname = 'fig_slice_t_' - fname += repr(t).zfill(3) + '.pdf' - plt.savefig(fname, bbox_inches='tight') + fname = "fig_slice_t_" + fname += repr(t).zfill(3) + ".pdf" + plt.savefig(fname, bbox_inches="tight") else: plt.show() plt.close() @@ -1136,16 +1196,16 @@ def plot_point(args, info, time, pdata, suffix): # set labels for the plot legend if args.labels: label = args.labels - elif info['varnames']: - label = info['varnames'] + elif info["varnames"]: + label = info["varnames"] else: - label = [None] * info['nvar'] + label = [None] * info["nvar"] # create figure and axes fig, ax = plt.subplots() # create plot for each variable - for v in info['pltvars']: + for v in info["pltvars"]: ax.plot(time, pdata[:, v], label=label[v]) # 
add legend @@ -1157,8 +1217,8 @@ def plot_point(args, info, time, pdata, suffix): # add title if args.title: plt.title(args.title + suffix) - elif info['title']: - plt.title(info['title'] + suffix) + elif info["title"]: + plt.title(info["title"] + suffix) else: plt.title("Evolution" + suffix) @@ -1169,10 +1229,10 @@ def plot_point(args, info, time, pdata, suffix): # save plot to file if args.save: if args.prefix: - fname = args.prefix + '_fig_point' + fname = args.prefix + "_fig_point" else: - fname = 'fig_point' - plt.savefig(fname + '.pdf', bbox_inches='tight') + fname = "fig_point" + plt.savefig(fname + ".pdf", bbox_inches="tight") else: plt.show() plt.close() @@ -1183,6 +1243,7 @@ def plot_point(args, info, time, pdata, suffix): # ----------------------------------------------------------------------------- -if __name__ == '__main__': +if __name__ == "__main__": import sys + sys.exit(main()) diff --git a/examples/utilities/plot_data_time_series.py b/examples/utilities/plot_data_time_series.py index f96aeec538..02e3c34f7c 100755 --- a/examples/utilities/plot_data_time_series.py +++ b/examples/utilities/plot_data_time_series.py @@ -31,6 +31,7 @@ # output time. # ----------------------------------------------------------------------------- + # ----------------------------------------------------------------------------- # main routine # ----------------------------------------------------------------------------- @@ -41,50 +42,46 @@ def main(): import numpy as np import shlex - parser = argparse.ArgumentParser(description='''Plot data files''') + parser = argparse.ArgumentParser(description="""Plot data files""") - parser.add_argument('quantity', type=str, - help='''Quantity to plot''') + parser.add_argument("quantity", type=str, help="""Quantity to plot""") - parser.add_argument('datafiles', type=str, nargs='+', - help='''Data files to plot''') + parser.add_argument("datafiles", type=str, nargs="+", help="""Data files to plot""") # Plot display options - parser.add_argument('--save', action='store_true', - help='''Save figure to file''') + parser.add_argument("--save", action="store_true", help="""Save figure to file""") - parser.add_argument('--labels', type=str, nargs='+', - help='''Data file labels for plot legend''') + parser.add_argument( + "--labels", type=str, nargs="+", help="""Data file labels for plot legend""" + ) - parser.add_argument('--title', type=str, - help='''Plot title''') + parser.add_argument("--title", type=str, help="""Plot title""") - parser.add_argument('--xlabel', type=str, - help='''x-axis label''') + parser.add_argument("--xlabel", type=str, help="""x-axis label""") - parser.add_argument('--ylabel', type=str, - help='''y-axis label''') + parser.add_argument("--ylabel", type=str, help="""y-axis label""") - parser.add_argument('--grid', action='store_true', - help='''Add grid to plot''') + parser.add_argument("--grid", action="store_true", help="""Add grid to plot""") # Axis scaling logscale = parser.add_mutually_exclusive_group() - logscale.add_argument('--logx', action='store_true', - help='''Plot with log scale x-axis''') + logscale.add_argument( + "--logx", action="store_true", help="""Plot with log scale x-axis""" + ) - logscale.add_argument('--logy', action='store_true', - help='''Plot with log scale y-axis''') + logscale.add_argument( + "--logy", action="store_true", help="""Plot with log scale y-axis""" + ) - logscale.add_argument('--loglog', action='store_true', - help='''Use log scale x and y axes''') + logscale.add_argument( + "--loglog", 
action="store_true", help="""Use log scale x and y axes""" + ) # Debugging options - parser.add_argument('--debug', action='store_true', - help='Enable debugging') + parser.add_argument("--debug", action="store_true", help="Enable debugging") # Parse command line args args = parser.parse_args() @@ -132,33 +129,49 @@ def main(): print(data) # Extract t and q data - tdata = data[:,0] # first column has t values - qdata = data[:,idx] # remaining columns have q values + tdata = data[:, 0] # first column has t values + qdata = data[:, idx] # remaining columns have q values # line colors: matplotlib.org/stable/tutorials/colors/colormaps.html # and colorbrewer2.org) if len(args.datafiles) < 22: - colors = ["#d62728", "#1f77b4", "#2ca02c", "#9467bd", "#ff7f0e", - "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf", - "#000000", "#ff9896", "#aec7e8", "#98df8a", "#c5b0d5", - "#ffbb78", "#c49c94", "#f7b6d2", "#c7c7c7", "#dbdb8d", - "#9edae5"] + colors = [ + "#d62728", + "#1f77b4", + "#2ca02c", + "#9467bd", + "#ff7f0e", + "#8c564b", + "#e377c2", + "#7f7f7f", + "#bcbd22", + "#17becf", + "#000000", + "#ff9896", + "#aec7e8", + "#98df8a", + "#c5b0d5", + "#ffbb78", + "#c49c94", + "#f7b6d2", + "#c7c7c7", + "#dbdb8d", + "#9edae5", + ] else: print("ERROR: ncols > ncolors") sys.exit() # Set plot label for legend - if (args.labels): - label=args.labels[i] + if args.labels: + label = args.labels[i] else: - label=None + label = None if args.logx or args.logy or args.loglog: - ax.plot(tdata, np.abs(qdata), label=label, - color=colors[i]) + ax.plot(tdata, np.abs(qdata), label=label, color=colors[i]) else: - ax.plot(tdata, qdata, label=label, - color=colors[i]) + ax.plot(tdata, qdata, label=label, color=colors[i]) # Change axis scale if args.logx: @@ -183,7 +196,7 @@ def main(): if args.ylabel: plt.ylabel(args.ylabel) else: - plt.ylabel(args.quantity.replace("_"," ")); + plt.ylabel(args.quantity.replace("_", " ")) # Add legend if args.labels: @@ -199,10 +212,12 @@ def main(): else: plt.show() + # ----------------------------------------------------------------------------- # run the main routine # ----------------------------------------------------------------------------- -if __name__ == '__main__': +if __name__ == "__main__": import sys + sys.exit(main()) diff --git a/test/compare_benchmarks.py b/test/compare_benchmarks.py index 2df5e7249e..1f76c60519 100644 --- a/test/compare_benchmarks.py +++ b/test/compare_benchmarks.py @@ -22,19 +22,55 @@ def main(): - parser = argparse.ArgumentParser(description='Compare Sundials performance results against previous results') - - parser.add_argument('--release', dest='release', action='store_true', help='indicate if the current run to process is a release') - - parser.add_argument('--calidir', dest='caliDir', type=str, help='path to directory containing caliper files', default="/usr/workspace/sundials/califiles") - - parser.add_argument('--releasedir', dest='releaseDir', type=str, help='path to directory containing release caliper files', default="/usr/workspace/sundials/califiles/Release") - - parser.add_argument('--outpath', dest='outPath', type=str, help='path to directory to write results to', default="/dev/null") - - parser.add_argument('--jobid', dest='jobID', type=int, help='job id of the current run to identify .cali files') - - parser.add_argument('--threshold', dest="threshold", type=float, help='the percentage threshold in performance difference that indicates a regression', default=2.0) + parser = argparse.ArgumentParser( + description="Compare Sundials 
performance results against previous results" + ) + + parser.add_argument( + "--release", + dest="release", + action="store_true", + help="indicate if the current run to process is a release", + ) + + parser.add_argument( + "--calidir", + dest="caliDir", + type=str, + help="path to directory containing caliper files", + default="/usr/workspace/sundials/califiles", + ) + + parser.add_argument( + "--releasedir", + dest="releaseDir", + type=str, + help="path to directory containing release caliper files", + default="/usr/workspace/sundials/califiles/Release", + ) + + parser.add_argument( + "--outpath", + dest="outPath", + type=str, + help="path to directory to write results to", + default="/dev/null", + ) + + parser.add_argument( + "--jobid", + dest="jobID", + type=int, + help="job id of the current run to identify .cali files", + ) + + parser.add_argument( + "--threshold", + dest="threshold", + type=float, + help="the percentage threshold in performance difference that indicates a regression", + default=2.0, + ) args = parser.parse_args() @@ -50,38 +86,42 @@ def main(): if not os.path.exists(outPath): os.makedirs(outPath) - outFile = open("%s/benchmark_output.out" % outPath, 'w') + outFile = open("%s/benchmark_output.out" % outPath, "w") # thread per file with mp.Pool() as pool: - for res in pool.starmap(process_benchmark, [(jobID, release, releaseDir, i, threshold) for i in benchFiles]): + for res in pool.starmap( + process_benchmark, + [(jobID, release, releaseDir, i, threshold) for i in benchFiles], + ): if res: outFile.write(res + "\n") outFile.close() - outFile = open("%s/benchmark_output.out" % outPath, 'r') + outFile = open("%s/benchmark_output.out" % outPath, "r") try: outLines = outFile.readlines() finally: outFile.close() - if (len(outLines) == 0): + if len(outLines) == 0: return -1 return 0 + def process_benchmark(jobID, isRelease, releaseDir, benchmarkDir, threshold): # Get the current benchmark run benchmarkFiles = glob.glob("%s/*.cali" % benchmarkDir) # Don't compare if the run didn't include this benchmark - if (len(benchmarkFiles) == 0): + if len(benchmarkFiles) == 0: return th_files = tt.Thicket.from_caliperreader(benchmarkFiles) - curFilter = lambda x: x['job_id'] == jobID + curFilter = lambda x: x["job_id"] == jobID th_current = th_files.filter_metadata(curFilter) # Get the release caliper file - cluster = th_current.metadata['cluster'].values[0] + cluster = th_current.metadata["cluster"].values[0] if isRelease: # Get the last release versionDirs = glob.glob("%s/%s/*" % (releaseDir, cluster)) @@ -89,18 +129,23 @@ def process_benchmark(jobID, isRelease, releaseDir, benchmarkDir, threshold): versionDir = versionDirs[1] else: # Get the release the run is a part of - version = th_current.metadata['sundials_version'].values[0] + version = th_current.metadata["sundials_version"].values[0] versionDir = "%s/%s/%s" % (releaseDir, cluster, version) - benchmarkName = th_current.metadata['env.TEST_NAME'].values[0] - releaseFile = glob.glob("%s/Benchmarking/*/%s/*.cali" % (versionDir, benchmarkName), recursive=True) + benchmarkName = th_current.metadata["env.TEST_NAME"].values[0] + releaseFile = glob.glob( + "%s/Benchmarking/*/%s/*.cali" % (versionDir, benchmarkName), recursive=True + ) th_compare = tt.Thicket.from_caliperreader(releaseFile) - metrics = ['Max time/rank'] + metrics = ["Max time/rank"] tt.mean(th_current, columns=metrics) tt.mean(th_compare, columns=metrics) - ratio = th_current.statsframe.dataframe['Max time/rank_mean'] / th_compare.statsframe.dataframe['Max 
time/rank_mean'] + ratio = ( + th_current.statsframe.dataframe["Max time/rank_mean"] + / th_compare.statsframe.dataframe["Max time/rank_mean"] + ) - tolerance = threshold/100 + tolerance = threshold / 100 if 1 - ratio[0] < tolerance: return benchmarkName diff --git a/test/compare_examples.py b/test/compare_examples.py index 046b9d620b..3383fa91f6 100644 --- a/test/compare_examples.py +++ b/test/compare_examples.py @@ -27,18 +27,50 @@ import hatchet as ht import thicket as tt -def main(): - parser = argparse.ArgumentParser(description='Compare Sundials performance results against previous results') - - parser.add_argument('--release', dest='release', action='store_true', help='indicate if the current run to process is a release') - - parser.add_argument('--calidir', dest='caliDir', type=str, help='path to directory containing caliper files', default="/usr/workspace/sundials/califiles") - - parser.add_argument('--releasedir', dest='releaseDir', type=str, help='path to directory containing release caliper files', default="/usr/workspace/sundials/califiles/Release") - parser.add_argument('--outpath', dest='outPath', type=str, help='path to directory to write results to', default="/dev/null") - - parser.add_argument('--threshold', dest="threshold", type=float, help='the percentage threshold in performance difference that indicates a regression', default=2.0) +def main(): + parser = argparse.ArgumentParser( + description="Compare Sundials performance results against previous results" + ) + + parser.add_argument( + "--release", + dest="release", + action="store_true", + help="indicate if the current run to process is a release", + ) + + parser.add_argument( + "--calidir", + dest="caliDir", + type=str, + help="path to directory containing caliper files", + default="/usr/workspace/sundials/califiles", + ) + + parser.add_argument( + "--releasedir", + dest="releaseDir", + type=str, + help="path to directory containing release caliper files", + default="/usr/workspace/sundials/califiles/Release", + ) + + parser.add_argument( + "--outpath", + dest="outPath", + type=str, + help="path to directory to write results to", + default="/dev/null", + ) + + parser.add_argument( + "--threshold", + dest="threshold", + type=float, + help="the percentage threshold in performance difference that indicates a regression", + default=2.0, + ) args = parser.parse_args() @@ -49,13 +81,13 @@ def main(): threshold = args.threshold # Get the latest test run - runDirs = glob.glob("%s/Testing/*" % caliDir, recursive = True) + runDirs = glob.glob("%s/Testing/*" % caliDir, recursive=True) runDirs.sort(key=os.path.getmtime, reverse=True) runDir = runDirs[0] runFile = glob.glob(runDir)[0] th_temp = tt.Thicket.from_caliperreader(runFile) - cluster = th_temp.metadata['cluster'] + cluster = th_temp.metadata["cluster"] # get machine from the file if release: # Compare against the last release @@ -64,7 +96,7 @@ def main(): versionDir = versionDirs[1] else: # Compare against the release the run is a part of - version = th_temp.metadata['sundials_version'].values[0] + version = th_temp.metadata["sundials_version"].values[0] versionDir = "%s/%s/%s" % (releaseDir, cluster, version) # Gather files to process @@ -72,22 +104,24 @@ def main(): if not os.path.exists(outPath): os.makedirs(outPath) - outFile = open("%s/output.out" % outPath, 'w') + outFile = open("%s/output.out" % outPath, "w") # Compare test results against past runs. If a test performs below a threshold, output test name to outFile. 
with mp.Pool() as pool: - for res in pool.starmap(compare_against_release, [(versionDir, i, threshold) for i in runFiles]): + for res in pool.starmap( + compare_against_release, [(versionDir, i, threshold) for i in runFiles] + ): if res: outFile.write(res + "\n") outFile.close() - outFile = open("%s/example_output.out" % outPath, 'r') + outFile = open("%s/example_output.out" % outPath, "r") try: outLines = outFile.readlines() finally: outFile.close() - if (len(outLines) == 0): + if len(outLines) == 0: return -1 return 0 @@ -95,21 +129,27 @@ def main(): def compare_against_release(releaseDir, file, threshold): th = tt.Thicket.from_caliperreader(file) - testName = th.metadata['env.TEST_NAME'].values[0] + testName = th.metadata["env.TEST_NAME"].values[0] # Gather release run - releaseFile = glob.glob("%s/Testing/*/%s.*.cali" % (releaseDir, testName), recursive=True) + releaseFile = glob.glob( + "%s/Testing/*/%s.*.cali" % (releaseDir, testName), recursive=True + ) th_release = tt.Thicket.from_caliperreader(releaseFile) - metrics = ['Max time/rank'] + metrics = ["Max time/rank"] tt.mean(th_release, columns=metrics) tt.mean(th, columns=metrics) - ratio = th.statsframe.dataframe['Max time/rank_mean'] / th_release.statsframe.dataframe['Max time/rank_mean'] + ratio = ( + th.statsframe.dataframe["Max time/rank_mean"] + / th_release.statsframe.dataframe["Max time/rank_mean"] + ) print(ratio[0]) - tolerance = threshold/100 + tolerance = threshold / 100 if 1 - ratio[0] < tolerance: return testName + if __name__ == "__main__": main() diff --git a/test/config_cmake.py b/test/config_cmake.py index e916d1bcc0..f1574b6257 100644 --- a/test/config_cmake.py +++ b/test/config_cmake.py @@ -21,109 +21,252 @@ def main(): import argparse - parser = argparse.ArgumentParser(description='''Create a SUNDIALS CMake - cache file''') - - parser.add_argument('--filetype', type=str, choices=['cache', 'script'], - default='cache', - help='''Create a CMake cache file or configuration - script (default cache)''') - - parser.add_argument('--filename', type=str, default="sundials.cmake", - help='''Set the cache file or script name (default - sundials.cmake)''') - - parser.add_argument('--readenv', action='store_true', - help='''Read environment variables (command line + parser = argparse.ArgumentParser( + description="""Create a SUNDIALS CMake + cache file""" + ) + + parser.add_argument( + "--filetype", + type=str, + choices=["cache", "script"], + default="cache", + help="""Create a CMake cache file or configuration + script (default cache)""", + ) + + parser.add_argument( + "--filename", + type=str, + default="sundials.cmake", + help="""Set the cache file or script name (default + sundials.cmake)""", + ) + + parser.add_argument( + "--readenv", + action="store_true", + help="""Read environment variables (command line arguments will override any settings from the - environment variables)''') + environment variables)""", + ) - parser.add_argument('--debugscript', action='store_true', - help='Enable debugging output for this script') + parser.add_argument( + "--debugscript", + action="store_true", + help="Enable debugging output for this script", + ) # ----------------- # Compiler Options # ----------------- - group = parser.add_argument_group('Compilers and Flags', - '''Options for setting the C, C++, - Fortran, and CUDA compiler and flags.''') + group = parser.add_argument_group( + "Compilers and Flags", + """Options for setting the C, C++, + Fortran, and CUDA compiler and flags.""", + ) # Build type - add_arg(group, 
'--build-type', 'CMAKE_BUILD_TYPE', 'CMAKE_BUILD_TYPE', - 'RelWithDebInfo', 'STRING', - 'CMake build type (Debug, RelWithDebInfo, Release)') + add_arg( + group, + "--build-type", + "CMAKE_BUILD_TYPE", + "CMAKE_BUILD_TYPE", + "RelWithDebInfo", + "STRING", + "CMake build type (Debug, RelWithDebInfo, Release)", + ) # C compiler - add_arg(group, '--c-compiler', 'CC', 'CMAKE_C_COMPILER', None, 'FILEPATH', - 'C compiler') - - add_arg(group, '--c-flags', 'CFLAGS', 'CMAKE_C_FLAGS', None, 'STRING', - 'C compiler flags') - - add_arg(group, '--c-std', 'CMAKE_C_STANDARD', 'CMAKE_C_STANDARD', '99', - 'STRING', 'C standard') - - add_arg(group, '--c-ext', 'CMAKE_C_EXTENSIONS', 'CMAKE_C_EXTENSIONS', - 'OFF', 'STRING', 'C compiler extensions') + add_arg( + group, "--c-compiler", "CC", "CMAKE_C_COMPILER", None, "FILEPATH", "C compiler" + ) + + add_arg( + group, + "--c-flags", + "CFLAGS", + "CMAKE_C_FLAGS", + None, + "STRING", + "C compiler flags", + ) + + add_arg( + group, + "--c-std", + "CMAKE_C_STANDARD", + "CMAKE_C_STANDARD", + "99", + "STRING", + "C standard", + ) + + add_arg( + group, + "--c-ext", + "CMAKE_C_EXTENSIONS", + "CMAKE_C_EXTENSIONS", + "OFF", + "STRING", + "C compiler extensions", + ) # C++ compiler - add_arg(group, '--cxx-compiler', 'CXX', 'CMAKE_CXX_COMPILER', None, - 'FILEPATH', 'C++ compiler') - - add_arg(group, '--cxx-flags', 'CXXFLAGS', 'CMAKE_CXX_FLAGS', None, - 'STRING', 'C++ compiler flags') - - add_arg(group, '--cxx-std', 'CMAKE_CXX_STANDARD', 'CMAKE_CXX_STANDARD', - '14', 'STRING', 'C++ standard') - - add_arg(group, '--cxx-ext', 'CMAKE_CXX_EXTENSIONS', 'CMAKE_CXX_EXTENSIONS', - 'OFF', 'STRING', 'C++ compiler extensions') + add_arg( + group, + "--cxx-compiler", + "CXX", + "CMAKE_CXX_COMPILER", + None, + "FILEPATH", + "C++ compiler", + ) + + add_arg( + group, + "--cxx-flags", + "CXXFLAGS", + "CMAKE_CXX_FLAGS", + None, + "STRING", + "C++ compiler flags", + ) + + add_arg( + group, + "--cxx-std", + "CMAKE_CXX_STANDARD", + "CMAKE_CXX_STANDARD", + "14", + "STRING", + "C++ standard", + ) + + add_arg( + group, + "--cxx-ext", + "CMAKE_CXX_EXTENSIONS", + "CMAKE_CXX_EXTENSIONS", + "OFF", + "STRING", + "C++ compiler extensions", + ) # Fortran compiler - add_arg(group, '--fortran-compiler', 'FC', 'CMAKE_Fortran_COMPILER', None, - 'FILEPATH', 'Fortran compiler') - - add_arg(group, '--fortran-flags', 'FFLAGS', 'CMAKE_Fortran_FLAGS', None, - 'STRING', 'Fortran compiler flags') + add_arg( + group, + "--fortran-compiler", + "FC", + "CMAKE_Fortran_COMPILER", + None, + "FILEPATH", + "Fortran compiler", + ) + + add_arg( + group, + "--fortran-flags", + "FFLAGS", + "CMAKE_Fortran_FLAGS", + None, + "STRING", + "Fortran compiler flags", + ) # CUDA compiler - add_arg(group, '--cuda-compiler', 'CUDACXX', 'CMAKE_CUDA_COMPILER', None, - 'FILEPATH', 'CUDA compiler') - - add_arg(group, '--cuda-flags', 'CUDAFLAGS', 'CMAKE_CUDA_FLAGS', None, - 'STRING', 'CUDA compiler flags') - - add_arg(group, '--cuda-std', 'CMAKE_CUDA_STANDARD', 'CMAKE_CUDA_STANDARD', - '14', 'STRING', 'CUDA standard') - - add_arg(group, '--cuda-arch', 'CUDAARCHS', 'CMAKE_CUDA_ARCHITECTURES', - None, 'STRING', 'CUDA architecture') + add_arg( + group, + "--cuda-compiler", + "CUDACXX", + "CMAKE_CUDA_COMPILER", + None, + "FILEPATH", + "CUDA compiler", + ) + + add_arg( + group, + "--cuda-flags", + "CUDAFLAGS", + "CMAKE_CUDA_FLAGS", + None, + "STRING", + "CUDA compiler flags", + ) + + add_arg( + group, + "--cuda-std", + "CMAKE_CUDA_STANDARD", + "CMAKE_CUDA_STANDARD", + "14", + "STRING", + "CUDA standard", + ) + + add_arg( + group, + "--cuda-arch", + 
"CUDAARCHS", + "CMAKE_CUDA_ARCHITECTURES", + None, + "STRING", + "CUDA architecture", + ) # Additional compiler options - add_arg(group, '--Wall', 'SUNDIALS_ENABLE_ALL_WARNINGS', - 'ENABLE_ALL_WARNINGS', 'OFF', 'BOOL', - 'Enable all compiler warnings') - - add_arg(group, '--Werror', 'SUNDIALS_ENABLE_WARNINGS_AS_ERRORS', - 'ENABLE_WARNINGS_AS_ERRORS', 'OFF', 'BOOL', - 'Enable compiler warnings as errors') - - add_arg(group, '--address-sanitizer', 'SUNDIALS_ENABLE_ADDRESS_SANITIZER', - 'ENABLE_ADDRESS_SANITIZER', 'OFF', 'BOOL', - 'Enable address sanitizer') + add_arg( + group, + "--Wall", + "SUNDIALS_ENABLE_ALL_WARNINGS", + "ENABLE_ALL_WARNINGS", + "OFF", + "BOOL", + "Enable all compiler warnings", + ) + + add_arg( + group, + "--Werror", + "SUNDIALS_ENABLE_WARNINGS_AS_ERRORS", + "ENABLE_WARNINGS_AS_ERRORS", + "OFF", + "BOOL", + "Enable compiler warnings as errors", + ) + + add_arg( + group, + "--address-sanitizer", + "SUNDIALS_ENABLE_ADDRESS_SANITIZER", + "ENABLE_ADDRESS_SANITIZER", + "OFF", + "BOOL", + "Enable address sanitizer", + ) # ---------------- # Install Options # ---------------- - group = parser.add_argument_group('Install Options', - '''Options for where SUNDIALS should be - installed.''') + group = parser.add_argument_group( + "Install Options", + """Options for where SUNDIALS should be + installed.""", + ) # install prefix - add_arg(group, '--install-prefix', 'SUNDIALS_INSTALL_PREFIX', - 'CMAKE_INSTALL_PREFIX', None, 'PATH', 'SUNDIALS install location') + add_arg( + group, + "--install-prefix", + "SUNDIALS_INSTALL_PREFIX", + "CMAKE_INSTALL_PREFIX", + None, + "PATH", + "SUNDIALS install location", + ) # library directory @@ -131,148 +274,333 @@ def main(): # Debugging Options # ------------------ - group = parser.add_argument_group('Debugging Options', - '''Options debugging SUNDIALS.''') - - add_arg(group, '--debug', 'SUNDIALS_DEBUG', 'SUNDIALS_DEBUG', 'OFF', - 'BOOL', 'SUNDIALS debugging output') - - add_arg(group, '--debug-assert', 'SUNDIALS_DEBUG_ASSERT', - 'SUNDIALS_DEBUG_ASSERT', 'OFF', 'BOOL', - 'SUNDIALS debugging asserts', dependson='--debug') - - add_arg(group, '--debug-cuda', 'SUNDIALS_DEBUG_CUDA_LASTERROR', - 'SUNDIALS_DEBUG_CUDA_LASTERROR', 'OFF', 'BOOL', - 'SUNDIALS debugging cuda errors', dependson='--debug') - - add_arg(group, '--debug-hip', 'SUNDIALS_DEBUG_HIP_LASTERROR', - 'SUNDIALS_DEBUG_HIP_LASTERROR', 'OFF', 'BOOL', - 'SUNDIALS debugging hip errors', dependson='--debug') - - add_arg(group, '--debug-printvec', 'SUNDIALS_DEBUG_PRINTVEC', - 'SUNDIALS_DEBUG_PRINTVEC', 'OFF', 'BOOL', - 'SUNDIALS debugging vector output', dependson='--debug') + group = parser.add_argument_group( + "Debugging Options", """Options debugging SUNDIALS.""" + ) + + add_arg( + group, + "--debug", + "SUNDIALS_DEBUG", + "SUNDIALS_DEBUG", + "OFF", + "BOOL", + "SUNDIALS debugging output", + ) + + add_arg( + group, + "--debug-assert", + "SUNDIALS_DEBUG_ASSERT", + "SUNDIALS_DEBUG_ASSERT", + "OFF", + "BOOL", + "SUNDIALS debugging asserts", + dependson="--debug", + ) + + add_arg( + group, + "--debug-cuda", + "SUNDIALS_DEBUG_CUDA_LASTERROR", + "SUNDIALS_DEBUG_CUDA_LASTERROR", + "OFF", + "BOOL", + "SUNDIALS debugging cuda errors", + dependson="--debug", + ) + + add_arg( + group, + "--debug-hip", + "SUNDIALS_DEBUG_HIP_LASTERROR", + "SUNDIALS_DEBUG_HIP_LASTERROR", + "OFF", + "BOOL", + "SUNDIALS debugging hip errors", + dependson="--debug", + ) + + add_arg( + group, + "--debug-printvec", + "SUNDIALS_DEBUG_PRINTVEC", + "SUNDIALS_DEBUG_PRINTVEC", + "OFF", + "BOOL", + "SUNDIALS debugging vector output", 
+ dependson="--debug", + ) # -------------- # Library Types # -------------- - group = parser.add_argument_group('Library Type Options', - '''Options to specify if shared and/or - static libraries are build.''') - add_arg(group, '--static', 'SUNDIALS_STATIC_LIBRARIES', - 'BUILD_STATIC_LIBS', 'ON', 'BOOL', - 'Build static SUNDIALS libraries') - - add_arg(group, '--shared', 'SUNDIALS_SHARED_LIBRARIES', - 'BUILD_SHARED_LIBS', 'ON', 'BOOL', - 'Build shared SUNDIALS libraries') + group = parser.add_argument_group( + "Library Type Options", + """Options to specify if shared and/or + static libraries are build.""", + ) + add_arg( + group, + "--static", + "SUNDIALS_STATIC_LIBRARIES", + "BUILD_STATIC_LIBS", + "ON", + "BOOL", + "Build static SUNDIALS libraries", + ) + + add_arg( + group, + "--shared", + "SUNDIALS_SHARED_LIBRARIES", + "BUILD_SHARED_LIBS", + "ON", + "BOOL", + "Build shared SUNDIALS libraries", + ) # --------- # Packages # --------- # packages TODO(DJG): Add support for ONLY option - group = parser.add_argument_group('SUNDIALS Packages', - '''Options to specify which SUNDIALS - packages should be built.''') - add_arg(group, '--arkode', 'SUNDIALS_ARKODE', 'BUILD_ARKODE', 'ON', 'BOOL', - 'Build the ARKODE library') - - add_arg(group, '--cvode', 'SUNDIALS_CVODE', 'BUILD_CVODE', 'ON', 'BOOL', - 'Build the CVODE library') - - add_arg(group, '--cvodes', 'SUNDIALS_CVODES', 'BUILD_CVODES', 'ON', 'BOOL', - 'Build the CVODES library') - - add_arg(group, '--ida', 'SUNDIALS_IDA', 'BUILD_IDA', 'ON', 'BOOL', - 'Build the IDA library') - - add_arg(group, '--idas', 'SUNDIALS_IDAS', 'BUILD_IDAS', 'ON', 'BOOL', - 'Build the IDAS library') - - add_arg(group, '--kinsol', 'SUNDIALS_KINSOL', 'BUILD_KINSOL', 'ON', 'BOOL', - 'Build the KINSOL library') + group = parser.add_argument_group( + "SUNDIALS Packages", + """Options to specify which SUNDIALS + packages should be built.""", + ) + add_arg( + group, + "--arkode", + "SUNDIALS_ARKODE", + "BUILD_ARKODE", + "ON", + "BOOL", + "Build the ARKODE library", + ) + + add_arg( + group, + "--cvode", + "SUNDIALS_CVODE", + "BUILD_CVODE", + "ON", + "BOOL", + "Build the CVODE library", + ) + + add_arg( + group, + "--cvodes", + "SUNDIALS_CVODES", + "BUILD_CVODES", + "ON", + "BOOL", + "Build the CVODES library", + ) + + add_arg( + group, + "--ida", + "SUNDIALS_IDA", + "BUILD_IDA", + "ON", + "BOOL", + "Build the IDA library", + ) + + add_arg( + group, + "--idas", + "SUNDIALS_IDAS", + "BUILD_IDAS", + "ON", + "BOOL", + "Build the IDAS library", + ) + + add_arg( + group, + "--kinsol", + "SUNDIALS_KINSOL", + "BUILD_KINSOL", + "ON", + "BOOL", + "Build the KINSOL library", + ) # ----------------- # Packages Options # ----------------- - group = parser.add_argument_group('SUNDIALS Package Options', - '''Options for configuring SUNDIALS types + group = parser.add_argument_group( + "SUNDIALS Package Options", + """Options for configuring SUNDIALS types and enabling special compile time - features.''') + features.""", + ) # index size - add_arg(group, '--indexsize', 'SUNDIALS_INDEX_SIZE', 'SUNDIALS_INDEX_SIZE', - '64', 'STRING', 'index size', choices=['32', '64']) + add_arg( + group, + "--indexsize", + "SUNDIALS_INDEX_SIZE", + "SUNDIALS_INDEX_SIZE", + "64", + "STRING", + "index size", + choices=["32", "64"], + ) # precision - add_arg(group, '--precision', 'SUNDIALS_PRECISION', 'SUNDIALS_PRECISION', - 'double', 'STRING', 'real type precision', - choices=['single', 'double', 'extended']) + add_arg( + group, + "--precision", + "SUNDIALS_PRECISION", + "SUNDIALS_PRECISION", + "double", + 
"STRING", + "real type precision", + choices=["single", "double", "extended"], + ) # monitoring - add_arg(group, '--monitoring', 'SUNDIALS_MONITORING', - 'SUNDIALS_BUILD_WITH_MONITORING', 'OFF', 'BOOL', - 'integrator and solver monitoring') + add_arg( + group, + "--monitoring", + "SUNDIALS_MONITORING", + "SUNDIALS_BUILD_WITH_MONITORING", + "OFF", + "BOOL", + "integrator and solver monitoring", + ) # profiling - add_arg(group, '--profiling', 'SUNDIALS_PROFILING', - 'SUNDIALS_BUILD_WITH_PROFILING', 'OFF', 'BOOL', - 'fine-grained profiling') - - add_arg(group, '--logging-level', 'SUNDIALS_LOGGING_LEVEL', - 'SUNDIALS_LOGGING_LEVEL', '0', 'STRING', - 'logging', choices=['0', '1', '2', '3', '4', '5']) + add_arg( + group, + "--profiling", + "SUNDIALS_PROFILING", + "SUNDIALS_BUILD_WITH_PROFILING", + "OFF", + "BOOL", + "fine-grained profiling", + ) + + add_arg( + group, + "--logging-level", + "SUNDIALS_LOGGING_LEVEL", + "SUNDIALS_LOGGING_LEVEL", + "0", + "STRING", + "logging", + choices=["0", "1", "2", "3", "4", "5"], + ) # fused kernels - add_arg(group, '--fused-kernels', 'SUNDIALS_FUSED_KERNELS', - 'SUNDIALS_BUILD_PACKAGE_FUSED_KERNELS', 'OFF', 'BOOL', - 'package fused kernels') + add_arg( + group, + "--fused-kernels", + "SUNDIALS_FUSED_KERNELS", + "SUNDIALS_BUILD_PACKAGE_FUSED_KERNELS", + "OFF", + "BOOL", + "package fused kernels", + ) # error checks - add_arg(group, '--enable-error-checks', 'SUNDIALS_ENABLE_ERROR_CHECKS', - 'SUNDIALS_ENABLE_ERROR_CHECKS', 'OFF', 'BOOL', - 'enable error checks') - + add_arg( + group, + "--enable-error-checks", + "SUNDIALS_ENABLE_ERROR_CHECKS", + "SUNDIALS_ENABLE_ERROR_CHECKS", + "OFF", + "BOOL", + "enable error checks", + ) # ----------- # Interfaces # ----------- - group = parser.add_argument_group('SUNDIALS Interfaces', - '''These options enable or disable the - SUNDIALS Fortran interfaces.''') + group = parser.add_argument_group( + "SUNDIALS Interfaces", + """These options enable or disable the + SUNDIALS Fortran interfaces.""", + ) # Fortran interfaces - add_arg(group, '--fmod-interface', 'SUNDIALS_FMOD_INTERFACE', - 'BUILD_FORTRAN_MODULE_INTERFACE', 'OFF', 'BOOL', - 'Fortran module interface') + add_arg( + group, + "--fmod-interface", + "SUNDIALS_FMOD_INTERFACE", + "BUILD_FORTRAN_MODULE_INTERFACE", + "OFF", + "BOOL", + "Fortran module interface", + ) # --------- # Examples # --------- - group = parser.add_argument_group('Example and Benchmark Programs', - '''These options enable or disable + group = parser.add_argument_group( + "Example and Benchmark Programs", + """These options enable or disable building and installing the SUNDIALS - example and Benchmark programs.''') - - add_arg(group, '--examples-c', 'SUNDIALS_EXAMPLES_C', - 'EXAMPLES_ENABLE_C', 'ON', 'BOOL', 'C examples') - - add_arg(group, '--examples-cxx', 'SUNDIALS_EXAMPLES_CXX', - 'EXAMPLES_ENABLE_CXX', None, 'BOOL', 'C++ examples') - - add_arg(group, '--examples-f03', 'SUNDIALS_EXAMPLES_F03', - 'EXAMPLES_ENABLE_F2003', None, 'BOOL', - 'Fortran 2003 examples') - - add_arg(group, '--examples-cuda', 'SUNDIALS_EXAMPLES_CUDA', - 'EXAMPLES_ENABLE_CUDA', None, 'BOOL', 'CUDA examples') - - add_arg(group, '--benchmarks', 'SUNDIALS_BENCHMARKS', - 'BUILD_BENCHMARKS', 'OFF', 'BOOL', 'Benchmarks') + example and Benchmark programs.""", + ) + + add_arg( + group, + "--examples-c", + "SUNDIALS_EXAMPLES_C", + "EXAMPLES_ENABLE_C", + "ON", + "BOOL", + "C examples", + ) + + add_arg( + group, + "--examples-cxx", + "SUNDIALS_EXAMPLES_CXX", + "EXAMPLES_ENABLE_CXX", + None, + "BOOL", + "C++ examples", + ) + + add_arg( + 
group, + "--examples-f03", + "SUNDIALS_EXAMPLES_F03", + "EXAMPLES_ENABLE_F2003", + None, + "BOOL", + "Fortran 2003 examples", + ) + + add_arg( + group, + "--examples-cuda", + "SUNDIALS_EXAMPLES_CUDA", + "EXAMPLES_ENABLE_CUDA", + None, + "BOOL", + "CUDA examples", + ) + + add_arg( + group, + "--benchmarks", + "SUNDIALS_BENCHMARKS", + "BUILD_BENCHMARKS", + "OFF", + "BOOL", + "Benchmarks", + ) # ------------ # TPL Options @@ -282,317 +610,745 @@ def main(): # MPI # ---- - group = parser.add_argument_group('MPI Options', - '''Options for enabling MPI support in + group = parser.add_argument_group( + "MPI Options", + """Options for enabling MPI support in SUNDIALS and setting the MPI C, C++, and - Fortran compilers.''') - - add_arg(group, '--mpi', 'SUNDIALS_MPI', 'ENABLE_MPI', 'OFF', - 'FILEPATH', 'SUNDIALS MPI support') - - add_arg(group, '--mpicc', 'MPICC', 'MPI_C_COMPILER', None, - 'FILEPATH', 'MPI C compiler', dependson='--mpi') - - add_arg(group, '--mpicxx', 'MPICXX', 'MPI_CXX_COMPILER', None, - 'FILEPATH', 'MPI C++ compiler', dependson='--mpi') - - add_arg(group, '--mpifort', 'MPIFC', 'MPI_Fortran_COMPILER', None, - 'FILEPATH', 'MPI Fortran compiler', dependson='--mpi') - - add_arg(group, '--mpiexec', 'MPIEXEC', 'MPIEXEC_EXECUTABLE', None, - 'FILEPATH', 'MPI executable', dependson='--mpi') - - add_arg(group, '--mpiexec-pre-flags', 'MPIEXEC_PREFLAGS', 'MPIEXEC_PREFLAGS', None, - 'STRING', 'MPI executable extra flags', dependson='--mpi') + Fortran compilers.""", + ) + + add_arg( + group, + "--mpi", + "SUNDIALS_MPI", + "ENABLE_MPI", + "OFF", + "FILEPATH", + "SUNDIALS MPI support", + ) + + add_arg( + group, + "--mpicc", + "MPICC", + "MPI_C_COMPILER", + None, + "FILEPATH", + "MPI C compiler", + dependson="--mpi", + ) + + add_arg( + group, + "--mpicxx", + "MPICXX", + "MPI_CXX_COMPILER", + None, + "FILEPATH", + "MPI C++ compiler", + dependson="--mpi", + ) + + add_arg( + group, + "--mpifort", + "MPIFC", + "MPI_Fortran_COMPILER", + None, + "FILEPATH", + "MPI Fortran compiler", + dependson="--mpi", + ) + + add_arg( + group, + "--mpiexec", + "MPIEXEC", + "MPIEXEC_EXECUTABLE", + None, + "FILEPATH", + "MPI executable", + dependson="--mpi", + ) + + add_arg( + group, + "--mpiexec-pre-flags", + "MPIEXEC_PREFLAGS", + "MPIEXEC_PREFLAGS", + None, + "STRING", + "MPI executable extra flags", + dependson="--mpi", + ) # ---------- # Threading # ---------- # OpenMP - group = parser.add_argument_group('OpenMP Options', - '''Options for enabling OpenMP support in - SUNDIALS.''') - - add_arg(group, '--openmp', 'SUNDIALS_OPENMP', 'ENABLE_OPENMP', 'OFF', - 'BOOL', 'SUNDIALS OpenMP support') - - add_arg(group, '--openmp-device-works', 'SUNDIALS_OPENMP_DEVICE_WORKS', - 'OPENMP_DEVICE_WORKS', 'OFF', 'BOOL', - 'Disable OpenMP Device Support Checks (assume OpenMP 4.5+)') - + group = parser.add_argument_group( + "OpenMP Options", + """Options for enabling OpenMP support in + SUNDIALS.""", + ) + + add_arg( + group, + "--openmp", + "SUNDIALS_OPENMP", + "ENABLE_OPENMP", + "OFF", + "BOOL", + "SUNDIALS OpenMP support", + ) + + add_arg( + group, + "--openmp-device-works", + "SUNDIALS_OPENMP_DEVICE_WORKS", + "OPENMP_DEVICE_WORKS", + "OFF", + "BOOL", + "Disable OpenMP Device Support Checks (assume OpenMP 4.5+)", + ) # Pthread - group = parser.add_argument_group('Pthread Options', - '''Options for enabling - Pthread support in SUNDIALS.''') - - add_arg(group, '--pthread', 'SUNDIALS_PTHREAD', 'ENABLE_PTHREAD', 'OFF', - 'BOOL', 'SUNDIALS PThread support') + group = parser.add_argument_group( + "Pthread Options", + """Options for 
enabling + Pthread support in SUNDIALS.""", + ) + + add_arg( + group, + "--pthread", + "SUNDIALS_PTHREAD", + "ENABLE_PTHREAD", + "OFF", + "BOOL", + "SUNDIALS PThread support", + ) # ----- # GPUs # ----- # CUDA - group = parser.add_argument_group('CUDA Options', - '''Options for enabling CUDA support in - - SUNDIALS''') - add_arg(group, '--cuda', 'SUNDIALS_CUDA', 'ENABLE_CUDA', 'OFF', 'BOOL', - 'SUNDIALS CUDA support') + group = parser.add_argument_group( + "CUDA Options", + """Options for enabling CUDA support in + + SUNDIALS""", + ) + add_arg( + group, + "--cuda", + "SUNDIALS_CUDA", + "ENABLE_CUDA", + "OFF", + "BOOL", + "SUNDIALS CUDA support", + ) # HIP - group = parser.add_argument_group('HIP Options', - '''Options for enabling HIP support in - SUNDIALS.''') - - add_arg(group, '--hip', 'SUNDIALS_HIP', 'ENABLE_HIP', 'OFF', 'BOOL', - 'SUNDIALS HIP support') + group = parser.add_argument_group( + "HIP Options", + """Options for enabling HIP support in + SUNDIALS.""", + ) + + add_arg( + group, + "--hip", + "SUNDIALS_HIP", + "ENABLE_HIP", + "OFF", + "BOOL", + "SUNDIALS HIP support", + ) # OpenMP Offload - group = parser.add_argument_group('OpenMP Offload Options', - '''Options for enabling OpenMP offload - support in SUNDIALS.''') - - add_arg(group, '--openmp-offload', 'SUNDIALS_OPENMP_OFFLOAD', - 'ENABLE_OPENMP_DEVICE', 'OFF', 'BOOL', - 'SUNDIALS OpenMP offload support') + group = parser.add_argument_group( + "OpenMP Offload Options", + """Options for enabling OpenMP offload + support in SUNDIALS.""", + ) + + add_arg( + group, + "--openmp-offload", + "SUNDIALS_OPENMP_OFFLOAD", + "ENABLE_OPENMP_DEVICE", + "OFF", + "BOOL", + "SUNDIALS OpenMP offload support", + ) # ------------------------ # Performance portability # ------------------------ # Kokkos - group = parser.add_argument_group('Kokkos Options') - - add_arg(group, '--kokkos', 'SUNDIALS_KOKKOS', 'ENABLE_KOKKOS', 'OFF', - 'BOOL', 'SUNDIALS Kokkos support') - - add_arg(group, '--kokkos-dir', 'KOKKOS_ROOT', 'Kokkos_DIR', None, 'PATH', - 'Kokkos install directory', dependson='--kokkos') + group = parser.add_argument_group("Kokkos Options") + + add_arg( + group, + "--kokkos", + "SUNDIALS_KOKKOS", + "ENABLE_KOKKOS", + "OFF", + "BOOL", + "SUNDIALS Kokkos support", + ) + + add_arg( + group, + "--kokkos-dir", + "KOKKOS_ROOT", + "Kokkos_DIR", + None, + "PATH", + "Kokkos install directory", + dependson="--kokkos", + ) # RAJA - group = parser.add_argument_group('RAJA Options') - - add_arg(group, '--raja', 'SUNDIALS_RAJA', 'ENABLE_RAJA', 'OFF', 'BOOL', - 'SUNDIALS Raja support') - - add_arg(group, '--raja-dir', 'RAJA_ROOT', 'RAJA_DIR', None, 'PATH', - 'RAJA install directory', dependson='--raja') - - add_arg(group, '--raja-backends', 'RAJA_BACKENDS', - 'SUNDIALS_RAJA_BACKENDS', None, 'STRING', 'RAJA backends', - choices=['CUDA', 'HIP'], dependson='--raja') + group = parser.add_argument_group("RAJA Options") + + add_arg( + group, + "--raja", + "SUNDIALS_RAJA", + "ENABLE_RAJA", + "OFF", + "BOOL", + "SUNDIALS Raja support", + ) + + add_arg( + group, + "--raja-dir", + "RAJA_ROOT", + "RAJA_DIR", + None, + "PATH", + "RAJA install directory", + dependson="--raja", + ) + + add_arg( + group, + "--raja-backends", + "RAJA_BACKENDS", + "SUNDIALS_RAJA_BACKENDS", + None, + "STRING", + "RAJA backends", + choices=["CUDA", "HIP"], + dependson="--raja", + ) # SYCL - group = parser.add_argument_group('SYCL Options') - - add_arg(group, '--sycl', 'SUNDIALS_SYCL', 'ENABLE_SYCL', 'OFF', 'BOOL', - 'SUNDIALS SYCL support') + group = parser.add_argument_group("SYCL 
Options") + + add_arg( + group, + "--sycl", + "SUNDIALS_SYCL", + "ENABLE_SYCL", + "OFF", + "BOOL", + "SUNDIALS SYCL support", + ) # ------------------------ # Linear solver libraries # ------------------------ # Ginkgo - group = parser.add_argument_group('Ginkgo Options') - - add_arg(group, '--ginkgo', 'SUNDIALS_GINKGO', 'ENABLE_GINKGO', 'OFF', - 'BOOL', 'SUNDIALS Ginkgo support') - - add_arg(group, '--ginkgo-dir', 'GINKGO_ROOT', 'Ginkgo_DIR', None, 'PATH', - 'Ginkgo install directory', dependson='--ginkgo') - - add_arg(group, '--ginkgo-backends', 'GINKGO_BACKENDS', - 'SUNDIALS_GINKGO_BACKENDS', 'REF;OMP', 'STRING', 'Ginkgo backends', - choices=['REF', 'OMP', 'CUDA', 'HIP', 'DPCPP'], dependson='--ginkgo') + group = parser.add_argument_group("Ginkgo Options") + + add_arg( + group, + "--ginkgo", + "SUNDIALS_GINKGO", + "ENABLE_GINKGO", + "OFF", + "BOOL", + "SUNDIALS Ginkgo support", + ) + + add_arg( + group, + "--ginkgo-dir", + "GINKGO_ROOT", + "Ginkgo_DIR", + None, + "PATH", + "Ginkgo install directory", + dependson="--ginkgo", + ) + + add_arg( + group, + "--ginkgo-backends", + "GINKGO_BACKENDS", + "SUNDIALS_GINKGO_BACKENDS", + "REF;OMP", + "STRING", + "Ginkgo backends", + choices=["REF", "OMP", "CUDA", "HIP", "DPCPP"], + dependson="--ginkgo", + ) # LAPACK - group = parser.add_argument_group('LAPACK Options') - - add_arg(group, '--lapack', 'SUNDIALS_LAPACK', 'ENABLE_LAPACK', 'OFF', - 'BOOL', 'SUNDIALS LAPACK support') - - add_arg(group, '--lapack-libs', 'LAPACK_LIBRARIES', 'LAPACK_LIBRARIES', - None, 'STRING', 'LAPACK libraries', dependson='--lapack') + group = parser.add_argument_group("LAPACK Options") + + add_arg( + group, + "--lapack", + "SUNDIALS_LAPACK", + "ENABLE_LAPACK", + "OFF", + "BOOL", + "SUNDIALS LAPACK support", + ) + + add_arg( + group, + "--lapack-libs", + "LAPACK_LIBRARIES", + "LAPACK_LIBRARIES", + None, + "STRING", + "LAPACK libraries", + dependson="--lapack", + ) # KLU - group = parser.add_argument_group('KLU Options') - - add_arg(group, '--klu', 'SUNDIALS_KLU', 'ENABLE_KLU', 'OFF', 'BOOL', - 'SUNDIALS KLU support') - - add_arg(group, '--klu-incdir', 'SUITE_SPARSE_INCLUDE_DIR', - 'KLU_INCLUDE_DIR', None, 'PATH', 'KLU include directory', - dependson='--klu') - - add_arg(group, '--klu-libdir', 'SUITE_SPARSE_LIBRARY_DIR', - 'KLU_LIBRARY_DIR', None, 'PATH', 'KLU library directory', - dependson='--klu') + group = parser.add_argument_group("KLU Options") + + add_arg( + group, + "--klu", + "SUNDIALS_KLU", + "ENABLE_KLU", + "OFF", + "BOOL", + "SUNDIALS KLU support", + ) + + add_arg( + group, + "--klu-incdir", + "SUITE_SPARSE_INCLUDE_DIR", + "KLU_INCLUDE_DIR", + None, + "PATH", + "KLU include directory", + dependson="--klu", + ) + + add_arg( + group, + "--klu-libdir", + "SUITE_SPARSE_LIBRARY_DIR", + "KLU_LIBRARY_DIR", + None, + "PATH", + "KLU library directory", + dependson="--klu", + ) # KokkosKernels - group = parser.add_argument_group('KokkosKernels Options') - - add_arg(group, '--kokkos-kernels', 'SUNDIALS_KOKKOS_KERNELS', - 'ENABLE_KOKKOS_KERNELS', 'OFF', 'BOOL', - 'SUNDIALS Kokkos-Kernels support') - - add_arg(group, '--kokkos-kernels-dir', 'KOKKOS_KERNELS_ROOT', - 'KokkosKernels_DIR', None, 'PATH', - 'Kokkos-Kernels install directory', dependson='--kokkos-kernels') + group = parser.add_argument_group("KokkosKernels Options") + + add_arg( + group, + "--kokkos-kernels", + "SUNDIALS_KOKKOS_KERNELS", + "ENABLE_KOKKOS_KERNELS", + "OFF", + "BOOL", + "SUNDIALS Kokkos-Kernels support", + ) + + add_arg( + group, + "--kokkos-kernels-dir", + "KOKKOS_KERNELS_ROOT", + 
"KokkosKernels_DIR", + None, + "PATH", + "Kokkos-Kernels install directory", + dependson="--kokkos-kernels", + ) # SuperLU MT - group = parser.add_argument_group('SuperLU_MT Options') - - add_arg(group, '--superlu-mt', 'SUNDIALS_SUPERLU_MT', 'ENABLE_SUPERLUMT', - 'OFF', 'BOOL', 'SUNDIALS SuperLU MT support') - - add_arg(group, '--superlu-mt-incdir', 'SUPERLU_MT_INCLUDE_DIR', - 'SUPERLUMT_INCLUDE_DIR', None, 'PATH', - 'SuperLU_MT include directory', dependson='--superlu-mt') - - add_arg(group, '--superlu-mt-libdir', 'SUPERLU_MT_LIBRARY_DIR', - 'SUPERLUMT_LIBRARY_DIR', None, 'PATH', - 'SuperLU_MT library directory', dependson='--superlu-mt') - - add_arg(group, '--superlu-mt-libs', 'SUPERLU_MT_LIBRARIES', - 'SUPERLUMT_LIBRARIES', None, 'STRING', - 'SuperLU_MT additional libraries', dependson='--superlu-mt') - - add_arg(group, '--superlu-mt-thread-type', 'SUPERLU_MT_THREAD_TYPE', - 'SUPERLUMT_THREAD_TYPE', None, 'STRING', - 'SuperLU_MT thread type', choices=['OpenMP', 'Pthread'], - dependson='--superlu-mt') + group = parser.add_argument_group("SuperLU_MT Options") + + add_arg( + group, + "--superlu-mt", + "SUNDIALS_SUPERLU_MT", + "ENABLE_SUPERLUMT", + "OFF", + "BOOL", + "SUNDIALS SuperLU MT support", + ) + + add_arg( + group, + "--superlu-mt-incdir", + "SUPERLU_MT_INCLUDE_DIR", + "SUPERLUMT_INCLUDE_DIR", + None, + "PATH", + "SuperLU_MT include directory", + dependson="--superlu-mt", + ) + + add_arg( + group, + "--superlu-mt-libdir", + "SUPERLU_MT_LIBRARY_DIR", + "SUPERLUMT_LIBRARY_DIR", + None, + "PATH", + "SuperLU_MT library directory", + dependson="--superlu-mt", + ) + + add_arg( + group, + "--superlu-mt-libs", + "SUPERLU_MT_LIBRARIES", + "SUPERLUMT_LIBRARIES", + None, + "STRING", + "SuperLU_MT additional libraries", + dependson="--superlu-mt", + ) + + add_arg( + group, + "--superlu-mt-thread-type", + "SUPERLU_MT_THREAD_TYPE", + "SUPERLUMT_THREAD_TYPE", + None, + "STRING", + "SuperLU_MT thread type", + choices=["OpenMP", "Pthread"], + dependson="--superlu-mt", + ) # SuperLU DIST - group = parser.add_argument_group('SuperLU_DIST Options') - - add_arg(group, '--superlu-dist', 'SUNDIALS_SUPERLU_DIST', - 'ENABLE_SUPERLUDIST', 'OFF', 'BOOL', - 'SUNDIALS SuperLU DIST support') - - add_arg(group, '--superlu-dist-dir', 'SUPERLU_DIST_ROOT', - 'SUPERLUDIST_DIR', None, 'PATH', - 'SuperLU_DIST installation directory', dependson='--superlu-dist') - - add_arg(group, '--superlu-dist-incdir', 'SUPERLU_DIST_INCLUDE_DIR', - 'SUPERLUDIST_INCLUDE_DIR', None, 'PATH', - 'SuperLU_DIST include directory', dependson='--superlu-dist') - - add_arg(group, '--superlu-dist-libdir', 'SUPERLU_DIST_LIBRARY_DIR', - 'SUPERLUDIST_LIBRARY_DIR', None, 'PATH', - 'SuperLU_DIST library directory', dependson='--superlu-dist') - - add_arg(group, '--superlu-dist-libs', 'SUPERLU_DIST_LIBRARIES', - 'SUPERLUDIST_LIBRARIES', None, 'STRING', - 'SuperLU_DIST additional libraries', dependson='--superlu-dist') - - add_arg(group, '--superlu-dist-openmp', 'SUPERLU_DIST_OPENMP', - 'SUPERLUDIST_OpenMP', 'OFF', 'BOOL', 'SuperLU_DIST OpenMP enabled', - dependson='--superlu-dist') + group = parser.add_argument_group("SuperLU_DIST Options") + + add_arg( + group, + "--superlu-dist", + "SUNDIALS_SUPERLU_DIST", + "ENABLE_SUPERLUDIST", + "OFF", + "BOOL", + "SUNDIALS SuperLU DIST support", + ) + + add_arg( + group, + "--superlu-dist-dir", + "SUPERLU_DIST_ROOT", + "SUPERLUDIST_DIR", + None, + "PATH", + "SuperLU_DIST installation directory", + dependson="--superlu-dist", + ) + + add_arg( + group, + "--superlu-dist-incdir", + "SUPERLU_DIST_INCLUDE_DIR", + 
"SUPERLUDIST_INCLUDE_DIR", + None, + "PATH", + "SuperLU_DIST include directory", + dependson="--superlu-dist", + ) + + add_arg( + group, + "--superlu-dist-libdir", + "SUPERLU_DIST_LIBRARY_DIR", + "SUPERLUDIST_LIBRARY_DIR", + None, + "PATH", + "SuperLU_DIST library directory", + dependson="--superlu-dist", + ) + + add_arg( + group, + "--superlu-dist-libs", + "SUPERLU_DIST_LIBRARIES", + "SUPERLUDIST_LIBRARIES", + None, + "STRING", + "SuperLU_DIST additional libraries", + dependson="--superlu-dist", + ) + + add_arg( + group, + "--superlu-dist-openmp", + "SUPERLU_DIST_OPENMP", + "SUPERLUDIST_OpenMP", + "OFF", + "BOOL", + "SuperLU_DIST OpenMP enabled", + dependson="--superlu-dist", + ) # Magma - group = parser.add_argument_group('MAGMA Options') - - add_arg(group, '--magma', 'SUNDIALS_MAGMA', 'ENABLE_MAGMA', 'OFF', 'BOOL', - 'SUNDIALS MAGMA support') - - add_arg(group, '--magma-dir', 'MAGMA_ROOT', 'MAGMA_DIR', None, 'PATH', - 'MAGMA install directory', dependson='--magma') - - add_arg(group, '--magma-backends', 'MAGAMA_BACKENDS', - 'SUNDIALS_MAGMA_BACKENDS', None, 'STRING', 'MAGMA backends', - choices=['CUDA', 'HIP'], dependson='--magma') + group = parser.add_argument_group("MAGMA Options") + + add_arg( + group, + "--magma", + "SUNDIALS_MAGMA", + "ENABLE_MAGMA", + "OFF", + "BOOL", + "SUNDIALS MAGMA support", + ) + + add_arg( + group, + "--magma-dir", + "MAGMA_ROOT", + "MAGMA_DIR", + None, + "PATH", + "MAGMA install directory", + dependson="--magma", + ) + + add_arg( + group, + "--magma-backends", + "MAGAMA_BACKENDS", + "SUNDIALS_MAGMA_BACKENDS", + None, + "STRING", + "MAGMA backends", + choices=["CUDA", "HIP"], + dependson="--magma", + ) # ---------------- # Other libraries # ---------------- # hypre - group = parser.add_argument_group('hypre Options') - - add_arg(group, '--hypre', 'SUNDIALS_HYPRE', 'ENABLE_HYPRE', 'OFF', 'BOOL', - 'SUNDIALS hypre support') - - add_arg(group, '--hypre-incdir', 'HYPRE_INCLUDE_DIR', - 'HYPRE_INCLUDE_DIR', None, 'PATH', - 'Hypre include directory', dependson='--hypre') - - add_arg(group, '--hypre-libdir', 'HYPRE_LIBRARY_DIR', - 'HYPRE_LIBRARY_DIR', None, 'PATH', - 'Hypre library directory', dependson='--hypre') + group = parser.add_argument_group("hypre Options") + + add_arg( + group, + "--hypre", + "SUNDIALS_HYPRE", + "ENABLE_HYPRE", + "OFF", + "BOOL", + "SUNDIALS hypre support", + ) + + add_arg( + group, + "--hypre-incdir", + "HYPRE_INCLUDE_DIR", + "HYPRE_INCLUDE_DIR", + None, + "PATH", + "Hypre include directory", + dependson="--hypre", + ) + + add_arg( + group, + "--hypre-libdir", + "HYPRE_LIBRARY_DIR", + "HYPRE_LIBRARY_DIR", + None, + "PATH", + "Hypre library directory", + dependson="--hypre", + ) # PETSc - group = parser.add_argument_group('PTESc Options') - - add_arg(group, '--petsc', 'SUNDIALS_PETSC', 'ENABLE_PETSC', 'OFF', 'BOOL', - 'SUNDIALS PETSc support') - - add_arg(group, '--petsc-dir', 'PETSC_ROOT', 'PETSC_DIR', None, 'PATH', - 'PETSc install directory', dependson='--petsc') + group = parser.add_argument_group("PTESc Options") + + add_arg( + group, + "--petsc", + "SUNDIALS_PETSC", + "ENABLE_PETSC", + "OFF", + "BOOL", + "SUNDIALS PETSc support", + ) + + add_arg( + group, + "--petsc-dir", + "PETSC_ROOT", + "PETSC_DIR", + None, + "PATH", + "PETSc install directory", + dependson="--petsc", + ) # Trilinos - group = parser.add_argument_group('Trilinos Options') - - add_arg(group, '--trilinos', 'SUNDIALS_TRILINOS', 'ENABLE_TRILINOS', 'OFF', - 'BOOL', 'SUNDIALS Trilinos support') - - add_arg(group, '--trilinos-dir', 'TRILINOS_ROOT', 'Trilinos_DIR', None, - 
'PATH', 'Trilinos install directory', dependson='--trilinos') + group = parser.add_argument_group("Trilinos Options") + + add_arg( + group, + "--trilinos", + "SUNDIALS_TRILINOS", + "ENABLE_TRILINOS", + "OFF", + "BOOL", + "SUNDIALS Trilinos support", + ) + + add_arg( + group, + "--trilinos-dir", + "TRILINOS_ROOT", + "Trilinos_DIR", + None, + "PATH", + "Trilinos install directory", + dependson="--trilinos", + ) # XBraid - group = parser.add_argument_group('XBraid Options') - - add_arg(group, '--xbraid', 'SUNDIALS_XBRAID', 'ENABLE_XBRAID', 'OFF', - 'BOOL', 'SUNDIALS XBraid support') - - add_arg(group, '--xbraid-dir', 'XBRAID_ROOT', 'XBRAID_DIR', None, 'PATH', - 'XBraid install directory', dependson='--xbraid') + group = parser.add_argument_group("XBraid Options") + + add_arg( + group, + "--xbraid", + "SUNDIALS_XBRAID", + "ENABLE_XBRAID", + "OFF", + "BOOL", + "SUNDIALS XBraid support", + ) + + add_arg( + group, + "--xbraid-dir", + "XBRAID_ROOT", + "XBRAID_DIR", + None, + "PATH", + "XBraid install directory", + dependson="--xbraid", + ) # -------- # Testing # -------- - group = parser.add_argument_group('Testing Options') + group = parser.add_argument_group("Testing Options") # development tests - add_arg(group, '--dev-tests', 'SUNDIALS_TEST_DEVTESTS', - 'SUNDIALS_TEST_DEVTESTS', 'OFF', 'BOOL', - 'SUNDIALS development tests') + add_arg( + group, + "--dev-tests", + "SUNDIALS_TEST_DEVTESTS", + "SUNDIALS_TEST_DEVTESTS", + "OFF", + "BOOL", + "SUNDIALS development tests", + ) # unit tests - add_arg(group, '--unit-tests', 'SUNDIALS_TEST_UNITTESTS', - 'SUNDIALS_TEST_UNITTESTS', 'OFF', 'BOOL', - 'SUNDIALS unit tests') - - add_arg(group, '--no-gtest', 'SUNDIALS_TEST_ENABLE_GTEST', - 'SUNDIALS_TEST_ENABLE_GTEST', 'ON', 'BOOL', - 'SUNDIALS GTest unit tests') + add_arg( + group, + "--unit-tests", + "SUNDIALS_TEST_UNITTESTS", + "SUNDIALS_TEST_UNITTESTS", + "OFF", + "BOOL", + "SUNDIALS unit tests", + ) + + add_arg( + group, + "--no-gtest", + "SUNDIALS_TEST_ENABLE_GTEST", + "SUNDIALS_TEST_ENABLE_GTEST", + "ON", + "BOOL", + "SUNDIALS GTest unit tests", + ) # test output directory - add_arg(group, '--test-output-dir', 'SUNDIALS_TEST_OUTPUT_DIR', - 'SUNDIALS_TEST_OUTPUT_DIR', None, 'PATH', - 'SUNDIALS test output directory') + add_arg( + group, + "--test-output-dir", + "SUNDIALS_TEST_OUTPUT_DIR", + "SUNDIALS_TEST_OUTPUT_DIR", + None, + "PATH", + "SUNDIALS test output directory", + ) # test answer directory - add_arg(group, '--test-answer-dir', 'SUNDIALS_TEST_ANSWER_DIR', - 'SUNDIALS_TEST_ANSWER_DIR', None, 'PATH', - 'SUNDIALS test answer directory') + add_arg( + group, + "--test-answer-dir", + "SUNDIALS_TEST_ANSWER_DIR", + "SUNDIALS_TEST_ANSWER_DIR", + None, + "PATH", + "SUNDIALS test answer directory", + ) # test float comparison precision - add_arg(group, '--test-float-precision', 'SUNDIALS_TEST_FLOAT_PRECISION', - 'SUNDIALS_TEST_FLOAT_PRECISION', None, 'STRING', - 'SUNDIALS test float comparison precision') + add_arg( + group, + "--test-float-precision", + "SUNDIALS_TEST_FLOAT_PRECISION", + "SUNDIALS_TEST_FLOAT_PRECISION", + None, + "STRING", + "SUNDIALS test float comparison precision", + ) # test integer comparison precision - add_arg(group, '--test-integer-precision', - 'SUNDIALS_TEST_INTEGER_PRECISION', - 'SUNDIALS_TEST_INTEGER_PRECISION', None, 'STRING', - 'SUNDIALS test integer comparison precision') - - add_arg(group, '--make-verbose', 'CMAKE_VERBOSE_MAKEFILE', - 'CMAKE_VERBOSE_MAKEFILE', 'OFF', 'BOOL', 'verbose make output') + add_arg( + group, + "--test-integer-precision", + 
"SUNDIALS_TEST_INTEGER_PRECISION", + "SUNDIALS_TEST_INTEGER_PRECISION", + None, + "STRING", + "SUNDIALS test integer comparison precision", + ) + + add_arg( + group, + "--make-verbose", + "CMAKE_VERBOSE_MAKEFILE", + "CMAKE_VERBOSE_MAKEFILE", + "OFF", + "BOOL", + "verbose make output", + ) # --------------------- # Parse and check args @@ -640,20 +1396,20 @@ def read_env(args): continue # don't overwite options already set at command line - value = args_dict[a]['value'] - default = args_dict[a]['default'] + value = args_dict[a]["value"] + default = args_dict[a]["default"] if value != default: continue # check for environment variable and set value - env_var = args_dict[a]['env_var'] + env_var = args_dict[a]["env_var"] if env_var is None: continue if env_var in os.environ: - args_dict[a]['value'] = os.environ[env_var] + args_dict[a]["value"] = os.environ[env_var] # ----------------------------------------------------------------------------- @@ -661,31 +1417,49 @@ def read_env(args): # ----------------------------------------------------------------------------- -def add_arg(parser, arg, env_var, cmake_var, cmake_default, cmake_type, msg, - choices=None, dependson=None): +def add_arg( + parser, + arg, + env_var, + cmake_var, + cmake_default, + cmake_type, + msg, + choices=None, + dependson=None, +): """Add a command SUNDIALS option command line arg""" # Use underscores in the arg variable name - arg_dest = arg[2:].replace('-', '_') + arg_dest = arg[2:].replace("-", "_") help_msg = msg # Define function to create an argparse SUNDIALS option type - arg_type = cmake_arg(env_var, cmake_var, cmake_default, cmake_type, msg, - choices=choices, dependson=dependson) + arg_type = cmake_arg( + env_var, + cmake_var, + cmake_default, + cmake_type, + msg, + choices=choices, + dependson=dependson, + ) # Replace 'None' with a default string to ensure a dictionary is created # even when a command line input is not provided. This is ensures the # dictionary exists when reading variables from the environment. if cmake_default is None: - cmake_default = '__default_none__' + cmake_default = "__default_none__" # Create command line arg - parser.add_argument(arg, dest=arg_dest, type=arg_type, - default=cmake_default, help=help_msg) + parser.add_argument( + arg, dest=arg_dest, type=arg_type, default=cmake_default, help=help_msg + ) -def cmake_arg(env_var, cmake_var, cmake_default, cmake_type, msg, - choices=None, dependson=None): +def cmake_arg( + env_var, cmake_var, cmake_default, cmake_type, msg, choices=None, dependson=None +): """Function factory for argparse SUNDIALS option type""" def _cmake_arg(str_var): @@ -694,19 +1468,19 @@ def _cmake_arg(str_var): import argparse # check if using None for the default value - if str_var == '__default_none__': + if str_var == "__default_none__": str_var = None # check for valid input options - if cmake_type == 'BOOL' and str_var not in ['ON', 'OFF', None]: - err_msg = 'Invalid option value ' + str_var + '. ' - err_msg += 'Input value must be ON or OFF.' + if cmake_type == "BOOL" and str_var not in ["ON", "OFF", None]: + err_msg = "Invalid option value " + str_var + ". " + err_msg += "Input value must be ON or OFF." 
raise argparse.ArgumentTypeError("Invaid Value for BOOL") if choices is not None and str_var is not None: raise_error = False if ";" in str_var: - for s in str_var.split(';'): + for s in str_var.split(";"): if s not in choices: raise_error = True else: @@ -714,24 +1488,24 @@ def _cmake_arg(str_var): raise_error = True if raise_error: - err_msg = 'Invalid option value ' + str_var + '. ' - err_msg += 'Input value must be ' + err_msg = "Invalid option value " + str_var + ". " + err_msg += "Input value must be " if len(choices) < 3: - err_msg += ' or '.join(choices) + '.' + err_msg += " or ".join(choices) + "." else: - err_msg += ', '.join(choices[:-1]) - err_msg += ', or ' + choices[-1] + '.' + err_msg += ", ".join(choices[:-1]) + err_msg += ", or " + choices[-1] + "." raise argparse.ArgumentTypeError(err_msg) # create dictionary for SUNDIALS option cmake_dict = {} - cmake_dict['env_var'] = env_var - cmake_dict['cmake_var'] = cmake_var - cmake_dict['default'] = cmake_default - cmake_dict['cmake_type'] = cmake_type - cmake_dict['msg'] = msg - cmake_dict['value'] = str_var - cmake_dict['depends_on'] = dependson + cmake_dict["env_var"] = env_var + cmake_dict["cmake_var"] = cmake_var + cmake_dict["default"] = cmake_default + cmake_dict["cmake_type"] = cmake_type + cmake_dict["msg"] = msg + cmake_dict["value"] = str_var + cmake_dict["depends_on"] = dependson return cmake_dict @@ -761,35 +1535,34 @@ def write_cmake(fn, args): # print(a, args_dict[a]) # don't wite output lines if using the default value - value = args_dict[a]['value'] - default = args_dict[a]['default'] + value = args_dict[a]["value"] + default = args_dict[a]["default"] if value is None or value == default: continue # don't wite output if TPL is not enabled - depends_on = args_dict[a]['depends_on'] + depends_on = args_dict[a]["depends_on"] if depends_on is not None: - depends_on = depends_on[2:].replace('-', '_') - depends_on_val = args_dict[depends_on]['value'] + depends_on = depends_on[2:].replace("-", "_") + depends_on_val = args_dict[depends_on]["value"] # print(depends_on, depends_on_val) - if depends_on_val != 'ON': + if depends_on_val != "ON": continue # write CMake output - cmake_var = args_dict[a]['cmake_var'] - cmake_type = args_dict[a]['cmake_type'] - cmake_msg = args_dict[a]['msg'] + cmake_var = args_dict[a]["cmake_var"] + cmake_type = args_dict[a]["cmake_type"] + cmake_msg = args_dict[a]["msg"] - if args.filetype == 'cache': - cmd = (f"set({cmake_var} \"{value}\" CACHE {cmake_type} " - f"\"{cmake_msg}\")\n") + if args.filetype == "cache": + cmd = f'set({cmake_var} "{value}" CACHE {cmake_type} ' f'"{cmake_msg}")\n' else: - cmd = f" \\\n -D {cmake_var}=\"{value}\"" + cmd = f' \\\n -D {cmake_var}="{value}"' fn.write(cmd) @@ -799,13 +1572,15 @@ def setup_file(cmakefile, filename, filetype): import os import stat - if filetype == 'cache': - msg = (f'# CMake cache file for configuring SUNDIALS\n' - f'#\n' - f'# Move this file to your build directory and configure ' - f'SUNDIALS with the\n' - f'# following command:\n' - f'# cmake -C {filename}\n') + if filetype == "cache": + msg = ( + f"# CMake cache file for configuring SUNDIALS\n" + f"#\n" + f"# Move this file to your build directory and configure " + f"SUNDIALS with the\n" + f"# following command:\n" + f"# cmake -C {filename}\n" + ) cmakefile.write(msg) # update permissions to make sure the file is not executable @@ -817,18 +1592,20 @@ def setup_file(cmakefile, filename, filetype): st = os.stat(filename) os.chmod(filename, st.st_mode & NO_EXE) else: - msg = 
(f'#!/bin/bash\n' - f'# Script for configuring SUNDIALS\n' - f'#\n' - f'# Move this file to your build directory and configure ' - f'SUNDIALS with the\n' - f'# following command:\n' - f'# ./{filename} \n' - f'if [ "$#" -lt 1 ]; then\n' - f' echo "ERROR: Path to SUNDIALS source required"\n' - f' exit 1\n' - f'fi\n' - f'cmake $1') + msg = ( + f"#!/bin/bash\n" + f"# Script for configuring SUNDIALS\n" + f"#\n" + f"# Move this file to your build directory and configure " + f"SUNDIALS with the\n" + f"# following command:\n" + f"# ./{filename} \n" + f'if [ "$#" -lt 1 ]; then\n' + f' echo "ERROR: Path to SUNDIALS source required"\n' + f" exit 1\n" + f"fi\n" + f"cmake $1" + ) cmakefile.write(msg) # update permissions to make sure the user can execute the script @@ -858,6 +1635,7 @@ def print_args(args): # ----------------------------------------------------------------------------- -if __name__ == '__main__': +if __name__ == "__main__": import sys + sys.exit(main()) diff --git a/test/notify.py b/test/notify.py index befdd721ab..ee19008d9a 100755 --- a/test/notify.py +++ b/test/notify.py @@ -15,24 +15,29 @@ # Send email notification if a SUNDIALS regression test status # ----------------------------------------------------------------------------- + def main(): import argparse import os parser = argparse.ArgumentParser( - description='Send email notification based on regression test status', - formatter_class=argparse.RawTextHelpFormatter) + description="Send email notification based on regression test status", + formatter_class=argparse.RawTextHelpFormatter, + ) - parser.add_argument('teststatus', type=str, - choices=['passed', 'failed', 'fixed'], - help='Status of regression test') + parser.add_argument( + "teststatus", + type=str, + choices=["passed", "failed", "fixed"], + help="Status of regression test", + ) - parser.add_argument('testname', type=str, - help='Name branch name or pull-request tested') + parser.add_argument( + "testname", type=str, help="Name branch name or pull-request tested" + ) - parser.add_argument('testurl', type=str, - help='URL for viewing test results') + parser.add_argument("testurl", type=str, help="URL for viewing test results") # parse command line args args = parser.parse_args() @@ -41,7 +46,7 @@ def main(): logfile = "suntest.log" # if log file exists add url, otherwise create log file - if (os.path.isfile(logfile)): + if os.path.isfile(logfile): with open(logfile, "a") as log: log.write("View test output at:\n") log.write(args.testurl) @@ -53,7 +58,7 @@ def main(): log.write(args.testurl) # determine notification recipient - special_branches = ['main', 'develop', 'release'] + special_branches = ["main", "develop", "release"] if any(branch in args.testname for branch in special_branches): # SUNDIALS developers list @@ -61,23 +66,23 @@ def main(): else: # author of most recent commit cmd = "git log --format='%ae' -1" - recipient = runCommand(cmd).rstrip().decode('UTF-8') + recipient = runCommand(cmd).rstrip().decode("UTF-8") # check if the last commit was a CI merge - if (recipient == 'nobody@nowhere'): + if recipient == "nobody@nowhere": cmd = "git log HEAD~1 --pretty=format:'%ae' -1" - recipient = runCommand(cmd).rstrip().decode('UTF-8') + recipient = runCommand(cmd).rstrip().decode("UTF-8") # send notification if tests fail, log file not found, or fixed - if (args.teststatus == 'failed'): + if args.teststatus == "failed": - subject = "FAILED: SUNDIALS "+args.testname+" failed regression tests" + subject = "FAILED: SUNDIALS " + args.testname + " failed regression 
tests" print("Tests failed, sending notification to", recipient) sendEmail(recipient, subject, logfile) - elif (args.teststatus == 'fixed'): + elif args.teststatus == "fixed": - subject = "FIXED: SUNDIALS "+args.testname+" passed regression tests" + subject = "FIXED: SUNDIALS " + args.testname + " passed regression tests" print("Tests fixed, sending notification to", recipient) sendEmail(recipient, subject, logfile) @@ -94,7 +99,7 @@ def runCommand(cmd): cmdout = subprocess.check_output(cmd, shell=True) - return(cmdout) + return cmdout # @@ -116,13 +121,13 @@ def sendEmail(recipient, subject, message): sender = "SUNDIALS.suntest@llnl.gov" # email settings - msg['Subject'] = subject - msg['From'] = sender - msg['To'] = recipient + msg["Subject"] = subject + msg["From"] = sender + msg["To"] = recipient # Send the message via our own SMTP server, but don't include the # envelope header. - s = smtplib.SMTP('smtp.llnl.gov') + s = smtplib.SMTP("smtp.llnl.gov") s.send_message(msg) s.quit() @@ -130,5 +135,5 @@ def sendEmail(recipient, subject, message): # # just run the main routine # -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/test/test_install.py b/test/test_install.py index 7b2dbe06e6..4dab1c4513 100755 --- a/test/test_install.py +++ b/test/test_install.py @@ -19,6 +19,7 @@ # main routine # ----------------------------------------------------------------------------- + def main(): import argparse @@ -28,29 +29,29 @@ def main(): import subprocess parser = argparse.ArgumentParser( - description='Find and build installed examples', - formatter_class=argparse.RawTextHelpFormatter) + description="Find and build installed examples", + formatter_class=argparse.RawTextHelpFormatter, + ) - parser.add_argument('directory', type=str, - help='Directory to search for build files') + parser.add_argument( + "directory", type=str, help="Directory to search for build files" + ) - parser.add_argument('--cmake', action='store_true', - help='CMake build') + parser.add_argument("--cmake", action="store_true", help="CMake build") - parser.add_argument('--test', action='store_true', - help='Test builds') + parser.add_argument("--test", action="store_true", help="Test builds") - parser.add_argument('--clean', action='store_true', - help='Clean builds') + parser.add_argument("--clean", action="store_true", help="Clean builds") - parser.add_argument('--regex', type=str, - help='Regular expression for filtering example directories') + parser.add_argument( + "--regex", type=str, help="Regular expression for filtering example directories" + ) - parser.add_argument('-v', '--verbose', action='count', default=0, - help='Verbose output') + parser.add_argument( + "-v", "--verbose", action="count", default=0, help="Verbose output" + ) - parser.add_argument('--failfast', action='store_true', - help='Stop on first failure') + parser.add_argument("--failfast", action="store_true", help="Stop on first failure") # parse command line args args = parser.parse_args() @@ -80,7 +81,7 @@ def main(): # filter files if args.regex: regex = re.compile(args.regex) - buildfiles = [ bf for bf in buildfiles if re.search(regex, bf) ] + buildfiles = [bf for bf in buildfiles if re.search(regex, bf)] if args.verbose > 0: print(f"Total files (filtered): {len(buildfiles)}") if args.verbose > 2: @@ -102,9 +103,12 @@ def main(): # clean and move on if args.clean: - ret = subprocess.call('make clean', shell=True, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL) + ret = subprocess.call( + "make clean", + shell=True, 
+ stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) # return to original directory os.chdir(cwd) continue @@ -112,11 +116,14 @@ def main(): # confgure cmake if necessary configfail = False if args.cmake: - if os.path.isfile('Makefile'): - os.remove('Makefile') - ret = subprocess.call('cmake -DCMAKE_VERBOSE_MAKEFILE=ON .', - shell=True, stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL) + if os.path.isfile("Makefile"): + os.remove("Makefile") + ret = subprocess.call( + "cmake -DCMAKE_VERBOSE_MAKEFILE=ON .", + shell=True, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) if args.verbose > 0: print(f" Config return: {ret}") if ret != 0: @@ -126,9 +133,9 @@ def main(): # make examples buildfail = False if not configfail: - ret = subprocess.call('make', shell=True, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL) + ret = subprocess.call( + "make", shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL + ) if args.verbose > 0: print(f" Build return: {ret}") if ret != 0: @@ -138,9 +145,12 @@ def main(): # test examples testfail = False if not configfail and not buildfail and args.test: - ret = subprocess.call('make test', shell=True, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL) + ret = subprocess.call( + "make test", + shell=True, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) if args.verbose > 0: print(f" Test return: {ret}") if ret != 0: @@ -167,10 +177,12 @@ def main(): else: print("All builds successful.") + # ----------------------------------------------------------------------------- # run the main routine # ----------------------------------------------------------------------------- -if __name__ == '__main__': +if __name__ == "__main__": import sys + sys.exit(main()) diff --git a/tools/log_example.py b/tools/log_example.py index fdd8f131d9..a9b908a6bb 100755 --- a/tools/log_example.py +++ b/tools/log_example.py @@ -16,6 +16,7 @@ # logs produced by the SUNLogger for adaptive integrators. 
# ----------------------------------------------------------------------------- + def main(): import argparse @@ -24,41 +25,57 @@ def main(): from suntools import logs as sunlog - parser = argparse.ArgumentParser(description='Plots') - - parser.add_argument('logfiles', type=str, nargs='+', - help='Log files to plot') - - parser.add_argument('--val', type=str, default='h', - choices=['h', 'q', 'dsm'], - help='Value to plot (default: %(default)s)') - - parser.add_argument('--step-range', type=int, nargs=2, - default=None, metavar=('LOWER_BOUND', 'UPPER_BOUND'), - help='Step range to plot') - - parser.add_argument('--time-range', type=float, nargs=2, - default=None, metavar=('LOWER_BOUND', 'UPPER_BOUND'), - help='Time range to plot') - - parser.add_argument('--step-number', action='store_true', - help='Plot value vs step number') - - parser.add_argument('--scatter', action='store_true', - help='Create scatter plot') - - parser.add_argument('--logx', action='store_true', - help='Use log scale for x-axis') - - parser.add_argument('--logy', action='store_true', - help='Use log scale for y-axis') - - parser.add_argument('--labels', type=str, nargs='+', - help='Plot labels') - - parser.add_argument('--save', type=str, nargs='?', const='fig.pdf', - default=None, metavar='FILE_NAME', - help='''Save figure to file''') + parser = argparse.ArgumentParser(description="Plots") + + parser.add_argument("logfiles", type=str, nargs="+", help="Log files to plot") + + parser.add_argument( + "--val", + type=str, + default="h", + choices=["h", "q", "dsm"], + help="Value to plot (default: %(default)s)", + ) + + parser.add_argument( + "--step-range", + type=int, + nargs=2, + default=None, + metavar=("LOWER_BOUND", "UPPER_BOUND"), + help="Step range to plot", + ) + + parser.add_argument( + "--time-range", + type=float, + nargs=2, + default=None, + metavar=("LOWER_BOUND", "UPPER_BOUND"), + help="Time range to plot", + ) + + parser.add_argument( + "--step-number", action="store_true", help="Plot value vs step number" + ) + + parser.add_argument("--scatter", action="store_true", help="Create scatter plot") + + parser.add_argument("--logx", action="store_true", help="Use log scale for x-axis") + + parser.add_argument("--logy", action="store_true", help="Use log scale for y-axis") + + parser.add_argument("--labels", type=str, nargs="+", help="Plot labels") + + parser.add_argument( + "--save", + type=str, + nargs="?", + const="fig.pdf", + default=None, + metavar="FILE_NAME", + help="""Save figure to file""", + ) # parse command line args args = parser.parse_args() @@ -72,19 +89,31 @@ def main(): log = sunlog.log_file_to_list(lf) # get successful step data - steps_s, times_s, vals_s = sunlog.get_history(log, args.val, 'success', - step_range=args.step_range, - time_range=args.time_range) + steps_s, times_s, vals_s = sunlog.get_history( + log, + args.val, + "success", + step_range=args.step_range, + time_range=args.time_range, + ) # get data for error test failures - steps_etf, times_etf, vals_etf = sunlog.get_history(log, args.val, 'failed error test', - step_range=args.step_range, - time_range=args.time_range) + steps_etf, times_etf, vals_etf = sunlog.get_history( + log, + args.val, + "failed error test", + step_range=args.step_range, + time_range=args.time_range, + ) # get data for solver failures - steps_sf, times_sf, vals_sf = sunlog.get_history(log, args.val, 'failed solve', - step_range=args.step_range, - time_range=args.time_range) + steps_sf, times_sf, vals_sf = sunlog.get_history( + log, + args.val, + "failed 
solve", + step_range=args.step_range, + time_range=args.time_range, + ) # plot log data if args.step_number: @@ -97,37 +126,39 @@ def main(): x_sf = times_sf if len(args.logfiles) == 1: - s_color = 'green' - etf_color = 'red' - sf_color = 'darkorange' + s_color = "green" + etf_color = "red" + sf_color = "darkorange" else: s_color = colors(idx) etf_color = s_color sf_color = s_color if args.labels: - s_label = f'{args.labels[idx]} successful' - etf_label = f'{args.labels[idx]} error test failed' - sf_label = f'{args.labels[idx]} solver failed' + s_label = f"{args.labels[idx]} successful" + etf_label = f"{args.labels[idx]} error test failed" + sf_label = f"{args.labels[idx]} solver failed" else: - s_label = 'successful' - etf_label = 'error test failed' - sf_label = 'solver failed' + s_label = "successful" + etf_label = "error test failed" + sf_label = "solver failed" # plot successful data if args.scatter: - ax.scatter(x_s, vals_s, color=s_color, marker='o', label=s_label, - zorder=0.1) + ax.scatter( + x_s, vals_s, color=s_color, marker="o", label=s_label, zorder=0.1 + ) else: - ax.plot(x_s, vals_s, color=s_color, marker='.', label=s_label, - zorder=0.1) + ax.plot(x_s, vals_s, color=s_color, marker=".", label=s_label, zorder=0.1) # always add failures as scatter plot - ax.scatter(x_etf, vals_etf, color=etf_color, marker='x', label=etf_label, - zorder=0.2) + ax.scatter( + x_etf, vals_etf, color=etf_color, marker="x", label=etf_label, zorder=0.2 + ) - ax.scatter(x_sf, vals_sf, color=sf_color, marker='d', label=sf_label, - zorder=0.2) + ax.scatter( + x_sf, vals_sf, color=sf_color, marker="d", label=sf_label, zorder=0.2 + ) if args.logx: ax.set_xscale("log") @@ -139,25 +170,26 @@ def main(): else: ax.set_xlabel("time") - if args.val == 'h': + if args.val == "h": ax.set_ylabel("step size") - elif args.val == 'q': + elif args.val == "q": ax.set_ylabel("order") ax.yaxis.set_major_locator(tik.MaxNLocator(integer=True)) - elif args.val == 'dsm': + elif args.val == "dsm": ax.set_ylabel("LTE estimate") - ax.legend(loc='best') + ax.legend(loc="best") - ax.grid(alpha=0.3, linestyle='--') + ax.grid(alpha=0.3, linestyle="--") if args.save: - plt.savefig(args.save, bbox_inches='tight') + plt.savefig(args.save, bbox_inches="tight") else: plt.show() # run the main routine -if __name__ == '__main__': +if __name__ == "__main__": import sys + sys.exit(main()) diff --git a/tools/log_example_mri.py b/tools/log_example_mri.py index ef138020c7..f7dda55b38 100755 --- a/tools/log_example_mri.py +++ b/tools/log_example_mri.py @@ -16,6 +16,7 @@ # logs produced by the SUNLogger with an MRI method. 
# ----------------------------------------------------------------------------- + def main(): import argparse @@ -23,25 +24,41 @@ def main(): from suntools import logs as sunlog - parser = argparse.ArgumentParser(description='Plots') - - parser.add_argument('logfile', type=str, - help='Log file to plot') - - parser.add_argument('--step-number', action='store_true', - help='Plot value vs step number') - - parser.add_argument('--step-range', type=int, nargs=2, - default=None, metavar=('LOWER_BOUND', 'UPPER_BOUND'), - help='Step range to plot') - - parser.add_argument('--time-range', type=float, nargs=2, - default=None, metavar=('LOWER_BOUND', 'UPPER_BOUND'), - help='Time range to plot') - - parser.add_argument('--save', type=str, nargs='?', const='fig.pdf', - default=None, metavar='FILE_NAME', - help='''Save figure to file''') + parser = argparse.ArgumentParser(description="Plots") + + parser.add_argument("logfile", type=str, help="Log file to plot") + + parser.add_argument( + "--step-number", action="store_true", help="Plot value vs step number" + ) + + parser.add_argument( + "--step-range", + type=int, + nargs=2, + default=None, + metavar=("LOWER_BOUND", "UPPER_BOUND"), + help="Step range to plot", + ) + + parser.add_argument( + "--time-range", + type=float, + nargs=2, + default=None, + metavar=("LOWER_BOUND", "UPPER_BOUND"), + help="Time range to plot", + ) + + parser.add_argument( + "--save", + type=str, + nargs="?", + const="fig.pdf", + default=None, + metavar="FILE_NAME", + help="""Save figure to file""", + ) # parse command line args args = parser.parse_args() @@ -50,9 +67,9 @@ def main(): log = sunlog.log_file_to_list(args.logfile) # plot log data - steps, times, vals = sunlog.get_history(log, 'h', - step_range=args.step_range, - time_range=args.time_range) + steps, times, vals = sunlog.get_history( + log, "h", step_range=args.step_range, time_range=args.time_range + ) if args.step_number: x = steps @@ -61,7 +78,7 @@ def main(): fig, ax = plt.subplots() - ax.scatter(x, vals, color='green', marker='o') + ax.scatter(x, vals, color="green", marker="o") if args.step_number: ax.set_xlabel("step") @@ -69,15 +86,16 @@ def main(): ax.set_xlabel("time") ax.set_ylabel("step size") - ax.grid(alpha=0.3, linestyle='--') + ax.grid(alpha=0.3, linestyle="--") if args.save: - plt.savefig(args.save, bbox_inches='tight') + plt.savefig(args.save, bbox_inches="tight") else: plt.show() # run the main routine -if __name__ == '__main__': +if __name__ == "__main__": import sys + sys.exit(main()) diff --git a/tools/suntools/__init__.py b/tools/suntools/__init__.py index 87ae34c366..90d13d34fc 100644 --- a/tools/suntools/__init__.py +++ b/tools/suntools/__init__.py @@ -1,4 +1,3 @@ - """ This is a Python library of utilities SUNDIALS developer may find useful. 
Right now it consists of the following modules: diff --git a/tools/suntools/csv.py b/tools/suntools/csv.py index 5739d6796b..f5135ad0f8 100644 --- a/tools/suntools/csv.py +++ b/tools/suntools/csv.py @@ -15,6 +15,7 @@ # Function to parse SUNDIALS CSV output files # ----------------------------------------------------------------------------- + def num(s): """Try to convert a string to an int or float""" @@ -42,7 +43,7 @@ def keys(filename): """ # Get keys from the first row - with open(filename, mode='r') as csvfile: + with open(filename, mode="r") as csvfile: keys = csvfile.readline().split(",")[::2] return keys @@ -73,7 +74,7 @@ def read(filename): csv_dict[k] = [] # Get values from each row - with open(filename, mode='r') as csvfile: + with open(filename, mode="r") as csvfile: reader = csv.reader(csvfile) for row in reader: values = row[1::2]