From 6081f39c6231c365ac3db9b1e168b483a0d946d1 Mon Sep 17 00:00:00 2001
From: Tuomas Koskela
Date: Fri, 11 May 2018 15:55:29 +0300
Subject: [PATCH 001/602] Created a mini-app for testing refined grids.

Contains C++ code that initializes and refines a grid, and a Python script
that sorts the refined grid into pencils, meant for testing the algorithm.
The C++ sorting code will be added soon.
---
 mini-apps/simple-grid-test/Makefile      |  16 ++
 mini-apps/simple-grid-test/grid_test.cpp |  82 ++++++
 .../simple-grid-test/sort_refined_ids.py | 250 ++++++++++++++++++
 3 files changed, 348 insertions(+)
 create mode 100644 mini-apps/simple-grid-test/Makefile
 create mode 100644 mini-apps/simple-grid-test/grid_test.cpp
 create mode 100644 mini-apps/simple-grid-test/sort_refined_ids.py

diff --git a/mini-apps/simple-grid-test/Makefile b/mini-apps/simple-grid-test/Makefile
new file mode 100644
index 000000000..c05eede04
--- /dev/null
+++ b/mini-apps/simple-grid-test/Makefile
@@ -0,0 +1,16 @@
+ARCH=$(VLASIATOR_ARCH)
+include ../../MAKE/Makefile.${ARCH}
+
+FLAGS = -W -Wall -Wextra -pedantic -std=c++11 -O0
+INCLUDES = ${INC_DCCRG} -L/usr/lib/x86_64-linux-gnu -lboost_program_options -I/usr/include/boost -L/home/tkoskela/lib/zoltan/Zoltan_v3.83/build/lib -lzoltan -I/home/tkoskela/lib/zoltan/Zoltan_v3.83/build/include
+
+default: grid_test
+
+clean:
+	rm -rf *.o grid_test
+
+grid_test.o: grid_test.cpp
+	${CMP} ${FLAGS} ${INCLUDES} -c $^
+
+grid_test: grid_test.o
+	$(CMP) ${FLAGS} $^ ${INCLUDES} -o $@
diff --git a/mini-apps/simple-grid-test/grid_test.cpp b/mini-apps/simple-grid-test/grid_test.cpp
new file mode 100644
index 000000000..47fea8e8d
--- /dev/null
+++ b/mini-apps/simple-grid-test/grid_test.cpp
@@ -0,0 +1,82 @@
+#include "dccrg.hpp"
+#include "mpi.h"
+#include <iostream>
+#include "fstream"
+
+struct grid_data {
+
+   int value = 0;
+
+   std::tuple<void*, int, MPI_Datatype> get_mpi_datatype()
+   {
+      return std::make_tuple(this, 0, MPI_BYTE);
+   }
+
+};
+
+int main(int argc, char* argv[]) {
+
+   if (MPI_Init(&argc, &argv) != MPI_SUCCESS) {
+      // cerr << "Couldn't initialize MPI." << endl;
+      abort();
+   }
+
+   MPI_Comm comm = MPI_COMM_WORLD;
+
+   int rank = 0, comm_size = 0;
+   MPI_Comm_rank(comm, &rank);
+   MPI_Comm_size(comm, &comm_size);
+
+   dccrg::Dccrg<grid_data> grid;
+
+   const int xDim = 16;
+   const int yDim = 16;
+   const int zDim = 16;
+   const std::array<uint64_t, 3> grid_size = {{xDim, yDim, zDim}};
+
+   grid.initialize(grid_size, comm, "RANDOM", 1);
+
+   grid.balance_load();
+
+   bool doRefine = true;
+   const std::array<uint64_t, 4> refinementIds = {{4, 22, 29, 4104}};
+   if (doRefine) {
+      for (uint i = 0; i < refinementIds.size(); i++) {
+         if (refinementIds[i] > 0) {
+            grid.refine_completely(refinementIds[i]);
+            grid.stop_refining();
+         }
+      }
+   }
+
+   grid.balance_load();
+
+   // std::vector<uint64_t> cells = grid.get_cells()
+
+   auto cells = grid.cells;
+   sort(cells.begin(), cells.end());
+
+   std::cout << "Grid size at 0 refinement is " << xDim << " x " << yDim << " x " << zDim << std::endl;
+   for (const auto& cell: cells) {
+      std::cout << "Data of cell " << cell.id << " is stored at " << cell.data << std::endl;
+   }
+
+   std::ofstream outfile;
+
+   grid.write_vtk_file("test.vtk");
+
+   outfile.open("test.vtk", std::ofstream::app);
+   // write each cell's id
+   outfile << "CELL_DATA " << cells.size() << std::endl;
+   outfile << "SCALARS id int 1" << std::endl;
+   outfile << "LOOKUP_TABLE default" << std::endl;
+   for (const auto& cell: cells) {
+      outfile << cell.id << std::endl;
+   }
+   outfile.close();
+
+   MPI_Finalize();
+
+   return 0;
+
+}
diff --git a/mini-apps/simple-grid-test/sort_refined_ids.py b/mini-apps/simple-grid-test/sort_refined_ids.py
new file mode 100644
index 000000000..9fa9716cf
--- /dev/null
+++ b/mini-apps/simple-grid-test/sort_refined_ids.py
@@ -0,0 +1,250 @@
+import numpy as np
+import pdb
+
+def findParent(id, gridSize, debug):
+
+    nIndicesInRefLvl = list()
+    for refLvl in np.arange(1,10):
+        nIndicesInRefLvl.append(gridSize * 2 ** ((refLvl - 1) * 3))
+
+    for i,_ in enumerate(nIndicesInRefLvl):
+        if id <= sum(nIndicesInRefLvl[:i+1]):
+            refLvl = i
+            break
+    if refLvl == 0:
+        if id > 0:
+            #print("cell {:3d}".format(id)+" is not refined")
+            pass
+
+        return 0, refLvl
+
+    # e.g. on the 16x16x16 grid (gridSize 4096) ids 1..4096 are level 0 and
+    # ids 4097..36864 are level 1, so id 4104 gives id2 = 8 and parentId = 4
+    id2 = id - sum(nIndicesInRefLvl[:refLvl])
+    ix = (id2 - 1) % (xdim * 2 ** refLvl) + 1
+    iy = (id2 - 1) / (xdim * 2 ** refLvl) % (ydim * 2 ** refLvl) + 1
+    iz = (id2 - 1) / (xdim * 2 ** refLvl * ydim * 2 ** refLvl) + 1
+    parentId = (int(np.ceil(iz / 2.0) - 1) * xdim * 2 ** (refLvl - 1) * ydim * 2 ** (refLvl - 1) +
+                int(np.ceil(iy / 2.0) - 1) * xdim * 2 ** (refLvl - 1) +
+                int(np.ceil(ix / 2.0)) +
+                sum(nIndicesInRefLvl[:refLvl-1]))
+    if debug:
+        print("id = {:3d}".format(id)+", id2 = {:3d}".format(id2)+
+              ", col = {:2d}".format(ix)+", row = {:2d}".format(iy)+
+              ", plane = {:2d}".format(iz)+", parentId = {:2d}".format(parentId)+
+              ", refLvl = {:1d}".format(refLvl))
+    else:
+        #print("cell {:3d}".format(id)+" is the child of cell {:2d}".format(parentId))
+        pass
+
+    return parentId, refLvl
+
+def getChildren(children, parentIds, up = True, left = True):
+
+    down = not up
+    right = not left
+
+    N = 8
+
+    myChildren = list()
+    for id in parentIds:
+
+        if up and left:
+            i1 = 0
+            i2 = 1
+        if down and left:
+            i1 = 2
+            i2 = 3
+        if up and right:
+            i1 = 4
+            i2 = 5
+        if down and right:
+            i1 = 6
+            i2 = 7
+
+        if id in children.keys():
+            myChildren.extend(children[id][i1::N])
+            myChildren.extend(children[id][i2::N])
+        else:
+            # If no children were found, return the parent
+            myChildren.append(id)
+
+    return myChildren
+
+
+debug = False
+
+filename = "grid_test.out"
+fh = open(filename)
+lines = fh.readlines()
+fh.close()
+
+ids = list()
+
+for i,line in \
enumerate(lines): + #print(line[:-1]) + words = line.split() + if i == 0: + xdim = int(words[6]) + ydim = int(words[8]) + zdim = int(words[10]) + else: + ids.append(int(words[3])) + +gridSize = xdim*ydim*zdim + +#debug = True + +parents = dict() +children = dict() +refLvls = dict() +hasChildren = list() + +for id in ids: + + # Find the parent of cell id + parentId, refLvl = findParent(id,gridSize,debug) + + parents[id] = parentId + refLvls[id] = refLvl + + # Parents are not stored in the id array by default, let's add them + # For completeness + if not parentId in ids and parentId > 0: + ids.append(parentId) + + # Make a list of cells that have been refined at least once + if parentId > 0: + if not parentId in hasChildren: + children[parentId] = list() + hasChildren.append(parentId) + + # Make a list of children for each cell + children[parentId].append(id) + +for key in children.keys(): + children[key].sort() +ids.sort() + +# Second pass to count how many times each cell has been refined +isRefined = dict() +for id in ids: + isRefined[id] = 0 + if refLvls[id] > 0: + parentId = parents[id] + while parentId is not 0: + isRefined[parentId] = refLvls[id] - refLvls[parentId] + parentId = parents[parentId] + +# Begin sorting, select the dimension by which we sort +dimension = 0 + +# Sort the mesh ids using Sebastians c++ code +if dimension == 0: + + pass + +if dimension == 1: + + pass + +if dimension == 2: + + pass + +# Create a list of unrefined cells +sortedUnrefinedIds = dict() +for id in isRefined.keys(): + if refLvls[id] == 0: + sortedUnrefinedIds[id] = isRefined[id] + +# Create pencils of unrefined cells, store the level of refinement for each cell +unrefinedPencils = list() +for iz in np.arange(zdim): + for iy in np.arange(ydim): + ibeg = iz * zdim + iy * ydim + iend = iz * zdim + (iy + 1) * ydim + unrefinedPencils.append({'ids' : sortedUnrefinedIds.keys()[ibeg:iend], + 'refLvl' : sortedUnrefinedIds.values()[ibeg:iend]}) + +# Refine the unrefined pencils that contain refined cells +#print +#print('*** Refining ***') +#print + +pencils = list() +parentIds = list() +up = True +left = True + +# Loop over the unrefined pencils +for row,unrefinedPencil in enumerate(unrefinedPencils): + # Refine each pencil according to its max refinement level, then remove duplicates + maxRefLvl = max(unrefinedPencil['refLvl']) + # We are creating pencils along the 'x' axis, loop over the 'y' and 'z' axes + # Assuming the refinement has been done equally in each dimension + for i in np.arange(2 ** maxRefLvl): + for j in np.arange(2 ** maxRefLvl): + #print('Starting new pencil, row = {:1d}, subrow = {:1d}, column = {:1d}'.format(row,i,j)) + pencilIds = list() + # Walk along the unrefined pencil + for ix in np.arange(xdim): + maxLocalRefLvl = unrefinedPencil['refLvl'][ix] + #print(' ix = {:1d}, maxLocalRefLvl = {:1d}'.format(ix,maxLocalRefLvl)) + # Walk down the refinement tree of the parent cell + parentIds.append(unrefinedPencil['ids'][ix]) + #offsets = np.zeros(maxLocalRefLvl, dtype = int) + offset = 0 + nUnRefined = 0 + iRefined = 0 + for iref in np.arange(max(maxLocalRefLvl,1)): + + # Logic for selecting cells for the pencil among the child cells + left = ( (j / 2 ** (maxRefLvl - iref - 1)) % 2 == 0 ) + up = ( (i / 2 ** (maxRefLvl - iref - 1)) % 2 == 0 ) + + #print(' iref = {:1d}, up = {:b}, left = {:b}'.format(iref,up,left)) + # The function getChildren returns the children of the parent, or the + # parent itself if it has no children + cells = getChildren(children, parentIds, up, left) + #print(cells) + 
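                    # Illustrative example of the selection logic (the numbers
                    # are hypothetical): with maxRefLvl == 2 there are 4
                    # subrows i and 4 subcolumns j; for j == 3, iref == 0 gives
                    # j / 2 ** 1 == 1 (odd), so left == False, and iref == 1
                    # gives j / 2 ** 0 == 3 (odd), right again: this builder
                    # follows the rightmost subcolumn of the refined cell.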
parentIds = list() + + offset = nUnRefined - iRefined + for k,icell in enumerate(cells): + + #print(' icell = {:3d}').format(icell) + + # Add cells that do not have further refinement to the pencil + if isRefined[icell] == 0: + # Count the number of unrefined cells that have been added during + # this iteration + nUnRefined += 1 + # The offset is the number of unrefined cells from the last + # iteration minus the index of the refined cell. + if offset > 0: + pencilIds.insert(-offset,icell) + else: + pencilIds.append(icell) + else: + # Store the index of the refined cell + iRefined = k + + # Add to cells to be processed on the next refinement level + parentIds.append(icell) + + parentIds = list() + + # Add to the list of pencils if ids are not a duplicate of the previous + # pencil. This gets rid of most duplicates, but not all of them. Needs fixing. + if len(pencils) == 0 or not pencilIds == pencils[-1]['ids']: + pencils.append({'ids' : pencilIds, + 'length': len(pencilIds), + 'width' : 2.0 ** -max(unrefinedPencil['refLvl']), + 'row' : row, + 'subrow' : i, + 'subcolumn' : j}) + else: + pass + #print('Removing duplicate pencil') + + + From d04699d118dbb861dc2ec4e93b343868054a309f Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Fri, 11 May 2018 15:59:48 +0300 Subject: [PATCH 002/602] Added Makefile for my laptop --- MAKE/Makefile.appa | 96 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 MAKE/Makefile.appa diff --git a/MAKE/Makefile.appa b/MAKE/Makefile.appa new file mode 100644 index 000000000..02c84110a --- /dev/null +++ b/MAKE/Makefile.appa @@ -0,0 +1,96 @@ +# -*- mode: makefile -*- +CMP = mpic++ +LNK = mpic++ + +#======== Vectorization ========== +#Set vector backend type for vlasov solvers, sets precision and length. +#NOTE this has to have the same precision as the distribution function define (DISTRIBUTION_FP_PRECISION) +#Options: +# AVX: VEC4D_AGNER, VEC4F_AGNER, VEC8F_AGNER +# AVX512: VEC8D_AGNER, VEC16F_AGNER +# Fallback: VEC4D_FALLBACK, VEC4F_FALLBACK, VEC8F_FALLBACK + +ifeq ($(DISTRIBUTION_FP_PRECISION),SPF) +#Single-precision + VECTORCLASS = VEC8F_AGNER +else +#Double-precision + VECTORCLASS = VEC4D_AGNER +endif + +#======== PAPI ========== +#Add PAPI_MEM define to use papi to report memory consumption? +CXXFLAGS += -DPAPI_MEM + + +#======== Allocator ========= +#Use jemalloc instead of system malloc to reduce memory fragmentation? https://github.com/jemalloc/jemalloc +#Configure jemalloc with --with-jemalloc-prefix=je_ when installing it +CXXFLAGS += -DUSE_JEMALLOC -DJEMALLOC_NO_DEMANGLE + + +#======= Compiler and compilation flags ========= +# NOTES on compiler flags: +# CXXFLAGS is for compiler flags, they are always used +# MATHFLAGS are for special math etc. 
flags, these are only applied on solver functions
+# LDFLAGS flags for linker
+
+#-DNO_WRITE_AT_ALL:  Define to disable write at all to
+#                    avoid memleak (much slower IO)
+#-DMPICH_IGNORE_CXX_SEEK: Ignores some multiple definition
+#                         errors that come up when using
+#                         mpi.h in c++ on Cray
+
+CXXFLAGS += -DMPICH_IGNORE_CXX_SEEK
+
+FLAGS =
+
+#GNU flags:
+CC_BRAND = gcc
+CC_BRAND_VERSION = 5.4.0
+CXXFLAGS += -O3 -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2
+testpackage: CXXFLAGS = -O2 -fopenmp -funroll-loops -std=c++0x -fabi-version=0 -mavx
+
+MATHFLAGS = -ffast-math
+LDFLAGS =
+LIB_MPI = -lgomp
+
+# BOOST_VERSION = current trilinos version
+# ZOLTAN_VERSION = current trilinos version
+
+#======== Libraries ===========
+
+MPT_VERSION = 7.2.6
+JEMALLOC_VERSION = 5.0.1
+LIBRARY_PREFIX = /home/tkoskela/lib
+
+
+#compiled libraries
+INC_BOOST = -I/usr/include/boost
+LIB_BOOST = -L/usr/lib/x86_64-linux-gnu -lboost_program_options
+
+INC_ZOLTAN = -I$(LIBRARY_PREFIX)/zoltan/Zoltan_v3.83/build/include
+#LIB_ZOLTAN = -I$(LIBRARY_PREFIX)/zoltan/Zoltan_v3.83/build/lib -lzoltan
+LIB_ZOLTAN = /home/tkoskela/lib/zoltan/Zoltan_v3.83/build/lib/libzoltan.a
+
+INC_JEMALLOC = -I$(LIBRARY_PREFIX)/jemalloc/include
+LIB_JEMALLOC = -L$(LIBRARY_PREFIX)/jemalloc/lib -ljemalloc
+
+INC_VLSV = -I$(LIBRARY_PREFIX)/vlsv
+LIB_VLSV = -L$(LIBRARY_PREFIX)/vlsv -lvlsv
+
+INC_PROFILE = -I$(LIBRARY_PREFIX)/phiprof/phiprof-2.0-beta/include
+LIB_PROFILE = -L$(LIBRARY_PREFIX)/phiprof/phiprof-2.0-beta/lib -lphiprof
+
+INC_PAPI = -I$(LIBRARY_PREFIX)/papi/include
+LIB_PAPI = -L$(LIBRARY_PREFIX)/papi/lib -lpapi
+
+#header libraries
+
+INC_EIGEN = -I$(LIBRARY_PREFIX)/eigen/
+INC_DCCRG = -I$(LIBRARY_PREFIX)/dccrg/
+INC_VECTORCLASS = -I$(LIBRARY_PREFIX)/vectorclass

From 7c5303e9a22176345d3bf8e3be6cbed81b5237da Mon Sep 17 00:00:00 2001
From: Tuomas Koskela
Date: Mon, 14 May 2018 14:10:20 +0300
Subject: [PATCH 003/602] Fixed a bug in the creation of unrefined pencils when
 zdim > 1.
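
The plane stride of the flattened unrefined grid is xdim * ydim, not zdim,
so the old expression was only correct when zdim == 1. A minimal sketch of
the intended row-major indexing, assuming pencils along the x axis (the
variable names are illustrative, not code from this patch):

    import numpy as np

    xdim, ydim, zdim = 4, 3, 2
    ids = 1 + np.arange(xdim * ydim * zdim)      # 1-based unrefined cell ids

    for iz in np.arange(zdim):
        for iy in np.arange(ydim):
            ibeg = iz * xdim * ydim + iy * xdim  # first cell of pencil (iy, iz)
            pencil = ids[ibeg:ibeg + xdim]       # xdim cells per pencil

Note that the patched code multiplies iy by ydim rather than xdim; the two
agree on the cubic 16 x 16 x 16 test grid.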
--- .../simple-grid-test/sort_refined_ids.py | 55 ++++++++++++------- 1 file changed, 34 insertions(+), 21 deletions(-) diff --git a/mini-apps/simple-grid-test/sort_refined_ids.py b/mini-apps/simple-grid-test/sort_refined_ids.py index 9fa9716cf..a78dec442 100644 --- a/mini-apps/simple-grid-test/sort_refined_ids.py +++ b/mini-apps/simple-grid-test/sort_refined_ids.py @@ -7,13 +7,13 @@ def findParent(id, gridSize, debug): for refLvl in np.arange(1,10): nIndicesInRefLvl.append(gridSize * 2 ** ((refLvl - 1) * 3)) - for i,_ in enumerate(nIndicesInRefLvl): + for i in np.arange(len(nIndicesInRefLvl)): if id <= sum(nIndicesInRefLvl[:i+1]): refLvl = i break if refLvl == 0: if id > 0: - #print("cell {:3d}".format(id)+" is not refined") + print("cell {:3d}".format(id)+" is not refined") pass return 0, refLvl @@ -32,7 +32,7 @@ def findParent(id, gridSize, debug): ", plane = {:2d}".format(iz)+", parentId = {:2d}".format(parentId)+ ", refLvl = {:1d}".format(refLvl)) else: - #print("cell {:3d}".format(id)+" is the child of cell {:2d}".format(parentId)) + print("cell {:3d}".format(id)+" is the child of cell {:2d}".format(parentId)) pass return parentId, refLvl @@ -120,6 +120,8 @@ def getChildren(children, parentIds, up = True, left = True): # Make a list of children for each cell children[parentId].append(id) +# Sort the id and children lists, this is needed when adding cells to pencils +# to get the order right for key in children.keys(): children[key].sort() ids.sort() @@ -137,18 +139,29 @@ def getChildren(children, parentIds, up = True, left = True): # Begin sorting, select the dimension by which we sort dimension = 0 -# Sort the mesh ids using Sebastians c++ code -if dimension == 0: - - pass - -if dimension == 1: +sortedIds = list() +for id in ids: + # Sort the mesh ids using Sebastians c++ code + if dimension == 0: - pass + idMapped = id + + if dimension == 1: + + x_index = id % xdim + y_index = (id / xdim) % ydim + idMapped = id - (x_index + y_index * xdim) + y_index + x_index * ydim + + if dimension == 2: + + x_index = id % xdim + y_index = (id / xdim) % ydim + z_index = (id / (xdim * ydim)) + idMapped = z_index + y_index * zdim + x_index * ydim * zdim -if dimension == 2: + sortedIds.append((idMapped, id)) - pass +sortedIds.sort() # Create a list of unrefined cells sortedUnrefinedIds = dict() @@ -160,15 +173,15 @@ def getChildren(children, parentIds, up = True, left = True): unrefinedPencils = list() for iz in np.arange(zdim): for iy in np.arange(ydim): - ibeg = iz * zdim + iy * ydim - iend = iz * zdim + (iy + 1) * ydim + ibeg = iz * xdim * ydim + iy * ydim + iend = iz * xdim * ydim + (iy + 1) * ydim unrefinedPencils.append({'ids' : sortedUnrefinedIds.keys()[ibeg:iend], 'refLvl' : sortedUnrefinedIds.values()[ibeg:iend]}) # Refine the unrefined pencils that contain refined cells -#print -#print('*** Refining ***') -#print +print +print('*** Refining ***') +print pencils = list() parentIds = list() @@ -183,12 +196,12 @@ def getChildren(children, parentIds, up = True, left = True): # Assuming the refinement has been done equally in each dimension for i in np.arange(2 ** maxRefLvl): for j in np.arange(2 ** maxRefLvl): - #print('Starting new pencil, row = {:1d}, subrow = {:1d}, column = {:1d}'.format(row,i,j)) + print('Starting new pencil, row = {:1d}, subrow = {:1d}, column = {:1d}'.format(row,i,j)) pencilIds = list() # Walk along the unrefined pencil for ix in np.arange(xdim): maxLocalRefLvl = unrefinedPencil['refLvl'][ix] - #print(' ix = {:1d}, maxLocalRefLvl = {:1d}'.format(ix,maxLocalRefLvl)) + 
print(' ix = {:1d}, maxLocalRefLvl = {:1d}'.format(ix,maxLocalRefLvl)) # Walk down the refinement tree of the parent cell parentIds.append(unrefinedPencil['ids'][ix]) #offsets = np.zeros(maxLocalRefLvl, dtype = int) @@ -201,7 +214,7 @@ def getChildren(children, parentIds, up = True, left = True): left = ( (j / 2 ** (maxRefLvl - iref - 1)) % 2 == 0 ) up = ( (i / 2 ** (maxRefLvl - iref - 1)) % 2 == 0 ) - #print(' iref = {:1d}, up = {:b}, left = {:b}'.format(iref,up,left)) + print(' iref = {:1d}, up = {:b}, left = {:b}'.format(iref,up,left)) # The function getChildren returns the children of the parent, or the # parent itself if it has no children cells = getChildren(children, parentIds, up, left) @@ -243,8 +256,8 @@ def getChildren(children, parentIds, up = True, left = True): 'subrow' : i, 'subcolumn' : j}) else: + print('Removing duplicate pencil') pass - #print('Removing duplicate pencil') From b1615254e1ee75b7551af7f864fff475ec680148 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Tue, 15 May 2018 11:45:20 +0300 Subject: [PATCH 004/602] Fix for getChildren in dimensions 1 and 2. --- .../simple-grid-test/sort_refined_ids.py | 119 ++++++++++++------ 1 file changed, 82 insertions(+), 37 deletions(-) diff --git a/mini-apps/simple-grid-test/sort_refined_ids.py b/mini-apps/simple-grid-test/sort_refined_ids.py index a78dec442..434542589 100644 --- a/mini-apps/simple-grid-test/sort_refined_ids.py +++ b/mini-apps/simple-grid-test/sort_refined_ids.py @@ -37,7 +37,7 @@ def findParent(id, gridSize, debug): return parentId, refLvl -def getChildren(children, parentIds, up = True, left = True): +def getChildren(children, parentIds, dimension = 0, up = True, left = True): down = not up right = not left @@ -46,19 +46,48 @@ def getChildren(children, parentIds, up = True, left = True): myChildren = list() for id in parentIds: - - if up and left: - i1 = 0 - i2 = 1 - if down and left: - i1 = 2 - i2 = 3 - if up and right: - i1 = 4 - i2 = 5 - if down and right: - i1 = 6 - i2 = 7 + + if dimension == 0: + if up and left: + i1 = 0 + i2 = 1 + if down and left: + i1 = 2 + i2 = 3 + if up and right: + i1 = 4 + i2 = 5 + if down and right: + i1 = 6 + i2 = 7 + + if dimension == 1: + if up and left: + i1 = 0 + i2 = 2 + if down and left: + i1 = 1 + i2 = 3 + if up and right: + i1 = 4 + i2 = 6 + if down and right: + i1 = 5 + i2 = 7 + + if dimension == 2: + if up and left: + i1 = 0 + i2 = 4 + if down and left: + i1 = 1 + i2 = 5 + if up and right: + i1 = 2 + i2 = 6 + if down and right: + i1 = 3 + i2 = 7 if id in children.keys(): myChildren.extend(children[id][i1::N]) @@ -137,46 +166,62 @@ def getChildren(children, parentIds, up = True, left = True): parentId = parents[parentId] # Begin sorting, select the dimension by which we sort -dimension = 0 +dimension = 2 -sortedIds = list() +#sortedIds = list() +mapping = dict() for id in ids: # Sort the mesh ids using Sebastians c++ code if dimension == 0: + dims = (zdim, ydim, xdim) + idMapped = id if dimension == 1: + + dims = (zdim, xdim, ydim) - x_index = id % xdim - y_index = (id / xdim) % ydim + x_index = (id-1) % xdim + y_index = ((id-1) / xdim) % ydim idMapped = id - (x_index + y_index * xdim) + y_index + x_index * ydim if dimension == 2: + + dims = (ydim, xdim, zdim) - x_index = id % xdim - y_index = (id / xdim) % ydim - z_index = (id / (xdim * ydim)) - idMapped = z_index + y_index * zdim + x_index * ydim * zdim + x_index = (id-1) % xdim + y_index = ((id-1) / xdim) % ydim + z_index = ((id-1) / (xdim * ydim)) + idMapped = 1 + z_index + y_index * zdim + x_index * ydim * 
zdim - sortedIds.append((idMapped, id)) + #sortedIds.append((idMapped, id)) + if refLvls[id] == 0: + mapping[idMapped] = id -sortedIds.sort() +#sortedIds.sort() # Create a list of unrefined cells -sortedUnrefinedIds = dict() -for id in isRefined.keys(): - if refLvls[id] == 0: - sortedUnrefinedIds[id] = isRefined[id] +#sortedUnrefinedIds = dict() +# for id in isRefined.keys(): +# if refLvls[id] == 0: +# sortedUnrefinedIds[id] = isRefined[id] # Create pencils of unrefined cells, store the level of refinement for each cell unrefinedPencils = list() -for iz in np.arange(zdim): - for iy in np.arange(ydim): - ibeg = iz * xdim * ydim + iy * ydim - iend = iz * xdim * ydim + (iy + 1) * ydim - unrefinedPencils.append({'ids' : sortedUnrefinedIds.keys()[ibeg:iend], - 'refLvl' : sortedUnrefinedIds.values()[ibeg:iend]}) +for i in np.arange(dims[0]): + for j in np.arange(dims[1]): + ibeg = 1 + i * dims[2] * dims[1] + j * dims[2] + iend = 1 + i * dims[2] * dims[1] + (j + 1) * dims[2] + myIsRefined = list() + myIds = list() + for k in np.arange(ibeg,iend): + myIds.append(mapping[k]) + myIsRefined.append(isRefined[mapping[k]]) + unrefinedPencils.append({'ids' : myIds, + 'refLvl' : myIsRefined}) + #unrefinedPencils.append({'ids' : sortedUnrefinedIds.keys()[ibeg:iend], + # 'refLvl' : sortedUnrefinedIds.values()[ibeg:iend]}) # Refine the unrefined pencils that contain refined cells print @@ -199,7 +244,7 @@ def getChildren(children, parentIds, up = True, left = True): print('Starting new pencil, row = {:1d}, subrow = {:1d}, column = {:1d}'.format(row,i,j)) pencilIds = list() # Walk along the unrefined pencil - for ix in np.arange(xdim): + for ix in np.arange(dims[2]): maxLocalRefLvl = unrefinedPencil['refLvl'][ix] print(' ix = {:1d}, maxLocalRefLvl = {:1d}'.format(ix,maxLocalRefLvl)) # Walk down the refinement tree of the parent cell @@ -217,8 +262,8 @@ def getChildren(children, parentIds, up = True, left = True): print(' iref = {:1d}, up = {:b}, left = {:b}'.format(iref,up,left)) # The function getChildren returns the children of the parent, or the # parent itself if it has no children - cells = getChildren(children, parentIds, up, left) - #print(cells) + cells = getChildren(children, parentIds, dimension, up, left) + print(cells) parentIds = list() offset = nUnRefined - iRefined From fde99926d675756ce456f83272791cb0a4e075cb Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Wed, 16 May 2018 07:57:25 +0300 Subject: [PATCH 005/602] Added comments --- mini-apps/simple-grid-test/sort_refined_ids.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mini-apps/simple-grid-test/sort_refined_ids.py b/mini-apps/simple-grid-test/sort_refined_ids.py index 434542589..7884cc2bf 100644 --- a/mini-apps/simple-grid-test/sort_refined_ids.py +++ b/mini-apps/simple-grid-test/sort_refined_ids.py @@ -47,6 +47,8 @@ def getChildren(children, parentIds, dimension = 0, up = True, left = True): myChildren = list() for id in parentIds: + # Select 2/8 children per parent according to the logical parameters up,down,left,right. + # The names are slightly unintuitive in other dimensions but they come from dimension == 0 if dimension == 0: if up and left: i1 = 0 From 82865585d31d9f49575a49810b4c2bcd30f79461 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Thu, 17 May 2018 15:55:31 +0300 Subject: [PATCH 006/602] Added a recursive algorithm to sort refined ids and create pencils. Seems to work. 
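
The builder walks along an unrefined pencil and, at each refined cell,
descends one level: it keeps one pair of children for itself and spawns a
copy of itself for the other choices, so every path through the refinement
tree becomes its own pencil. A toy sketch of that recursion on a 1D mesh
(hypothetical ids and helper, not the code added below):

    children = {2: [4, 5]}   # cell 2 was refined into cells 4 and 5

    def build(pencils, done, todo):
        done, todo = list(done), list(todo)
        while todo:
            c = todo.pop(0)
            if c in children:
                lo, hi = children[c]
                build(pencils, done, [hi] + todo)  # spawned builder takes one child
                todo.insert(0, lo)                 # this builder keeps the other
            else:
                done.append(c)
        pencils.append(done)

    pencils = []
    build(pencils, [], [1, 2, 3])
    print(pencils)   # [[1, 5, 3], [1, 4, 3]]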
--- .../simple-grid-test/sort_refined_ids.py | 31 +- .../sort_refined_ids_recursive.py | 338 ++++++++++++++++++ 2 files changed, 358 insertions(+), 11 deletions(-) create mode 100644 mini-apps/simple-grid-test/sort_refined_ids_recursive.py diff --git a/mini-apps/simple-grid-test/sort_refined_ids.py b/mini-apps/simple-grid-test/sort_refined_ids.py index 7884cc2bf..e102cbc4b 100644 --- a/mini-apps/simple-grid-test/sort_refined_ids.py +++ b/mini-apps/simple-grid-test/sort_refined_ids.py @@ -1,5 +1,6 @@ import numpy as np import pdb +import time def findParent(id, gridSize, debug): @@ -103,7 +104,8 @@ def getChildren(children, parentIds, dimension = 0, up = True, left = True): debug = False -filename = "grid_test.out" +#filename = "grid_test.out" +filename = "refined_4.out" fh = open(filename) lines = fh.readlines() fh.close() @@ -124,6 +126,8 @@ def getChildren(children, parentIds, dimension = 0, up = True, left = True): #debug = True +t1 = time.time() + parents = dict() children = dict() refLvls = dict() @@ -168,7 +172,7 @@ def getChildren(children, parentIds, dimension = 0, up = True, left = True): parentId = parents[parentId] # Begin sorting, select the dimension by which we sort -dimension = 2 +dimension = 1 #sortedIds = list() mapping = dict() @@ -227,8 +231,8 @@ def getChildren(children, parentIds, dimension = 0, up = True, left = True): # Refine the unrefined pencils that contain refined cells print -print('*** Refining ***') -print +#print('*** Refining ***') +#print pencils = list() parentIds = list() @@ -243,15 +247,16 @@ def getChildren(children, parentIds, dimension = 0, up = True, left = True): # Assuming the refinement has been done equally in each dimension for i in np.arange(2 ** maxRefLvl): for j in np.arange(2 ** maxRefLvl): - print('Starting new pencil, row = {:1d}, subrow = {:1d}, column = {:1d}'.format(row,i,j)) + if debug: + print('Starting new pencil, row = {:1d}, subrow = {:1d}, column = {:1d}'.format(row,i,j)) pencilIds = list() # Walk along the unrefined pencil for ix in np.arange(dims[2]): maxLocalRefLvl = unrefinedPencil['refLvl'][ix] - print(' ix = {:1d}, maxLocalRefLvl = {:1d}'.format(ix,maxLocalRefLvl)) + if debug: + print(' ix = {:1d}, maxLocalRefLvl = {:1d}'.format(ix,maxLocalRefLvl)) # Walk down the refinement tree of the parent cell parentIds.append(unrefinedPencil['ids'][ix]) - #offsets = np.zeros(maxLocalRefLvl, dtype = int) offset = 0 nUnRefined = 0 iRefined = 0 @@ -260,12 +265,12 @@ def getChildren(children, parentIds, dimension = 0, up = True, left = True): # Logic for selecting cells for the pencil among the child cells left = ( (j / 2 ** (maxRefLvl - iref - 1)) % 2 == 0 ) up = ( (i / 2 ** (maxRefLvl - iref - 1)) % 2 == 0 ) - - print(' iref = {:1d}, up = {:b}, left = {:b}'.format(iref,up,left)) + if debug: + print(' iref = {:1d}, up = {:b}, left = {:b}'.format(iref,up,left)) # The function getChildren returns the children of the parent, or the # parent itself if it has no children cells = getChildren(children, parentIds, dimension, up, left) - print(cells) + #print(cells) parentIds = list() offset = nUnRefined - iRefined @@ -281,7 +286,7 @@ def getChildren(children, parentIds, dimension = 0, up = True, left = True): # The offset is the number of unrefined cells from the last # iteration minus the index of the refined cell. 
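                     # Illustrative example: if pencilIds == [10, 11, 12], where
                     # 11 and 12 are unrefined cells appended after refined cell
                     # 10, then offset == 2 and pencilIds.insert(-2, 13) splices
                     # the child 13 in ahead of them: [10, 13, 11, 12].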
if offset > 0: - pencilIds.insert(-offset,icell) + pencilIds.insert(-offset,icell) else: pencilIds.append(icell) else: @@ -306,5 +311,9 @@ def getChildren(children, parentIds, dimension = 0, up = True, left = True): print('Removing duplicate pencil') pass +t2 = time.time() +for i,pencil in enumerate(pencils): + print("pencil {:2d}, ids: ".format(i), pencil['ids']) +print(t2-t1) diff --git a/mini-apps/simple-grid-test/sort_refined_ids_recursive.py b/mini-apps/simple-grid-test/sort_refined_ids_recursive.py new file mode 100644 index 000000000..16157e797 --- /dev/null +++ b/mini-apps/simple-grid-test/sort_refined_ids_recursive.py @@ -0,0 +1,338 @@ +import numpy as np +import pdb +import time + +def findParent(id, gridSize, debug): + + nIndicesInRefLvl = list() + for refLvl in np.arange(1,10): + nIndicesInRefLvl.append(gridSize * 2 ** ((refLvl - 1) * 3)) + + for i in np.arange(len(nIndicesInRefLvl)): + if id <= sum(nIndicesInRefLvl[:i+1]): + refLvl = i + break + if refLvl == 0: + if id > 0: + print("cell {:3d}".format(id)+" does not have a parent") + pass + + return 0, refLvl + + id2 = id - sum(nIndicesInRefLvl[:refLvl]) + ix = (id2 - 1) % (xdim * 2 ** refLvl) + 1 + iy = (id2 - 1) / (xdim * 2 ** refLvl) % (ydim * 2 ** refLvl) + 1 + iz = (id2 - 1) / (xdim * 2 ** refLvl * ydim * 2 ** refLvl) + 1 + parentId = (int(np.ceil(iz / 2.0) - 1) * xdim * 2 ** (refLvl - 1) * ydim * 2 ** (refLvl - 1) + + int(np.ceil(iy / 2.0) - 1) * xdim * 2 ** (refLvl - 1) + + int(np.ceil(ix / 2.0)) + + sum(nIndicesInRefLvl[:refLvl-1])) + if debug: + print("id = {:3d}".format(id)+", id2 = {:3d}".format(id2)+ + ", col = {:2d}".format(ix)+", row = {:2d}".format(iy)+ + ", plane = {:2d}".format(iz)+", parentId = {:2d}".format(parentId)+ + ", refLvl = {:1d}".format(refLvl)) + else: + print("cell {:3d}".format(id)+" is the child of cell {:2d}".format(parentId)) + pass + + return parentId, refLvl + +def getChildren(children, parentId, dimension = 0, up = True, left = True): + + down = not up + right = not left + + N = 8 + + myChildren = list() + + # Select 2/8 children per parent according to the logical parameters up,down,left,right. 
+ # The names are slightly unintuitive in other dimensions but they come from dimension == 0 + if dimension == 0: + if up and left: + i1 = 0 + i2 = 1 + if down and left: + i1 = 2 + i2 = 3 + if up and right: + i1 = 4 + i2 = 5 + if down and right: + i1 = 6 + i2 = 7 + + if dimension == 1: + if up and left: + i1 = 0 + i2 = 2 + if down and left: + i1 = 1 + i2 = 3 + if up and right: + i1 = 4 + i2 = 6 + if down and right: + i1 = 5 + i2 = 7 + + if dimension == 2: + if up and left: + i1 = 0 + i2 = 4 + if down and left: + i1 = 1 + i2 = 5 + if up and right: + i1 = 2 + i2 = 6 + if down and right: + i1 = 3 + i2 = 7 + + if parentId in children.keys(): + myChildren.extend(children[parentId][i1::N]) + myChildren.extend(children[parentId][i2::N]) + else: + # If no children were found, return the parent + myChildren.extend(parentId) + + #print(up,left,myChildren) + return myChildren + +#def buildPencils(pencils,initialPencil,idsIn,isRefined,refLvls,children,dimension = 0,path = list()): +def buildPencils(pencils,initialPencil,idsIn,dimension = 0,path = list()): + + # pencils - list of completed pencils + # initalPencil - list of ids that have already been added to the pencil being built + # idsIn - candidate cell ids to be added to the pencil being built (unless they contain refinement) + # dimension - dimension along which the pencils are built + # path - the steps (up/down, left/right) taken while building the current pencil + + # Global arrays that are accessed read-only + # isRefined - global array that contains how many times each cell has been refined + # refLvls - global array that contains the refinement level of each cell + # children - global array that contains the children of each refined cell + import copy + + # Copy the input ids to a working set of ids + ids = copy.copy(idsIn) + + # Copy the already computed pencil to the output list + idsOut = copy.copy(initialPencil) + + # Walk along the input pencil + for i,id in enumerate(ids): + + i1 = i + 1 + # Check if the current cell contains refined cells + if isRefined[id] > 0: + + # Check if we have encountered this refinement level before and stored + # The path this builder followed + if len(path) > refLvls[id]: + + # Get children using the stored path + myChildren = getChildren(children,id,dimension, + path[refLvls[id]][0],path[refLvls[id]][1]) + + # Add the children to the working set + ids[i1:i1] = myChildren + + else: + + # Spawn new builders to construct pencils at the new refinement level + for up in [True, False]: + for left in [True, False]: + + # Store the path this builder has chosen + myPath = copy.copy(path) + myPath.append((up,left)) + + # Get children along my path + myChildren = getChildren(children,id,dimension,up,left) + myIds = ids[i1:] + + # The current builder will continue along the bottom-right path + if not up and not left: + + # Add the children to the working set. Next iteration of the + # main looop (over ids) will start on the first child + ids[i1:i1] = myChildren + path = myPath + #print('building pencil for'+str(ids[i1:])) + pass + + # Other paths will spawn a new builder + else: + + # Create a new working set by adding the remainder of the old + # working set to the current children. 
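+                        # Illustrative note: myIds holds the cells that were
+                        # still ahead of this refined cell, so the spawned
+                        # builder's working set is its children followed by
+                        # that remainder, e.g. [5, 7] + [3] -> [5, 7, 3].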
+ myChildren.extend(myIds) + + #buildPencils(pencils,idsOut,myChildren,isRefined,refLvls,children,dimension,myPath) + buildPencils(pencils,idsOut,myChildren,dimension,myPath) + + # Add unrefined cells to the pencil directly + else: + + idsOut.append(id) + + pass + + pencils.append(idsOut) + return pencils + + #print(idsOut) + #print(pencils) + +debug = False + +filename = 'refined_4.vtk' +fh = open(filename) +lines = fh.readlines() +fh.close() + +ids = list() + +xdim = 1 +ydim = 1 +zdim = 1 +for i,line in enumerate(lines): + if 'DATASET UNSTRUCTURED_GRID' in line: + n = int(lines[i+1].split()[1]) + for j in np.arange(n): + xyz = lines[i+j+2].split() + xdim = max(xdim,float(xyz[0])) + ydim = max(ydim,float(xyz[1])) + zdim = max(zdim,float(xyz[2])) + if 'SCALARS id int' in line: + n = int(lines[i-1].split()[1]) + for j in np.arange(n): + ids.append(int(lines[i+j+2])) + +xdim = int(xdim) +ydim = int(ydim) +zdim = int(zdim) + +print('grid dimensions are {:2d} x {:2d} x {:2d}'.format(xdim,ydim,zdim)) +gridSize = xdim*ydim*zdim + +#debug = True + +t1 = time.time() + +parents = dict() +children = dict() +refLvls = dict() +hasChildren = list() + +for id in ids: + + # Find the parent of cell id + parentId, refLvl = findParent(id,gridSize,debug) + + parents[id] = parentId + refLvls[id] = refLvl + + # Parents are not stored in the id array by default, let's add them + # For completeness + if not parentId in ids and parentId > 0: + ids.append(parentId) + + # Make a list of cells that have been refined at least once + if parentId > 0: + if not parentId in hasChildren: + children[parentId] = list() + hasChildren.append(parentId) + + # Make a list of children for each cell + children[parentId].append(id) + +# Sort the id and children lists, this is needed when adding cells to pencils +# to get the order right +for key in children.keys(): + children[key].sort() +ids.sort() + +# Second pass to count how many times each cell has been refined +isRefined = dict() +for id in ids: + isRefined[id] = 0 + if refLvls[id] > 0: + parentId = parents[id] + while parentId is not 0: + isRefined[parentId] = refLvls[id] - refLvls[parentId] + parentId = parents[parentId] + +# Begin sorting, select the dimension by which we sort +dimension = 1 +# dimensions = ('x','y','z') +print +print('Building pencils along dimension {:1d}'.format(dimension)) +print + +#sortedIds = list() +mapping = dict() +for id in ids: + # Sort the mesh ids using Sebastians c++ code + if dimension == 0: + + dims = (zdim, ydim, xdim) + + idMapped = id + + if dimension == 1: + + dims = (zdim, xdim, ydim) + + x_index = (id-1) % xdim + y_index = ((id-1) / xdim) % ydim + idMapped = id - (x_index + y_index * xdim) + y_index + x_index * ydim + + if dimension == 2: + + dims = (ydim, xdim, zdim) + + x_index = (id-1) % xdim + y_index = ((id-1) / xdim) % ydim + z_index = ((id-1) / (xdim * ydim)) + idMapped = 1 + z_index + y_index * zdim + x_index * ydim * zdim + + if refLvls[id] == 0: + mapping[idMapped] = id + +# Create pencils of unrefined cells, store the level of refinement for each cell +unrefinedPencils = list() +for i in np.arange(dims[0]): + for j in np.arange(dims[1]): + ibeg = 1 + i * dims[2] * dims[1] + j * dims[2] + iend = 1 + i * dims[2] * dims[1] + (j + 1) * dims[2] + myIsRefined = list() + myIds = list() + for k in np.arange(ibeg,iend): + myIds.append(mapping[k]) + myIsRefined.append(isRefined[mapping[k]]) + unrefinedPencils.append({'ids' : myIds, + 'refLvl' : myIsRefined}) + +# Refine the unrefined pencils that contain refined cells + +pencils = 
list() + +# Loop over the unrefined pencils +for unrefinedPencil in unrefinedPencils: + + #pencils = buildPencils(pencils,[],unrefinedPencil['ids'],isRefined,refLvls,children,dimension) + pencils = buildPencils(pencils,[],unrefinedPencil['ids'],dimension) + +t2 = time.time() + +print('I have created the following pencils:') +print +for pencil in pencils: + print(pencil) + +print +print('Execution time was {:.4f} seconds'.format(t2-t1)) From 90a554ec522796a84ffdf82266bf3f52f3ab0e94 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 18 May 2018 12:19:45 +0300 Subject: [PATCH 007/602] Added command line argument parsing. Added test cases. --- .../pencils_dim0_testcase_1.txt | 12 + .../pencils_dim0_testcase_2.txt | 24 + .../pencils_dim0_testcase_3.txt | 36 + .../pencils_dim1_testcase_1.txt | 27 + .../pencils_dim1_testcase_2.txt | 34 + .../pencils_dim1_testcase_3.txt | 36 + .../pencils_dim2_testcase_2.txt | 33 + .../pencils_dim2_testcase_3.txt | 48 + .../sort_refined_ids_recursive.py | 28 +- mini-apps/simple-grid-test/testcase_1.vtk | 1000 ++++++++++ mini-apps/simple-grid-test/testcase_2.vtk | 1385 +++++++++++++ mini-apps/simple-grid-test/testcase_3.vtk | 1726 +++++++++++++++++ 12 files changed, 4381 insertions(+), 8 deletions(-) create mode 100644 mini-apps/simple-grid-test/pencils_dim0_testcase_1.txt create mode 100644 mini-apps/simple-grid-test/pencils_dim0_testcase_2.txt create mode 100644 mini-apps/simple-grid-test/pencils_dim0_testcase_3.txt create mode 100644 mini-apps/simple-grid-test/pencils_dim1_testcase_1.txt create mode 100644 mini-apps/simple-grid-test/pencils_dim1_testcase_2.txt create mode 100644 mini-apps/simple-grid-test/pencils_dim1_testcase_3.txt create mode 100644 mini-apps/simple-grid-test/pencils_dim2_testcase_2.txt create mode 100644 mini-apps/simple-grid-test/pencils_dim2_testcase_3.txt create mode 100644 mini-apps/simple-grid-test/testcase_1.vtk create mode 100644 mini-apps/simple-grid-test/testcase_2.vtk create mode 100644 mini-apps/simple-grid-test/testcase_3.vtk diff --git a/mini-apps/simple-grid-test/pencils_dim0_testcase_1.txt b/mini-apps/simple-grid-test/pencils_dim0_testcase_1.txt new file mode 100644 index 000000000..437f19352 --- /dev/null +++ b/mini-apps/simple-grid-test/pencils_dim0_testcase_1.txt @@ -0,0 +1,12 @@ +[1, 2, 32, 33, 4, 5, 38, 39, 268, 269, 270, 271, 42, 43, 9] +[1, 2, 32, 33, 4, 5, 38, 39, 700, 701, 702, 703, 42, 43, 9] +[1, 2, 32, 33, 4, 5, 38, 39, 304, 305, 306, 307, 42, 43, 9] +[1, 2, 32, 33, 4, 5, 38, 39, 736, 737, 738, 739, 42, 43, 9] +[1, 2, 140, 141, 4, 5, 146, 147, 148, 149, 150, 151, 9] +[1, 2, 50, 51, 4, 5, 56, 57, 58, 59, 60, 61, 9] +[1, 2, 158, 159, 4, 5, 164, 165, 166, 167, 168, 169, 9] +[10, 11, 12, 13, 14, 74, 75, 76, 77, 78, 79, 18] +[10, 11, 12, 13, 14, 182, 183, 184, 185, 186, 187, 18] +[10, 11, 12, 13, 14, 92, 93, 94, 95, 96, 97, 18] +[10, 11, 12, 13, 14, 200, 201, 202, 203, 204, 205, 18] +[19, 20, 21, 22, 23, 24, 25, 26, 27] diff --git a/mini-apps/simple-grid-test/pencils_dim0_testcase_2.txt b/mini-apps/simple-grid-test/pencils_dim0_testcase_2.txt new file mode 100644 index 000000000..19398fa31 --- /dev/null +++ b/mini-apps/simple-grid-test/pencils_dim0_testcase_2.txt @@ -0,0 +1,24 @@ +[28, 29, 248, 249, 250, 251, 32, 33] +[28, 29, 392, 393, 394, 395, 32, 33] +[28, 29, 260, 261, 262, 263, 32, 33] +[28, 29, 404, 405, 406, 407, 32, 33] +[64, 65, 66, 67, 68, 69] +[34, 35, 36, 37, 38, 39] +[70, 71, 72, 73, 74, 75] +[40, 41, 42, 43, 44, 45] +[76, 77, 78, 79, 80, 81] +[46, 47, 48, 49, 50, 51] +[82, 83, 84, 85, 86, 87] +[7, 8, 9] 
+[100, 101, 102, 103, 104, 105] +[136, 137, 138, 139, 140, 141] +[106, 107, 108, 109, 110, 111] +[142, 143, 144, 145, 146, 147] +[112, 113, 114, 115, 116, 117] +[148, 149, 150, 151, 152, 153] +[118, 119, 120, 121, 122, 123] +[154, 155, 156, 157, 158, 159] +[16, 17, 18] +[19, 20, 21] +[22, 23, 24] +[25, 26, 27] diff --git a/mini-apps/simple-grid-test/pencils_dim0_testcase_3.txt b/mini-apps/simple-grid-test/pencils_dim0_testcase_3.txt new file mode 100644 index 000000000..70828acaa --- /dev/null +++ b/mini-apps/simple-grid-test/pencils_dim0_testcase_3.txt @@ -0,0 +1,36 @@ +[1, 12, 13, 90, 91, 678, 679, 680, 5312, 5313] +[1, 12, 13, 90, 91, 678, 679, 680, 7616, 7617] +[1, 12, 13, 90, 91, 678, 679, 680, 5360, 5361] +[1, 12, 13, 90, 91, 678, 679, 680, 7664, 7665] +[1, 12, 13, 90, 91, 1254, 1255, 1256, 1257] +[1, 12, 13, 90, 91, 702, 703, 704, 705] +[1, 12, 13, 90, 91, 1278, 1279, 1280, 1281] +[1, 12, 13, 234, 235, 1830, 1831, 1832, 1833] +[1, 12, 13, 234, 235, 2406, 2407, 2408, 2409] +[1, 12, 13, 234, 235, 1854, 1855, 1856, 1857] +[1, 12, 13, 234, 235, 2430, 2431, 2432, 2433] +[1, 12, 13, 102, 103, 726, 727, 728, 729] +[1, 12, 13, 102, 103, 1302, 1303, 1304, 1305] +[1, 12, 13, 102, 103, 750, 751, 752, 753] +[1, 12, 13, 102, 103, 1326, 1327, 1328, 1329] +[1, 12, 13, 246, 247, 1878, 1879, 1880, 1881] +[1, 12, 13, 246, 247, 2454, 2455, 2456, 2457] +[1, 12, 13, 246, 247, 1902, 1903, 1904, 1905] +[1, 12, 13, 246, 247, 2478, 2479, 2480, 2481] +[1, 48, 49, 378, 379, 380, 381] +[1, 48, 49, 522, 523, 524, 525] +[1, 48, 49, 390, 391, 392, 393] +[1, 48, 49, 534, 535, 536, 537] +[1, 18, 19, 114, 115, 116, 117] +[1, 18, 19, 258, 259, 260, 261] +[1, 18, 19, 126, 127, 128, 129] +[1, 18, 19, 270, 271, 272, 273] +[1, 54, 55, 402, 403, 404, 405] +[1, 54, 55, 546, 547, 548, 549] +[1, 54, 55, 414, 415, 416, 417] +[1, 54, 55, 558, 559, 560, 561] +[4, 24, 25, 26, 27] +[4, 60, 61, 62, 63] +[4, 30, 31, 32, 33] +[4, 66, 67, 68, 69] +[7, 8, 9] diff --git a/mini-apps/simple-grid-test/pencils_dim1_testcase_1.txt b/mini-apps/simple-grid-test/pencils_dim1_testcase_1.txt new file mode 100644 index 000000000..3d8118b30 --- /dev/null +++ b/mini-apps/simple-grid-test/pencils_dim1_testcase_1.txt @@ -0,0 +1,27 @@ +[1, 10, 19] +[2, 11, 20] +[32, 50, 12, 21] +[140, 158, 12, 21] +[33, 51, 12, 21] +[141, 159, 12, 21] +[4, 13, 22] +[5, 14, 23] +[38, 56, 74, 92, 24] +[146, 164, 182, 200, 24] +[39, 57, 75, 93, 24] +[147, 165, 183, 201, 24] +[268, 304, 58, 76, 94, 25] +[700, 736, 58, 76, 94, 25] +[269, 305, 58, 76, 94, 25] +[701, 737, 58, 76, 94, 25] +[148, 166, 184, 202, 25] +[270, 306, 59, 77, 95, 25] +[702, 738, 59, 77, 95, 25] +[271, 307, 59, 77, 95, 25] +[703, 739, 59, 77, 95, 25] +[149, 167, 185, 203, 25] +[42, 60, 78, 96, 26] +[150, 168, 186, 204, 26] +[43, 61, 79, 97, 26] +[151, 169, 187, 205, 26] +[9, 18, 27] diff --git a/mini-apps/simple-grid-test/pencils_dim1_testcase_2.txt b/mini-apps/simple-grid-test/pencils_dim1_testcase_2.txt new file mode 100644 index 000000000..cdb5d6ad6 --- /dev/null +++ b/mini-apps/simple-grid-test/pencils_dim1_testcase_2.txt @@ -0,0 +1,34 @@ +[28, 34, 40, 46, 7] +[64, 70, 76, 82, 7] +[29, 35, 41, 47, 7] +[65, 71, 77, 83, 7] +[248, 260, 36, 42, 48, 8] +[392, 404, 36, 42, 48, 8] +[249, 261, 36, 42, 48, 8] +[393, 405, 36, 42, 48, 8] +[66, 72, 78, 84, 8] +[250, 262, 37, 43, 49, 8] +[394, 406, 37, 43, 49, 8] +[251, 263, 37, 43, 49, 8] +[395, 407, 37, 43, 49, 8] +[67, 73, 79, 85, 8] +[32, 38, 44, 50, 9] +[68, 74, 80, 86, 9] +[33, 39, 45, 51, 9] +[69, 75, 81, 87, 9] +[100, 106, 112, 118, 16] +[136, 142, 148, 
154, 16] +[101, 107, 113, 119, 16] +[137, 143, 149, 155, 16] +[102, 108, 114, 120, 17] +[138, 144, 150, 156, 17] +[103, 109, 115, 121, 17] +[139, 145, 151, 157, 17] +[104, 110, 116, 122, 18] +[140, 146, 152, 158, 18] +[105, 111, 117, 123, 18] +[141, 147, 153, 159, 18] +[19, 22, 25] +[20, 23, 26] +[21, 24, 27] + diff --git a/mini-apps/simple-grid-test/pencils_dim1_testcase_3.txt b/mini-apps/simple-grid-test/pencils_dim1_testcase_3.txt new file mode 100644 index 000000000..9d83dddee --- /dev/null +++ b/mini-apps/simple-grid-test/pencils_dim1_testcase_3.txt @@ -0,0 +1,36 @@ +[1, 4, 7] +[12, 18, 24, 30, 8] +[48, 54, 60, 66, 8] +[13, 19, 25, 31, 8] +[49, 55, 61, 67, 8] +[90, 102, 114, 126, 26, 32, 9] +[234, 246, 258, 270, 26, 32, 9] +[91, 103, 115, 127, 26, 32, 9] +[235, 247, 259, 271, 26, 32, 9] +[378, 390, 402, 414, 62, 68, 9] +[522, 534, 546, 558, 62, 68, 9] +[379, 391, 403, 415, 62, 68, 9] +[523, 535, 547, 559, 62, 68, 9] +[678, 702, 726, 750, 116, 128, 27, 33, 9] +[1254, 1278, 1302, 1326, 116, 128, 27, 33, 9] +[679, 703, 727, 751, 116, 128, 27, 33, 9] +[1255, 1279, 1303, 1327, 116, 128, 27, 33, 9] +[1830, 1854, 1878, 1902, 260, 272, 27, 33, 9] +[2406, 2430, 2454, 2478, 260, 272, 27, 33, 9] +[1831, 1855, 1879, 1903, 260, 272, 27, 33, 9] +[2407, 2431, 2455, 2479, 260, 272, 27, 33, 9] +[680, 704, 728, 752, 117, 129, 27, 33, 9] +[1256, 1280, 1304, 1328, 117, 129, 27, 33, 9] +[5312, 5360, 705, 729, 753, 117, 129, 27, 33, 9] +[7616, 7664, 705, 729, 753, 117, 129, 27, 33, 9] +[5313, 5361, 705, 729, 753, 117, 129, 27, 33, 9] +[7617, 7665, 705, 729, 753, 117, 129, 27, 33, 9] +[1257, 1281, 1305, 1329, 117, 129, 27, 33, 9] +[1832, 1856, 1880, 1904, 261, 273, 27, 33, 9] +[2408, 2432, 2456, 2480, 261, 273, 27, 33, 9] +[1833, 1857, 1881, 1905, 261, 273, 27, 33, 9] +[2409, 2433, 2457, 2481, 261, 273, 27, 33, 9] +[380, 392, 404, 416, 63, 69, 9] +[524, 536, 548, 560, 63, 69, 9] +[381, 393, 405, 417, 63, 69, 9] +[525, 537, 549, 561, 63, 69, 9] diff --git a/mini-apps/simple-grid-test/pencils_dim2_testcase_2.txt b/mini-apps/simple-grid-test/pencils_dim2_testcase_2.txt new file mode 100644 index 000000000..a5fd2a665 --- /dev/null +++ b/mini-apps/simple-grid-test/pencils_dim2_testcase_2.txt @@ -0,0 +1,33 @@ +[28, 64, 100, 136, 19] +[34, 70, 106, 142, 19] +[29, 65, 101, 137, 19] +[35, 71, 107, 143, 19] +[40, 76, 112, 148, 22] +[46, 82, 118, 154, 22] +[41, 77, 113, 149, 22] +[47, 83, 119, 155, 22] +[7, 16, 25] +[248, 392, 66, 102, 138, 20] +[260, 404, 66, 102, 138, 20] +[249, 393, 66, 102, 138, 20] +[261, 405, 66, 102, 138, 20] +[36, 72, 108, 144, 20] +[250, 394, 67, 103, 139, 20] +[262, 406, 67, 103, 139, 20] +[251, 395, 67, 103, 139, 20] +[263, 407, 67, 103, 139, 20] +[37, 73, 109, 145, 20] +[42, 78, 114, 150, 23] +[48, 84, 120, 156, 23] +[43, 79, 115, 151, 23] +[49, 85, 121, 157, 23] +[8, 17, 26] +[32, 68, 104, 140, 21] +[38, 74, 110, 146, 21] +[33, 69, 105, 141, 21] +[39, 75, 111, 147, 21] +[44, 80, 116, 152, 24] +[50, 86, 122, 158, 24] +[45, 81, 117, 153, 24] +[51, 87, 123, 159, 24] +[9, 18, 27] diff --git a/mini-apps/simple-grid-test/pencils_dim2_testcase_3.txt b/mini-apps/simple-grid-test/pencils_dim2_testcase_3.txt new file mode 100644 index 000000000..41b431e1a --- /dev/null +++ b/mini-apps/simple-grid-test/pencils_dim2_testcase_3.txt @@ -0,0 +1,48 @@ +[1] +[4] +[7] +[12, 48] +[18, 54] +[13, 49] +[19, 55] +[24, 60] +[30, 66] +[25, 61] +[31, 67] +[8] +[90, 234, 378, 522] +[102, 246, 390, 534] +[91, 235, 379, 523] +[103, 247, 391, 535] +[114, 258, 402, 546] +[126, 270, 414, 558] +[115, 259, 403, 547] 
+[127, 271, 415, 559] +[678, 1254, 1830, 2406, 380, 524] +[702, 1278, 1854, 2430, 380, 524] +[679, 1255, 1831, 2407, 380, 524] +[703, 1279, 1855, 2431, 380, 524] +[726, 1302, 1878, 2454, 392, 536] +[750, 1326, 1902, 2478, 392, 536] +[727, 1303, 1879, 2455, 392, 536] +[751, 1327, 1903, 2479, 392, 536] +[680, 1256, 1832, 2408, 381, 525] +[704, 1280, 1856, 2432, 381, 525] +[5312, 7616, 1257, 1833, 2409, 381, 525] +[5360, 7664, 1257, 1833, 2409, 381, 525] +[5313, 7617, 1257, 1833, 2409, 381, 525] +[5361, 7665, 1257, 1833, 2409, 381, 525] +[705, 1281, 1857, 2433, 381, 525] +[728, 1304, 1880, 2456, 393, 537] +[752, 1328, 1904, 2480, 393, 537] +[729, 1305, 1881, 2457, 393, 537] +[753, 1329, 1905, 2481, 393, 537] +[116, 260, 404, 548] +[128, 272, 416, 560] +[117, 261, 405, 549] +[129, 273, 417, 561] +[26, 62] +[32, 68] +[27, 63] +[33, 69] +[9] diff --git a/mini-apps/simple-grid-test/sort_refined_ids_recursive.py b/mini-apps/simple-grid-test/sort_refined_ids_recursive.py index 16157e797..c43fcea52 100644 --- a/mini-apps/simple-grid-test/sort_refined_ids_recursive.py +++ b/mini-apps/simple-grid-test/sort_refined_ids_recursive.py @@ -101,7 +101,6 @@ def getChildren(children, parentId, dimension = 0, up = True, left = True): #print(up,left,myChildren) return myChildren -#def buildPencils(pencils,initialPencil,idsIn,isRefined,refLvls,children,dimension = 0,path = list()): def buildPencils(pencils,initialPencil,idsIn,dimension = 0,path = list()): # pencils - list of completed pencils @@ -116,10 +115,10 @@ def buildPencils(pencils,initialPencil,idsIn,dimension = 0,path = list()): # children - global array that contains the children of each refined cell import copy - # Copy the input ids to a working set of ids + # (Hard) Copy the input ids to a working set of ids ids = copy.copy(idsIn) - # Copy the already computed pencil to the output list + # (Hard) Copy the already computed pencil to the output list idsOut = copy.copy(initialPencil) # Walk along the input pencil @@ -171,7 +170,6 @@ def buildPencils(pencils,initialPencil,idsIn,dimension = 0,path = list()): # working set to the current children. 
myChildren.extend(myIds) - #buildPencils(pencils,idsOut,myChildren,isRefined,refLvls,children,dimension,myPath) buildPencils(pencils,idsOut,myChildren,dimension,myPath) # Add unrefined cells to the pencil directly @@ -187,9 +185,24 @@ def buildPencils(pencils,initialPencil,idsIn,dimension = 0,path = list()): #print(idsOut) #print(pencils) +import argparse + +parser = argparse.ArgumentParser(description='Create pencils on a refined grid.') +parser.add_argument('--dimension', metavar = 'N', type=int, nargs=1, + default=[0], help='Dimension') +parser.add_argument('--filename', metavar = 'fn', type=str, nargs=1, + default=['test.vtk'], help='Input file name') +args = parser.parse_args() + +if args.dimension[0] > 0 and args.dimension[0] <= 2: + dimension = args.dimension[0] +else: + dimension = 0 + debug = False -filename = 'refined_4.vtk' +#filename = 'test.vtk' +filename = args.filename[0] fh = open(filename) lines = fh.readlines() fh.close() @@ -267,7 +280,7 @@ def buildPencils(pencils,initialPencil,idsIn,dimension = 0,path = list()): parentId = parents[parentId] # Begin sorting, select the dimension by which we sort -dimension = 1 +# dimension = 0 # dimensions = ('x','y','z') print print('Building pencils along dimension {:1d}'.format(dimension)) @@ -276,7 +289,7 @@ def buildPencils(pencils,initialPencil,idsIn,dimension = 0,path = list()): #sortedIds = list() mapping = dict() for id in ids: - # Sort the mesh ids using Sebastians c++ code + # Sort the unrefined mesh ids following Sebastians c++ code if dimension == 0: dims = (zdim, ydim, xdim) @@ -324,7 +337,6 @@ def buildPencils(pencils,initialPencil,idsIn,dimension = 0,path = list()): # Loop over the unrefined pencils for unrefinedPencil in unrefinedPencils: - #pencils = buildPencils(pencils,[],unrefinedPencil['ids'],isRefined,refLvls,children,dimension) pencils = buildPencils(pencils,[],unrefinedPencil['ids'],dimension) t2 = time.time() diff --git a/mini-apps/simple-grid-test/testcase_1.vtk b/mini-apps/simple-grid-test/testcase_1.vtk new file mode 100644 index 000000000..99538e0da --- /dev/null +++ b/mini-apps/simple-grid-test/testcase_1.vtk @@ -0,0 +1,1000 @@ +# vtk DataFile Version 2.0 +Cartesian cell refinable grid +ASCII +DATASET UNSTRUCTURED_GRID +POINTS 720 float +0 0 0 +1 0 0 +0 1 0 +1 1 0 +0 0 1 +1 0 1 +0 1 1 +1 1 1 +1 0 0 +2 0 0 +1 1 0 +2 1 0 +1 0 1 +2 0 1 +1 1 1 +2 1 1 +3 0 0 +4 0 0 +3 1 0 +4 1 0 +3 0 1 +4 0 1 +3 1 1 +4 1 1 +4 0 0 +5 0 0 +4 1 0 +5 1 0 +4 0 1 +5 0 1 +4 1 1 +5 1 1 +8 0 0 +9 0 0 +8 1 0 +9 1 0 +8 0 1 +9 0 1 +8 1 1 +9 1 1 +0 1 0 +1 1 0 +0 2 0 +1 2 0 +0 1 1 +1 1 1 +0 2 1 +1 2 1 +1 1 0 +2 1 0 +1 2 0 +2 2 0 +1 1 1 +2 1 1 +1 2 1 +2 2 1 +2 1 0 +3 1 0 +2 2 0 +3 2 0 +2 1 1 +3 1 1 +2 2 1 +3 2 1 +3 1 0 +4 1 0 +3 2 0 +4 2 0 +3 1 1 +4 1 1 +3 2 1 +4 2 1 +4 1 0 +5 1 0 +4 2 0 +5 2 0 +4 1 1 +5 1 1 +4 2 1 +5 2 1 +8 1 0 +9 1 0 +8 2 0 +9 2 0 +8 1 1 +9 1 1 +8 2 1 +9 2 1 +0 2 0 +1 2 0 +0 3 0 +1 3 0 +0 2 1 +1 2 1 +0 3 1 +1 3 1 +1 2 0 +2 2 0 +1 3 0 +2 3 0 +1 2 1 +2 2 1 +1 3 1 +2 3 1 +2 2 0 +3 2 0 +2 3 0 +3 3 0 +2 2 1 +3 2 1 +2 3 1 +3 3 1 +3 2 0 +4 2 0 +3 3 0 +4 3 0 +3 2 1 +4 2 1 +3 3 1 +4 3 1 +4 2 0 +5 2 0 +4 3 0 +5 3 0 +4 2 1 +5 2 1 +4 3 1 +5 3 1 +5 2 0 +6 2 0 +5 3 0 +6 3 0 +5 2 1 +6 2 1 +5 3 1 +6 3 1 +6 2 0 +7 2 0 +6 3 0 +7 3 0 +6 2 1 +7 2 1 +6 3 1 +7 3 1 +7 2 0 +8 2 0 +7 3 0 +8 3 0 +7 2 1 +8 2 1 +7 3 1 +8 3 1 +8 2 0 +9 2 0 +8 3 0 +9 3 0 +8 2 1 +9 2 1 +8 3 1 +9 3 1 +2 0 0 +2.5 0 0 +2 0.5 0 +2.5 0.5 0 +2 0 0.5 +2.5 0 0.5 +2 0.5 0.5 +2.5 0.5 0.5 +2.5 0 0 +3 0 0 +2.5 0.5 0 +3 0.5 0 +2.5 0 0.5 +3 0 0.5 +2.5 0.5 0.5 +3 0.5 0.5 +5 0 0 +5.5 0 
0 +5 0.5 0 +5.5 0.5 0 +5 0 0.5 +5.5 0 0.5 +5 0.5 0.5 +5.5 0.5 0.5 +5.5 0 0 +6 0 0 +5.5 0.5 0 +6 0.5 0 +5.5 0 0.5 +6 0 0.5 +5.5 0.5 0.5 +6 0.5 0.5 +7 0 0 +7.5 0 0 +7 0.5 0 +7.5 0.5 0 +7 0 0.5 +7.5 0 0.5 +7 0.5 0.5 +7.5 0.5 0.5 +7.5 0 0 +8 0 0 +7.5 0.5 0 +8 0.5 0 +7.5 0 0.5 +8 0 0.5 +7.5 0.5 0.5 +8 0.5 0.5 +2 0.5 0 +2.5 0.5 0 +2 1 0 +2.5 1 0 +2 0.5 0.5 +2.5 0.5 0.5 +2 1 0.5 +2.5 1 0.5 +2.5 0.5 0 +3 0.5 0 +2.5 1 0 +3 1 0 +2.5 0.5 0.5 +3 0.5 0.5 +2.5 1 0.5 +3 1 0.5 +5 0.5 0 +5.5 0.5 0 +5 1 0 +5.5 1 0 +5 0.5 0.5 +5.5 0.5 0.5 +5 1 0.5 +5.5 1 0.5 +5.5 0.5 0 +6 0.5 0 +5.5 1 0 +6 1 0 +5.5 0.5 0.5 +6 0.5 0.5 +5.5 1 0.5 +6 1 0.5 +6 0.5 0 +6.5 0.5 0 +6 1 0 +6.5 1 0 +6 0.5 0.5 +6.5 0.5 0.5 +6 1 0.5 +6.5 1 0.5 +6.5 0.5 0 +7 0.5 0 +6.5 1 0 +7 1 0 +6.5 0.5 0.5 +7 0.5 0.5 +6.5 1 0.5 +7 1 0.5 +7 0.5 0 +7.5 0.5 0 +7 1 0 +7.5 1 0 +7 0.5 0.5 +7.5 0.5 0.5 +7 1 0.5 +7.5 1 0.5 +7.5 0.5 0 +8 0.5 0 +7.5 1 0 +8 1 0 +7.5 0.5 0.5 +8 0.5 0.5 +7.5 1 0.5 +8 1 0.5 +5 1 0 +5.5 1 0 +5 1.5 0 +5.5 1.5 0 +5 1 0.5 +5.5 1 0.5 +5 1.5 0.5 +5.5 1.5 0.5 +5.5 1 0 +6 1 0 +5.5 1.5 0 +6 1.5 0 +5.5 1 0.5 +6 1 0.5 +5.5 1.5 0.5 +6 1.5 0.5 +6 1 0 +6.5 1 0 +6 1.5 0 +6.5 1.5 0 +6 1 0.5 +6.5 1 0.5 +6 1.5 0.5 +6.5 1.5 0.5 +6.5 1 0 +7 1 0 +6.5 1.5 0 +7 1.5 0 +6.5 1 0.5 +7 1 0.5 +6.5 1.5 0.5 +7 1.5 0.5 +7 1 0 +7.5 1 0 +7 1.5 0 +7.5 1.5 0 +7 1 0.5 +7.5 1 0.5 +7 1.5 0.5 +7.5 1.5 0.5 +7.5 1 0 +8 1 0 +7.5 1.5 0 +8 1.5 0 +7.5 1 0.5 +8 1 0.5 +7.5 1.5 0.5 +8 1.5 0.5 +5 1.5 0 +5.5 1.5 0 +5 2 0 +5.5 2 0 +5 1.5 0.5 +5.5 1.5 0.5 +5 2 0.5 +5.5 2 0.5 +5.5 1.5 0 +6 1.5 0 +5.5 2 0 +6 2 0 +5.5 1.5 0.5 +6 1.5 0.5 +5.5 2 0.5 +6 2 0.5 +6 1.5 0 +6.5 1.5 0 +6 2 0 +6.5 2 0 +6 1.5 0.5 +6.5 1.5 0.5 +6 2 0.5 +6.5 2 0.5 +6.5 1.5 0 +7 1.5 0 +6.5 2 0 +7 2 0 +6.5 1.5 0.5 +7 1.5 0.5 +6.5 2 0.5 +7 2 0.5 +7 1.5 0 +7.5 1.5 0 +7 2 0 +7.5 2 0 +7 1.5 0.5 +7.5 1.5 0.5 +7 2 0.5 +7.5 2 0.5 +7.5 1.5 0 +8 1.5 0 +7.5 2 0 +8 2 0 +7.5 1.5 0.5 +8 1.5 0.5 +7.5 2 0.5 +8 2 0.5 +2 0 0.5 +2.5 0 0.5 +2 0.5 0.5 +2.5 0.5 0.5 +2 0 1 +2.5 0 1 +2 0.5 1 +2.5 0.5 1 +2.5 0 0.5 +3 0 0.5 +2.5 0.5 0.5 +3 0.5 0.5 +2.5 0 1 +3 0 1 +2.5 0.5 1 +3 0.5 1 +5 0 0.5 +5.5 0 0.5 +5 0.5 0.5 +5.5 0.5 0.5 +5 0 1 +5.5 0 1 +5 0.5 1 +5.5 0.5 1 +5.5 0 0.5 +6 0 0.5 +5.5 0.5 0.5 +6 0.5 0.5 +5.5 0 1 +6 0 1 +5.5 0.5 1 +6 0.5 1 +6 0 0.5 +6.5 0 0.5 +6 0.5 0.5 +6.5 0.5 0.5 +6 0 1 +6.5 0 1 +6 0.5 1 +6.5 0.5 1 +6.5 0 0.5 +7 0 0.5 +6.5 0.5 0.5 +7 0.5 0.5 +6.5 0 1 +7 0 1 +6.5 0.5 1 +7 0.5 1 +7 0 0.5 +7.5 0 0.5 +7 0.5 0.5 +7.5 0.5 0.5 +7 0 1 +7.5 0 1 +7 0.5 1 +7.5 0.5 1 +7.5 0 0.5 +8 0 0.5 +7.5 0.5 0.5 +8 0.5 0.5 +7.5 0 1 +8 0 1 +7.5 0.5 1 +8 0.5 1 +2 0.5 0.5 +2.5 0.5 0.5 +2 1 0.5 +2.5 1 0.5 +2 0.5 1 +2.5 0.5 1 +2 1 1 +2.5 1 1 +2.5 0.5 0.5 +3 0.5 0.5 +2.5 1 0.5 +3 1 0.5 +2.5 0.5 1 +3 0.5 1 +2.5 1 1 +3 1 1 +5 0.5 0.5 +5.5 0.5 0.5 +5 1 0.5 +5.5 1 0.5 +5 0.5 1 +5.5 0.5 1 +5 1 1 +5.5 1 1 +5.5 0.5 0.5 +6 0.5 0.5 +5.5 1 0.5 +6 1 0.5 +5.5 0.5 1 +6 0.5 1 +5.5 1 1 +6 1 1 +6 0.5 0.5 +6.5 0.5 0.5 +6 1 0.5 +6.5 1 0.5 +6 0.5 1 +6.5 0.5 1 +6 1 1 +6.5 1 1 +6.5 0.5 0.5 +7 0.5 0.5 +6.5 1 0.5 +7 1 0.5 +6.5 0.5 1 +7 0.5 1 +6.5 1 1 +7 1 1 +7 0.5 0.5 +7.5 0.5 0.5 +7 1 0.5 +7.5 1 0.5 +7 0.5 1 +7.5 0.5 1 +7 1 1 +7.5 1 1 +7.5 0.5 0.5 +8 0.5 0.5 +7.5 1 0.5 +8 1 0.5 +7.5 0.5 1 +8 0.5 1 +7.5 1 1 +8 1 1 +5 1 0.5 +5.5 1 0.5 +5 1.5 0.5 +5.5 1.5 0.5 +5 1 1 +5.5 1 1 +5 1.5 1 +5.5 1.5 1 +5.5 1 0.5 +6 1 0.5 +5.5 1.5 0.5 +6 1.5 0.5 +5.5 1 1 +6 1 1 +5.5 1.5 1 +6 1.5 1 +6 1 0.5 +6.5 1 0.5 +6 1.5 0.5 +6.5 1.5 0.5 +6 1 1 +6.5 1 1 +6 1.5 1 +6.5 1.5 1 +6.5 1 0.5 +7 1 0.5 +6.5 1.5 0.5 +7 1.5 0.5 +6.5 1 1 +7 1 1 +6.5 1.5 1 +7 1.5 1 +7 1 0.5 +7.5 1 0.5 +7 1.5 0.5 +7.5 1.5 0.5 +7 1 
[... remaining point coordinates, cell connectivity (CELLS 90 810), CELL_TYPES 90 (all type 11, i.e. voxels) and the CELL_DATA 90 block listing the ids of the 90 leaf cells omitted ...]
diff --git a/mini-apps/simple-grid-test/testcase_2.vtk b/mini-apps/simple-grid-test/testcase_2.vtk
new file mode 100644
index 000000000..4066399b3
--- /dev/null
+++ b/mini-apps/simple-grid-test/testcase_2.vtk
@@ -0,0 +1,1385 @@
+# vtk DataFile Version 2.0
+Cartesian cell refinable grid
+ASCII
+DATASET UNSTRUCTURED_GRID
+POINTS 1000 float
[... 1000 point coordinates, cell connectivity (CELLS 125 1125), CELL_TYPES 125 (all type 11) and the CELL_DATA 125 block listing the ids of the 125 leaf cells omitted ...]
diff --git a/mini-apps/simple-grid-test/testcase_3.vtk b/mini-apps/simple-grid-test/testcase_3.vtk
new file mode 100644
index 000000000..f5abab6ba
--- /dev/null
+++ b/mini-apps/simple-grid-test/testcase_3.vtk
@@ -0,0 +1,1726 @@
+# vtk DataFile Version 2.0
+Cartesian cell refinable grid
+ASCII
+DATASET UNSTRUCTURED_GRID
+POINTS 1248 float
[... 1248 point coordinates, cell connectivity (CELLS 156 1404), CELL_TYPES 156 (all type 11) and the CELL_DATA 156 block listing the ids of the 156 leaf cells omitted ...]
From b48ef768806b55dea2a6a745e96a98bc552c342c Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Tue, 22 May 2018 17:19:53 +0300
Subject: [PATCH 008/602] Added mini-app for testing 1d reconstruction with
 nonuniform grids.

---
 mini-apps/amr_reconstruction/Makefile         |  87 ++++++++++++
 mini-apps/amr_reconstruction/common.h         |  53 ++++++++
 mini-apps/amr_reconstruction/map_test.cpp     | 117 ++++++++++++++
 .../amr_reconstruction/plot_reconstruction.py |  15 +++
 4 files changed, 272 insertions(+)
 create mode 100644 mini-apps/amr_reconstruction/Makefile
 create mode 100644 mini-apps/amr_reconstruction/common.h
 create mode 100644 mini-apps/amr_reconstruction/map_test.cpp
 create mode 100644 mini-apps/amr_reconstruction/plot_reconstruction.py

diff --git a/mini-apps/amr_reconstruction/Makefile b/mini-apps/amr_reconstruction/Makefile
new file mode 100644
index 000000000..4e5866cac
--- /dev/null
+++ b/mini-apps/amr_reconstruction/Makefile
@@ -0,0 +1,87 @@
+#set default architecture, can be overridden from the compile line
+ARCH = $(VLASIATOR_ARCH)
+include ../../MAKE/Makefile.${ARCH}
+
+#set FP precision to SP (single) or DP (double)
+FP_PRECISION = DP
+
+#set floating point precision for the distribution function to SPF (single) or DPF (double)
+DISTRIBUTION_FP_PRECISION = DPF
+
+#set the vector backend type for the Vlasov solvers; this sets precision and length.
+#This mini-app only supports length 4(!)
+VECTORCLASS = VEC4D_AGNER
+
+#is profiling on?
+CXXFLAGS += -DPROFILE
+
+#Add -DNDEBUG to turn debugging off. If debugging is enabled, performance will degrade significantly.
+CXXFLAGS += -DNDEBUG -fno-tree-vectorize
+
+#Set the order of the semilag solver in velocity space acceleration:
+# ACC_SEMILAG_PLM  2nd order
+# ACC_SEMILAG_PPM  3rd order (use this one unless you are testing)
+# ACC_SEMILAG_PQM  5th order (in development)
+CXXFLAGS += -DACC_SEMILAG_PPM
+
+#Define USE_AGNER_VECTORCLASS to use an external vector class in some of the solvers.
+#If not defined, a slower but portable implementation is used, as the external one
+#only supports Linux & x86 processors.
+#CXXFLAGS += -DUSE_AGNER_VECTORCLASS
+
+#//////////////////////////////////////////////////////
+# Users shouldn't need to change the rest of this file
+#//////////////////////////////////////////////////////
+
+#will need the profiler in most places..
+CXXFLAGS += ${INC_PROFILE}
+
+#define precision
+CXXFLAGS += -D${FP_PRECISION} -D${DISTRIBUTION_FP_PRECISION} -D${VECTORCLASS}
+
+default: map_test
+
+all: map_test
+
+# Compile directory:
+INSTALL = $(CURDIR)
+
+# Executable:
+EXE = map_test
+
+# Collect libraries into a single variable:
+LIBS += ${LIB_PROFILE}
+
+# Define common dependencies
+DEPS_COMMON = ../../common.h ../../definitions.h
+
+# All objects for the mini-app
+OBJS = map_test.o
+
+help:
+	@echo ''
+	@echo 'make c(lean)   delete all generated files'
+	@echo 'make           make map_test'
+
+# remove data generated by the simulation
+clean:
+	rm -rf *.o *~ $(EXE)
+
+# Rules for making each object file needed by the executable
+map_test.o: map_test.cpp
+	${CMP} ${CXXFLAGS} ${FLAG_OPENMP} ${MATHFLAGS} ${FLAGS} -c map_test.cpp -I../.. ${INC_PROFILE} ${INC_VECTORCLASS}
+
+# Make executable
+map_test: $(OBJS)
+	$(LNK) ${LDFLAGS} -o ${EXE} $(OBJS) $(LIBS)
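The build is driven entirely by the architecture makefile pulled in through VLASIATOR_ARCH, so a build might look like the following (the architecture name here is a hypothetical example, not something this patch defines):

    export VLASIATOR_ARCH=gcc_linux    # assumes a matching MAKE/Makefile.gcc_linux exists
    make map_test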
diff --git a/mini-apps/amr_reconstruction/common.h b/mini-apps/amr_reconstruction/common.h
new file mode 100644
index 000000000..69665c21b
--- /dev/null
+++ b/mini-apps/amr_reconstruction/common.h
@@ -0,0 +1,53 @@
+/*
+This file is part of Vlasiator.
+
+Copyright 2010, 2011, 2012, 2013 Finnish Meteorological Institute
+
+*/
+
+#ifndef COMMON_H
+#define COMMON_H
+#include <stdint.h>
+
+typedef uint32_t uint;
+typedef const uint32_t cuint;
+
+const uint WID = 4;         /*!< Number of cells per coordinate in a velocity block. Only a value of 4 is supported by the vectorized Leveque solver. */
+const uint WID2 = WID*WID;  /*!< Number of cells per 2D slab in a velocity block. */
+const uint WID3 = WID2*WID; /*!< Number of cells in a velocity block. */
+
+//Set the general floating point precision here. The default is single precision; use -DDP to set double precision.
+#ifdef DP
+typedef double Real;
+typedef const double creal;
+#else
+typedef float Real;
+typedef const float creal;
+#endif
+
+#define MAX_BLOCKS_PER_DIM 1000
+#ifdef ACC_SEMILAG_PLM
+#define RECONSTRUCTION_ORDER 1
+#endif
+#ifdef ACC_SEMILAG_PPM
+#define RECONSTRUCTION_ORDER 2
+#endif
+#ifdef ACC_SEMILAG_PQM
+#define RECONSTRUCTION_ORDER 4
+#endif
+
+// Natural constants
+namespace physicalconstants {
+   const Real MU_0 = 1.25663706e-6;         /*!< Permeability of vacuum, unit: (kg m) / (s^2 A^2). */
+   const Real K_B = 1.3806503e-23;          /*!< Boltzmann's constant, unit: (kg m^2) / (s^2 K). */
+   const Real CHARGE = 1.60217653e-19;      /*!< Elementary charge, unit: C. */
+   const Real MASS_PROTON = 1.67262158e-27; /*!< Proton rest mass, unit: kg. */
+   const Real R_E = 6.3712e6;               /*!< Radius of the Earth, unit: m. */
+}
+
+#endif
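These constants are what map_test.cpp below uses to initialize a Maxwellian velocity distribution, f(v) = rho (m / 2 pi k T)^{3/2} exp(-m v^2 / 2 k T). As a quick illustration of that formula with the same constants (a standalone sketch, not part of the patch; the density and temperature match the values map_test uses):

#include <cmath>
#include <cstdio>

int main() {
   const double K_B = 1.3806503e-23, MASS_PROTON = 1.67262158e-27;
   const double T = 500000.0;  // temperature [K], as in map_test.cpp
   const double rho = 1.0e6;   // density [1/m^3], as in map_test.cpp
   // Sample the Maxwellian on a coarse velocity grid and print (v, f(v)).
   for (double v = -4.0e6; v <= 4.0e6; v += 1.0e6) {
      double f = rho * pow(MASS_PROTON / (2.0 * M_PI * K_B * T), 1.5)
                     * exp(-MASS_PROTON * v * v / (2.0 * K_B * T));
      printf("%12.4g %12.4g\n", v, f);
   }
   return 0;
}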
diff --git a/mini-apps/amr_reconstruction/map_test.cpp b/mini-apps/amr_reconstruction/map_test.cpp
new file mode 100644
index 000000000..84010aa11
--- /dev/null
+++ b/mini-apps/amr_reconstruction/map_test.cpp
@@ -0,0 +1,117 @@
+#include <random>
+#include "common.h"
+#include "vlasovsolver/vec.h"
+#include "vlasovsolver/cpu_1d_ppm.hpp"
+//#include "vlasovsolver/cpu_1d_ppm_nonuniform.hpp"
+#include <cstdio>
+#include <cmath>
+#include <boost/random/mersenne_twister.hpp>
+#include <boost/random/uniform_real.hpp>
+#include <boost/random/variate_generator.hpp>
+
+/*print all values in the vector valued values array. In this array
+  there are blocks_per_dim blocks with a width of WID*/
+void print_values(int step, Vec *values, uint blocks_per_dim, Real v_min, Real dv){
+   char name[256];
+   sprintf(name,"dist_%03d.dat",step);
+
+   FILE* fp=fopen(name,"w");
+   for(uint i=0; i < blocks_per_dim * WID; i++){
+      Real v=v_min + (i + 0.5)*dv;
+      fprintf(fp,"%20.12g %20.12g %20.12g %20.12g %20.12g\n", v, values[i + WID][0], values[i + WID][1], values[i + WID][2], values[i + WID][3]);
+   }
+   fclose(fp);
+}
+
+void print_reconstruction(int step, Vec dv[], Vec values[], uint blocks_per_dim, Real v_min, Real dv0){
+   char name[256];
+   sprintf(name,"reconstructions_%03d.dat",step);
+   FILE* fp=fopen(name,"w");
+   FILE* fp2=fopen("a.dat","w");
+
+   Vec v_r = v_min;
+   const int subcells = 50;
+   /*loop through all blocks in column and divide into subcells. Print value of reconstruction*/
+   for (unsigned int k_block = 0; k_block < blocks_per_dim; k_block++){
+      for (uint k_cell = 0; k_cell < WID; ++k_cell){
+
+#ifdef ACC_SEMILAG_PPM
+         Vec a[3];
+         //Vec a2[3];
+         compute_ppm_coeff( values, h6, (k_block + 1) * WID + k_cell, a);
+         //compute_ppm_coeff_nonuniform(dv, values, h6, (k_block + 1) * WID + k_cell, a);
+         fprintf(fp2,"%12.8g %12.8g %12.8g\n",a[0][0],a[1][0],a[2][0]);
+#endif
+
+         Vec v_l = v_min + (k_block * WID + k_cell) * dv0;
+         uint iend = k_block * WID + k_cell;
+         if (iend > 0)
+            v_r += dv[iend-1+WID];
+
+         for (uint k_subcell=0; k_subcell < subcells; ++k_subcell){
+            Vec v_norm = (Real)(k_subcell + 0.5)/subcells; //normalized v of subcell in source cell
+            Vec v0 = v_l + v_norm * dv0;
+
+            Vec v = v_r + v_norm * dv[k_block * WID + k_cell + WID];
+
+#ifdef ACC_SEMILAG_PPM
+            Vec target =
+               a[0] +
+               2.0 * v_norm * a[1] +
+               3.0 * v_norm * v_norm * a[2];
+#endif
+
+            fprintf(fp,"%20.12g %20.12g %20.12g\n", v[0], values[k_block * WID + k_cell + WID][0], target[0]);
+         }
+         //fprintf(fp,"\n"); //empty line to separate segments in gnuplot
+      }
+   }
+
+   fclose(fp);
+   fclose(fp2);
+}
+
+int main(void) {
+
+   const Real dv0 = 20000;
+   const Real v_min = -4e6;
+   const int blocks_per_dim = 100;
+   const int i_block = 0; //x index of block, fixed in this simple test
+   const int j_block = 0; //y index of block, fixed in this simple test
+   const int j_cell = 0;  //y index of cell within block (0..WID-1)
+
+   Vec dv[(blocks_per_dim+2)*WID];
+   Real dvTmp[(blocks_per_dim+2)*WID];
+   Vec values[(blocks_per_dim+2)*WID];
+   Real rnd;
+   //std::random_device rd;
+   //std::mt19937 e2(rd());
+   //std::uniform_real_distribution<> dist(0, 10);
+
+   boost::mt19937 rng;
+   boost::uniform_real<Real> u(0.0, 2.0 * dv0);
+   boost::variate_generator<boost::mt19937&, boost::uniform_real<Real> > gen(rng, u);
+
+   /*initial values*/
+   /*clear target & values array*/
+   for (uint k=0; k < (blocks_per_dim+2)*WID; ++k){
[... remainder of map_test.cpp main() and the 15-line plot_reconstruction.py omitted ...]

From [...] Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Tue, 22 May 2018 17:21:11 +0300
Subject: [PATCH 009/602] Updated input arguments and comments

---
 .../sort_refined_ids_recursive.py | 23 ++++++++++++++-----
 1 file changed, 17 insertions(+), 6 deletions(-)

diff --git a/mini-apps/simple-grid-test/sort_refined_ids_recursive.py b/mini-apps/simple-grid-test/sort_refined_ids_recursive.py
index c43fcea52..555940bd5 100644
--- a/mini-apps/simple-grid-test/sort_refined_ids_recursive.py
+++ b/mini-apps/simple-grid-test/sort_refined_ids_recursive.py
@@ -47,8 +47,17 @@ def getChildren(children, parentId, dimension = 0, up = True, left = True):
 
     myChildren = list()
 
-    # Select 2/8 children per parent according to the logical parameters up,down,left,right.
-    # The names are slightly unintuitive in other dimensions but they come from dimension == 0
+    # Select 2/8 children per parent according to the logical parameters up, down, left, right.
+    # The names depict sides of the four children seen when looking along the direction of the
+    # pencil.
+
+    #   ----         ----
+    #   / /|         / /|
+    #  ---- |       ---- |
+    #  |UU| |       |LR| |
+    #  |DD|/        |LR|/
+    #   ----         ----
+    #
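+    # Worked example (editor's note, not part of this commit): with
+    # dimension == 0 and up == left == True, the branch below selects
+    # i1 = 0 and i2 = 1, i.e. the two children on the up-left edge of
+    # the parent as seen along the pencil direction.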
     if dimension == 0:
         if up and left:
             i1 = 0
@@ -189,17 +198,19 @@ def buildPencils(pencils,initialPencil,idsIn,dimension = 0,path = list()):
 
 parser = argparse.ArgumentParser(description='Create pencils on a refined grid.')
 parser.add_argument('--dimension', metavar = 'N', type=int, nargs=1,
-                    default=[0], help='Dimension')
+                    default=[0], help='Dimension (x = 0, y = 1, z = 2)')
 parser.add_argument('--filename', metavar = 'fn', type=str, nargs=1,
-                    default=['test.vtk'], help='Input file name')
+                    default=['test.vtk'], help='Input vtk file name')
+parser.add_argument('--debug', metavar = 'd', type=int, nargs=1,
+                    default=[0], help='Debug printouts (no = 0, yes = 1)')
 
 args = parser.parse_args()
 
 if args.dimension[0] > 0 and args.dimension[0] <= 2:
     dimension = args.dimension[0]
 else:
     dimension = 0
-
-debug = False
+
+debug = bool(args.debug[0])
 
 #filename = 'test.vtk'
 filename = args.filename[0]
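With these arguments the sorter gets a small command-line interface; a typical invocation (using the script's default input file name, test.vtk) might be:

    python sort_refined_ids_recursive.py --dimension 1 --filename test.vtk --debug 1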
From b9da201bd00274a5e4341dc65d216dd2906f72ac Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Tue, 22 May 2018 17:22:46 +0300
Subject: [PATCH 010/602] Added functions for calculating ppm coefficients on
 nonuniform grid.

---
 vlasovsolver/cpu_1d_ppm_nonuniform.hpp | 63 +++++++++++++++++++++++
 vlasovsolver/cpu_face_estimates.hpp    | 71 ++++++++++++++++++++++++++
 2 files changed, 134 insertions(+)
 create mode 100644 vlasovsolver/cpu_1d_ppm_nonuniform.hpp

diff --git a/vlasovsolver/cpu_1d_ppm_nonuniform.hpp b/vlasovsolver/cpu_1d_ppm_nonuniform.hpp
new file mode 100644
index 000000000..8bd107b2b
--- /dev/null
+++ b/vlasovsolver/cpu_1d_ppm_nonuniform.hpp
@@ -0,0 +1,63 @@
+/*
+ * This file is part of Vlasiator.
+ * Copyright 2010-2016 Finnish Meteorological Institute
+ *
+ * For details of usage, see the COPYING file and read the "Rules of the Road"
+ * at http://www.physics.helsinki.fi/vlasiator/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef CPU_1D_PPM_H
+#define CPU_1D_PPM_H
+
+#include <iostream>
+#include "vec.h"
+#include "algorithm"
+#include "cmath"
+#include "cpu_slope_limiters.hpp"
+#include "cpu_face_estimates.hpp"
+
+using namespace std;
+
+/*
+  Compute parabolic reconstruction with an explicit scheme
+*/
+inline void compute_ppm_coeff_nonuniform(const Vec * const dv, const Vec * const values, face_estimate_order order, uint k, Vec a[3]){
+   Vec fv_l; /*left face value*/
+   Vec fv_r; /*right face value*/
+   compute_filtered_face_values_nonuniform(dv, values, k, order, fv_l, fv_r);
+
+   //Colella et al., check for monotonicity
+   Vec m_face = fv_l;
+   Vec p_face = fv_r;
+   m_face = select((p_face - m_face) * (values[k] - 0.5 * (m_face + p_face)) >
+                   (p_face - m_face) * (p_face - m_face) * one_sixth,
+                   3 * values[k] - 2 * p_face,
+                   m_face);
+   p_face = select(-(p_face - m_face) * (p_face - m_face) * one_sixth >
+                   (p_face - m_face) * (values[k] - 0.5 * (m_face + p_face)),
+                   3 * values[k] - 2 * m_face,
+                   p_face);
+
+   //Fit a second order polynomial for reconstruction; see, e.g., White
+   //2008 (PQM article). Note the additional integration factors built in,
+   //contrary to White (2008) eq. 4.
+   a[0] = m_face;
+   a[1] = 3.0 * values[k] - 2.0 * m_face - p_face;
+   a[2] = (m_face + p_face - 2.0 * values[k]);
+}
+
+#endif

diff --git a/vlasovsolver/cpu_face_estimates.hpp b/vlasovsolver/cpu_face_estimates.hpp
index 243a5e0b5..751d5cc36 100644
--- a/vlasovsolver/cpu_face_estimates.hpp
+++ b/vlasovsolver/cpu_face_estimates.hpp
@@ -167,6 +167,30 @@ inline void compute_h4_left_face_value(const Vec * const values, uint k, Vec &fv
 }
 
+/*!
+  Compute left face value based on the explicit h4 estimate for a nonuniform grid.
+  Eqn 45 in White et al. 2008.
+
+  \param h Array with cell widths. Can be in arbitrary units since they always cancel. Maybe 1/refinement ratio?
+  \param u Array with volume averages. It is assumed a large enough stencil is defined around k.
+  \param k Index of the cell in u for which the left face is computed
+  \param fv_l Face value on the left face of cell k
+*/
+inline void compute_h4_left_face_value_nonuniform(const Vec * const h, const Vec * const u, uint k, Vec &fv_l) {
+
+   fv_l = (
+      1.0 / ( h[k - 2] + h[k - 1] + h[k] + h[k + 1] )
+      * ( ( h[k - 2] + h[k - 1] ) * ( h[k] + h[k + 1] ) / ( h[k - 1] + h[k] )
+          * ( u[k - 1] * h[k] + u[k] * h[k - 1] )
+          * ( 1.0 / ( h[k - 2] + h[k - 1] + h[k] ) + 1.0 / ( h[k - 1] + h[k] + h[k + 1] ) )
+          + ( h[k] * ( h[k] + h[k + 1] ) ) / ( ( h[k - 2] + h[k - 1] + h[k] ) * ( h[k - 2] + h[k - 1] ) )
+          * ( u[k - 1] * ( h[k - 2] + 2.0 * h[k - 1] ) - u[k - 2] * h[k - 1] )
+          + h[k - 1] * ( h[k - 2] + h[k - 1] ) / ( ( h[k - 1] + h[k] + h[k + 1] ) * ( h[k] + h[k + 1] ) )
+          * ( u[k] * ( 2.0 * h[k] + h[k + 1] ) - u[k + 1] * h[k] ) )
+   );
+}
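A useful property of eq. 45 is that for equal cell widths it must collapse to the classical uniform fourth-order face estimate, fv_l = (7(u[k-1] + u[k]) - (u[k-2] + u[k+1])) / 12. A standalone scalar check of the formula above (plain doubles instead of Vec; an editor's sketch, not part of the patch):

#include <cassert>
#include <cmath>
#include <cstdio>

// Scalar transcription of compute_h4_left_face_value_nonuniform.
static double h4_left_nonuniform(const double* h, const double* u, int k) {
   return 1.0 / (h[k-2] + h[k-1] + h[k] + h[k+1])
      * ( (h[k-2] + h[k-1]) * (h[k] + h[k+1]) / (h[k-1] + h[k])
          * (u[k-1] * h[k] + u[k] * h[k-1])
          * (1.0 / (h[k-2] + h[k-1] + h[k]) + 1.0 / (h[k-1] + h[k] + h[k+1]))
        + h[k] * (h[k] + h[k+1]) / ((h[k-2] + h[k-1] + h[k]) * (h[k-2] + h[k-1]))
          * (u[k-1] * (h[k-2] + 2.0 * h[k-1]) - u[k-2] * h[k-1])
        + h[k-1] * (h[k-2] + h[k-1]) / ((h[k-1] + h[k] + h[k+1]) * (h[k] + h[k+1]))
          * (u[k] * (2.0 * h[k] + h[k+1]) - u[k+1] * h[k]) );
}

int main() {
   const double h[4] = {1.0, 1.0, 1.0, 1.0};       // uniform widths
   const double u[4] = {0.1, 0.7, 0.9, 0.4};       // arbitrary cell averages
   const double lhs = h4_left_nonuniform(h, u, 2); // k = 2 uses u[0..3]
   const double rhs = (7.0 * (u[1] + u[2]) - (u[0] + u[3])) / 12.0;
   printf("%g %g\n", lhs, rhs);                    // both 0.891667 for this data
   assert(std::fabs(lhs - rhs) < 1e-12);
   return 0;
}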
@@ -298,4 +322,51 @@ inline void compute_filtered_face_values(const Vec * const values,uint k, face_e } } + +inline void compute_filtered_face_values_nonuniform(const Vec * const dv, const Vec * const values,uint k, face_estimate_order order, Vec &fv_l, Vec &fv_r){ + switch(order){ + case h4: + compute_h4_left_face_value_nonuniform(dv, values, k, fv_l); + compute_h4_left_face_value_nonuniform(dv, values, k + 1, fv_r); + break; + // case h5: + // compute_h5_face_values(dv, values, k, fv_l, fv_r); + // break; + // case h6: + // compute_h6_left_face_value(dv, values, k, fv_l); + // compute_h6_left_face_value(dv, values, k + 1, fv_r); + // break; + // case h8: + // compute_h8_left_face_value(dv, values, k, fv_l); + // compute_h8_left_face_value(dv, values, k + 1, fv_r); + // break; + default: + break; + } + Vec slope_abs,slope_sign; + slope_limiter(values[k -1], values[k], values[k + 1], slope_abs, slope_sign); + + //check for extrema, flatten if it is + Vecb is_extrema = (slope_abs == Vec(0.0)); + if(horizontal_or(is_extrema)) { + fv_r = select(is_extrema, values[k], fv_r); + fv_l = select(is_extrema, values[k], fv_l); + } + + //Fix left face if needed; boundary value is not bounded + Vecb filter = (values[k -1] - fv_l) * (fv_l - values[k]) < 0 ; + if(horizontal_or (filter)) { + //Go to linear (PLM) estimates if not ok (this is always ok!) + fv_l=select(filter, values[k ] - slope_sign * 0.5 * slope_abs, fv_l); + } + + //Fix face if needed; boundary value is not bounded + filter = (values[k + 1] - fv_r) * (fv_r - values[k]) < 0; + if(horizontal_or (filter)) { + //Go to linear (PLM) estimates if not ok (this is always ok!) + fv_r=select(filter, values[k] + slope_sign * 0.5 * slope_abs, fv_r); + } +} + + #endif From 0d5ac1b06e1c0401bac18a1320a795da22668a61 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 23 May 2018 10:06:22 +0300 Subject: [PATCH 011/602] Modified so that the nonuniform grid test is done by default --- mini-apps/amr_reconstruction/map_test.cpp | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/mini-apps/amr_reconstruction/map_test.cpp b/mini-apps/amr_reconstruction/map_test.cpp index 84010aa11..157bf091a 100644 --- a/mini-apps/amr_reconstruction/map_test.cpp +++ b/mini-apps/amr_reconstruction/map_test.cpp @@ -1,8 +1,8 @@ #include #include "common.h" #include "vlasovsolver/vec.h" -#include "vlasovsolver/cpu_1d_ppm.hpp" -//#include "vlasovsolver/cpu_1d_ppm_nonuniform.hpp" +//#include "vlasovsolver/cpu_1d_ppm.hpp" +#include "vlasovsolver/cpu_1d_ppm_nonuniform.hpp" #include #include #include @@ -37,8 +37,8 @@ void print_reconstruction(int step, Vec dv[], Vec values[], uint blocks_per_dim #ifdef ACC_SEMILAG_PPM Vec a[3]; //Vec a2[3]; - compute_ppm_coeff( values, h6, (k_block + 1) * WID + k_cell, a); - //compute_ppm_coeff_nonuniform(dv, values, h6, (k_block + 1) * WID + k_cell, a); + //compute_ppm_coeff( values, h6, (k_block + 1) * WID + k_cell, a); + compute_ppm_coeff_nonuniform(dv, values, h6, (k_block + 1) * WID + k_cell, a); fprintf(fp2,"%12.8g %12.8g %12.8g\n",a[0][0],a[1][0],a[2][0]); #endif @@ -96,9 +96,8 @@ int main(void) { /*clear target & values array*/ for (uint k=0; k Date: Fri, 25 May 2018 16:02:45 +0300 Subject: [PATCH 012/602] Added refinement to the uniform grid to test. Shifted values by 0.5 cells which makes the reconstructions much smoother. Changed h-parameter in the coefficient calculation call to h4. 
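
[Editor's note, not part of the original commit] This patch switches the face-estimate call
to h4, and patch 010 above introduced compute_h4_left_face_value_nonuniform. A useful sanity
check on that estimate is that on a uniform grid it must reduce to the classic fourth-order
stencil (-u[k-2] + 7*u[k-1] + 7*u[k] - u[k+1]) / 12. The standalone sketch below is an
editorial illustration, not code from any patch: it uses plain doubles instead of the Vec
type, a hypothetical helper name, and the "h[k-2] + 2.0*h[k-1]" term, i.e. the sign that the
bug-fix patch 013 further down adopts.

    #include <cassert>
    #include <cmath>
    #include <cstdio>

    // Scalar transcription of compute_h4_left_face_value_nonuniform
    // (White et al. 2008, eq. 45). h = cell widths, u = volume averages.
    static double h4_left_face_nonuniform(const double* h, const double* u, int k) {
      return 1.0 / (h[k-2] + h[k-1] + h[k] + h[k+1])
        * ( (h[k-2] + h[k-1]) * (h[k] + h[k+1]) / (h[k-1] + h[k])
            * (u[k-1] * h[k] + u[k] * h[k-1])
            * (1.0 / (h[k-2] + h[k-1] + h[k]) + 1.0 / (h[k-1] + h[k] + h[k+1]))
          + h[k] * (h[k] + h[k+1]) / ((h[k-2] + h[k-1] + h[k]) * (h[k-2] + h[k-1]))
            * (u[k-1] * (h[k-2] + 2.0 * h[k-1]) - u[k-2] * h[k-1])
          + h[k-1] * (h[k-2] + h[k-1]) / ((h[k-1] + h[k] + h[k+1]) * (h[k] + h[k+1]))
            * (u[k] * (2.0 * h[k] + h[k+1]) - u[k+1] * h[k]) );
    }

    int main() {
      const double h[4] = {1.0, 1.0, 1.0, 1.0};  // uniform cell widths
      const double u[4] = {0.5, 2.0, 3.0, 1.0};  // arbitrary volume averages
      const double fv_l = h4_left_face_nonuniform(h, u, 2);
      const double ref  = (-u[0] + 7.0 * u[1] + 7.0 * u[2] - u[3]) / 12.0;
      std::printf("nonuniform: %.12g  uniform h4: %.12g\n", fv_l, ref);
      assert(std::fabs(fv_l - ref) < 1e-12);
      return 0;
    }

With the original "*" in place of the "+" this uniform-limit check fails, which matches the
bug fix that follows in patch 013.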
--- mini-apps/amr_reconstruction/map_test.cpp | 59 ++++++++++++------- .../amr_reconstruction/plot_reconstruction.py | 22 +++++-- 2 files changed, 54 insertions(+), 27 deletions(-) diff --git a/mini-apps/amr_reconstruction/map_test.cpp b/mini-apps/amr_reconstruction/map_test.cpp index 157bf091a..ac2170280 100644 --- a/mini-apps/amr_reconstruction/map_test.cpp +++ b/mini-apps/amr_reconstruction/map_test.cpp @@ -29,31 +29,27 @@ void print_reconstruction(int step, Vec dv[], Vec values[], uint blocks_per_dim FILE* fp=fopen(name,"w"); FILE* fp2=fopen("a.dat","w"); - Vec v_r = v_min; + Vec v0 = v_min; const int subcells = 50; /*loop through all blocks in column and divide into subcells. Print value of reconstruction*/ for (unsigned int k_block = 0; k_block 0) - v_r += dv[iend-1+WID]; + v0 += dv[iend-1+WID]; + for (uint k_subcell=0; k_subcell< subcells; ++k_subcell){ Vec v_norm = (Real)(k_subcell + 0.5)/subcells; //normalized v of subcell in source cell - Vec v0 = v_l + v_norm * dv0; - - Vec v = v_r + v_norm * dv[k_block * WID + k_cell + WID]; - + Vec v = v0 + v_norm * dv[k_block * WID + k_cell + WID]; + #ifdef ACC_SEMILAG_PPM Vec target = a[0] + @@ -74,41 +70,60 @@ void print_reconstruction(int step, Vec dv[], Vec values[], uint blocks_per_dim int main(void) { const Real dv0 = 20000; - const Real v_min = -4e6; + //const Real v_min = -4e6; const int blocks_per_dim = 100; const int i_block = 0; //x index of block, fixed in this simple test const int j_block = 0; //y index of block, fixed in this simple test const int j_cell = 0; // y index of cell within block (0..WID-1) Vec dv[(blocks_per_dim+2)*WID]; - Real dvTmp[(blocks_per_dim+2)*WID]; Vec values[(blocks_per_dim+2)*WID]; - Real rnd; - //std::random_device rd; - //std::mt19937 e2(rd()); - //std::uniform_real_distribution<> dist(0, 10); boost::mt19937 rng; boost::uniform_real u(0.0, 2.0 * dv0); boost::variate_generator > gen(rng, u); + gen.distribution().reset(); + gen.engine().seed(12345); /*initial values*/ /*clear target & values array*/ for (uint k=0; k 0) + dv[(blocks_per_dim + 2) * WID / 2 - k] = dv[(blocks_per_dim + 2) * WID / 2 - k]/pow(2,(max_refinement - k / cells_per_level)); } + Real v_min = 0.0; + for (uint k=WID;k < (blocks_per_dim + 2) * WID / 2; ++k) { + v_min -= dv[k][0]; + } + Real T = 500000; Real rho = 1.0e6; Real v = v_min; + Real const v1 = 10 * dv0; for(uint i=0; i < blocks_per_dim * WID; i++){ // Real v=v_min + i*dv; + + // Evaluate the function at the middle of the v cell + v = v + 0.5 * dv[i + WID][0]; values[i + WID] = rho * pow(physicalconstants::MASS_PROTON / (2.0 * M_PI * physicalconstants::K_B * T), 1.5) * exp(- physicalconstants::MASS_PROTON * v * v / (2.0 * physicalconstants::K_B * T)); - v = v + dv[i + WID][0]; + + //values[i + WID] = rho * pow(physicalconstants::MASS_PROTON / (2.0 * M_PI * physicalconstants::K_B * T), 1.5) * + // ( exp(- physicalconstants::MASS_PROTON * v * v / (2.0 * physicalconstants::K_B * T)) + + // exp(- physicalconstants::MASS_PROTON * (v+v1) * (v+v1) / (2.0 * physicalconstants::K_B * T))); + // Move to the end of the cell for the next iteration + v = v + 0.5 * dv[i + WID][0]; } // print_values(0,values,blocks_per_dim, v_min, dv); diff --git a/mini-apps/amr_reconstruction/plot_reconstruction.py b/mini-apps/amr_reconstruction/plot_reconstruction.py index e1b0530ce..6909cf54b 100644 --- a/mini-apps/amr_reconstruction/plot_reconstruction.py +++ b/mini-apps/amr_reconstruction/plot_reconstruction.py @@ -1,15 +1,27 @@ from pylab import * +from cycler import cycler -dat = 
loadtxt('reconstructions_000.dat') +fname = 'reconstruction_10000' + +dat = loadtxt('reconstructions_10000.dat') figure() clf() -plot(dat[:,0],dat[:,1], '.', label = 'Values') -plot(dat[:,0],dat[:,2], '-', label = 'Reconstruction') +T = 5e5 +m_p = 1.67262158e-27 +k_B = 1.3806503e-23 +f = 1.0e6 * (m_p / (2.0 * pi * k_B * T)) ** 1.5 * exp(-m_p * dat[:,0] ** 2 / (2.0 * k_B * T)) +#f = 1.0e6 * (m_p / (2.0 * pi * k_B * T)) ** 1.5 * ( +# exp(-m_p * dat[:,0] ** 2 / (2.0 * k_B * T)) + +# exp(-m_p * (dat[:,0] + 2e5) ** 2 / (2.0 * k_B * T))) +rc('axes', prop_cycle = cycler('color', ['c','m','y','k'])) +plot(dat[:,0],f , '-', lw = 2, label = 'Maxwellian') +plot(dat[:,0],dat[:,1], '-', lw = 2, label = 'Volume Average') +plot(dat[:,0],dat[:,2], '-', lw = 2, label = 'Reconstruction') imax = find(dat[:,1] == max(dat[:,1])) vmax = dat[imax[0],0] grid(True) legend() xlim(vmax-5e5,vmax+5e5) -savefig('reconstruction.png') -savefig('reconstruction.eps') +savefig(fname+'.png') +savefig(fname+'.eps') show() From 7ede24f94d416c30b28edf4caf3eddb396b7155c Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 25 May 2018 16:04:32 +0300 Subject: [PATCH 013/602] Fixed a bug where a + was replaced by a * by accident --- vlasovsolver/cpu_face_estimates.hpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/vlasovsolver/cpu_face_estimates.hpp b/vlasovsolver/cpu_face_estimates.hpp index 751d5cc36..cfa1e96ca 100644 --- a/vlasovsolver/cpu_face_estimates.hpp +++ b/vlasovsolver/cpu_face_estimates.hpp @@ -185,7 +185,7 @@ inline void compute_h4_left_face_value_nonuniform(const Vec * const h, const Vec * ( u[k - 1] * h[k] + u[k] * h[k - 1] ) * (1.0 / ( h[k - 2] + h[k - 1] + h[k] ) + 1.0 / ( h[k - 1] + h[k] + h[k + 1] ) ) + ( h[k] * ( h[k] + h[k + 1] ) ) / ( ( h[k - 2] + h[k - 1] + h[k] ) * (h[k - 2] + h[k - 1] ) ) - * ( u[k - 1] * (h[k - 2] * 2.0 * h[k - 1] ) - ( u[k - 2] * h[k - 1] ) ) + * ( u[k - 1] * (h[k - 2] + 2.0 * h[k - 1] ) - ( u[k - 2] * h[k - 1] ) ) + h[k - 1] * ( h[k - 2] + h[k - 1] ) / ( ( h[k - 1] + h[k] + h[k + 1] ) * ( h[k] + h[k + 1] ) ) * ( u[k] * ( 2.0 * h[k] + h[k + 1] ) - u[k + 1] * h[k] ) ) ); @@ -341,6 +341,7 @@ inline void compute_filtered_face_values_nonuniform(const Vec * const dv, const // compute_h8_left_face_value(dv, values, k + 1, fv_r); // break; default: + std::cout << "Order " << order << " has not been implemented (yet)\n"; break; } Vec slope_abs,slope_sign; From 9bfbe4f93f7215fce33d40436c954a11547adce0 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 25 May 2018 16:04:55 +0300 Subject: [PATCH 014/602] Some writes added for debugging, commented out --- vlasovsolver/cpu_1d_ppm.hpp | 5 +++++ vlasovsolver/cpu_1d_ppm_nonuniform.hpp | 9 +++++++++ 2 files changed, 14 insertions(+) diff --git a/vlasovsolver/cpu_1d_ppm.hpp b/vlasovsolver/cpu_1d_ppm.hpp index dd5985d2b..ff060e4a8 100644 --- a/vlasovsolver/cpu_1d_ppm.hpp +++ b/vlasovsolver/cpu_1d_ppm.hpp @@ -58,6 +58,11 @@ inline void compute_ppm_coeff(const Vec * const values, face_estimate_order orde a[0] = m_face; a[1] = 3.0 * values[k] - 2.0 * m_face - p_face; a[2] = (m_face + p_face - 2.0 * values[k]); + + //std::cout << "value = " << values[k][0] << ", m_face = " << m_face[0] << ", p_face = " << p_face[0] << "\n"; + //std::cout << values[k][0] - m_face[0] << ", " << values[k][0] - p_face[0] << "\n"; + + //std::cout << values[k][0] << " " << m_face[0] << " " << p_face[0] << "\n"; } #endif diff --git a/vlasovsolver/cpu_1d_ppm_nonuniform.hpp b/vlasovsolver/cpu_1d_ppm_nonuniform.hpp index 8bd107b2b..35639a69b 100644 --- 
a/vlasovsolver/cpu_1d_ppm_nonuniform.hpp +++ b/vlasovsolver/cpu_1d_ppm_nonuniform.hpp @@ -43,6 +43,10 @@ inline void compute_ppm_coeff_nonuniform(const Vec * const dv, const Vec * const //Coella et al, check for monotonicity Vec m_face = fv_l; Vec p_face = fv_r; + + //std::cout << "value = " << values[k][0] << ", m_face = " << m_face[0] << ", p_face = " << p_face[0] << "\n"; + //std::cout << values[k][0] - m_face[0] << ", " << values[k][0] - p_face[0] << "\n"; + m_face = select((p_face - m_face) * (values[k] - 0.5 * (m_face + p_face)) > (p_face - m_face)*(p_face - m_face) * one_sixth, 3 * values[k] - 2 * p_face, @@ -58,6 +62,11 @@ inline void compute_ppm_coeff_nonuniform(const Vec * const dv, const Vec * const a[0] = m_face; a[1] = 3.0 * values[k] - 2.0 * m_face - p_face; a[2] = (m_face + p_face - 2.0 * values[k]); + + //std::cout << "value = " << values[k][0] << ", m_face = " << m_face[0] << ", p_face = " << p_face[0] << "\n"; + //std::cout << values[k][0] - m_face[0] << ", " << values[k][0] - p_face[0] << "\n"; + + //std::cout << values[k][0] << " " << m_face[0] << " " << p_face[0] << "\n"; } #endif From 1b568fef65b3d72fbc084b78baa0f789b69c8a94 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 28 May 2018 15:30:34 +0300 Subject: [PATCH 015/602] Propagate prototype added. Works with uniform grid, nonuniform has issues. --- mini-apps/amr_reconstruction/map_test.cpp | 152 ++++++++++++++---- .../amr_reconstruction/plot_reconstruction.py | 17 +- 2 files changed, 132 insertions(+), 37 deletions(-) diff --git a/mini-apps/amr_reconstruction/map_test.cpp b/mini-apps/amr_reconstruction/map_test.cpp index ac2170280..00da2e19f 100644 --- a/mini-apps/amr_reconstruction/map_test.cpp +++ b/mini-apps/amr_reconstruction/map_test.cpp @@ -23,13 +23,94 @@ void print_values(int step, Vec *values, uint blocks_per_dim, Real v_min, Real d fclose(fp); } -void print_reconstruction(int step, Vec dv[], Vec values[], uint blocks_per_dim, Real v_min, Real dv0){ +void propagate(Vec dr[], Vec values[], Real z_translation, uint blocks_per_dim ) { + + // Determine direction of translation + // part of density goes here (cell index change along spatial direcion) + const int target_scell_index = (z_translation > 0) ? 1: -1; + + // Vector buffer where we write data, initialized to 0*/ + Vec targetValues[(blocks_per_dim + 2) * WID]; + + + auto it = max_element(std::begin(dr), std::end(dr)); + Real i_dz = 1.0 / (it) + + for (uint k_block = 0; k_block < blocks_per_dim; k_block++){ + + for (uint k_cell=0; k_cell < WID; ++k_cell) { + + uint gid = k_block * WID + k_cell + WID; + // init target_values + targetValues[gid] = 0.0; + + } + } + for (uint k_block = 0; k_block < blocks_per_dim; k_block++){ + + for (uint k_cell=0; k_cell < WID; ++k_cell){ + + uint gid = k_block * WID + k_cell + WID; + + // Calculate normalized coordinates in current cell. + // The coordinates (scaled units from 0 to 1) between which we will + // integrate to put mass in the target neighboring cell. 
+ Realv z_1,z_2; + if ( z_translation < 0 ) { + z_1 = 0; + z_2 = -z_translation * i_dz; + } else { + z_1 = 1.0 - z_translation * i_dz; + z_2 = 1.0; + } + + if( abs(z_1) > 1.0 || abs(z_2) > 1.0 ) { + std::cout << "Error, CFL condition violated\n"; + std::cout << "Exiting\n"; + std::exit(1); + } + + //std::cout << z_1 << ", " << z_2 << "\n"; + + // Compute polynomial coefficients + Vec a[3]; + //compute_ppm_coeff_nonuniform(dr, values, h4, gid + target_scell_index, a); + compute_ppm_coeff_nonuniform(dr, values, h4, gid, a); + + // Compute integral + const Vec ngbr_target_density = + z_2 * ( a[0] + z_2 * ( a[1] + z_2 * a[2] ) ) - + z_1 * ( a[0] + z_1 * ( a[1] + z_1 * a[2] ) ); + + // Store mapped density in two target cells + // in the neighbor cell we will put this density + targetValues[gid + target_scell_index] += ngbr_target_density; + // in the current original cells we will put the rest of the original density + targetValues[gid] += values[gid] - ngbr_target_density; + //std::cout << values[gid][0] << ", " << ngbr_target_density[0] << ", " << targetValues[gid][0] << "\n"; + } + } + + // Store target data into source data + for (uint k_block = 0; k_block 0) - v0 += dv[iend-1+WID]; + r0 += dr[iend-1+WID]; for (uint k_subcell=0; k_subcell< subcells; ++k_subcell){ - Vec v_norm = (Real)(k_subcell + 0.5)/subcells; //normalized v of subcell in source cell - Vec v = v0 + v_norm * dv[k_block * WID + k_cell + WID]; + Vec r_norm = (Real)(k_subcell + 0.5)/subcells; //normalized r of subcell in source cell + Vec r = r0 + r_norm * dr[k_block * WID + k_cell + WID]; #ifdef ACC_SEMILAG_PPM Vec target = a[0] + - 2.0 * v_norm * a[1] + - 3.0 * v_norm * v_norm * a[2]; + 2.0 * r_norm * a[1] + + 3.0 * r_norm * r_norm * a[2]; #endif - fprintf(fp,"%20.12g %20.12g %20.12g\n", v[0], values[k_block * WID + k_cell + WID][0], target[0]); + fprintf(fp,"%20.12g %20.12g %20.12g\n", r[0], values[k_block * WID + k_cell + WID][0], target[0]); } //fprintf(fp,"\n"); //empty line to deay wgments in gnuplot } } fclose(fp); - fclose(fp2); } int main(void) { - const Real dv0 = 20000; - //const Real v_min = -4e6; + const Real dr0 = 20000; const int blocks_per_dim = 100; const int i_block = 0; //x index of block, fixed in this simple test const int j_block = 0; //y index of block, fixed in this simple test const int j_cell = 0; // y index of cell within block (0..WID-1) - Vec dv[(blocks_per_dim+2)*WID]; + Vec dr[(blocks_per_dim+2)*WID]; Vec values[(blocks_per_dim+2)*WID]; boost::mt19937 rng; - boost::uniform_real u(0.0, 2.0 * dv0); + boost::uniform_real u(0.0, 2.0 * dr0); boost::variate_generator > gen(rng, u); gen.distribution().reset(); gen.engine().seed(12345); @@ -89,43 +167,51 @@ int main(void) { /*clear target & values array*/ for (uint k=0; k 0) - dv[(blocks_per_dim + 2) * WID / 2 - k] = dv[(blocks_per_dim + 2) * WID / 2 - k]/pow(2,(max_refinement - k / cells_per_level)); + dr[(blocks_per_dim + 2) * WID / 2 - k] = dr[(blocks_per_dim + 2) * WID / 2 - k]/pow(2,(max_refinement - k / cells_per_level)); } - Real v_min = 0.0; + Real r_min = 0.0; for (uint k=WID;k < (blocks_per_dim + 2) * WID / 2; ++k) { - v_min -= dv[k][0]; + r_min -= dr[k][0]; } Real T = 500000; Real rho = 1.0e6; - Real v = v_min; - Real const v1 = 10 * dv0; + Real r = r_min; + Real const r1 = 10 * dr0; for(uint i=0; i < blocks_per_dim * WID; i++){ - // Real v=v_min + i*dv; - // Evaluate the function at the middle of the v cell - v = v + 0.5 * dv[i + WID][0]; + // Evaluate the function at the middle of the cell + r = r + 0.5 * dr[i + WID][0]; values[i + WID] = 
rho * pow(physicalconstants::MASS_PROTON / (2.0 * M_PI * physicalconstants::K_B * T), 1.5) * - exp(- physicalconstants::MASS_PROTON * v * v / (2.0 * physicalconstants::K_B * T)); + exp(- physicalconstants::MASS_PROTON * r * r / (2.0 * physicalconstants::K_B * T)); //values[i + WID] = rho * pow(physicalconstants::MASS_PROTON / (2.0 * M_PI * physicalconstants::K_B * T), 1.5) * // ( exp(- physicalconstants::MASS_PROTON * v * v / (2.0 * physicalconstants::K_B * T)) + // exp(- physicalconstants::MASS_PROTON * (v+v1) * (v+v1) / (2.0 * physicalconstants::K_B * T))); // Move to the end of the cell for the next iteration - v = v + 0.5 * dv[i + WID][0]; + r = r + 0.5 * dr[i + WID][0]; + } + + print_reconstruction(0, dr, values, blocks_per_dim, r_min); + + uint nstep = 200; + Real step = -500.0; + + for (uint istep=0; istep < nstep; ++istep) { + propagate(dr, values, step, blocks_per_dim); + if ((istep+1) % 10 == 0) + print_reconstruction(istep+1, dr, values, blocks_per_dim, r_min); } - // print_values(0,values,blocks_per_dim, v_min, dv); - print_reconstruction(0, dv, values, blocks_per_dim, v_min, dv0); } diff --git a/mini-apps/amr_reconstruction/plot_reconstruction.py b/mini-apps/amr_reconstruction/plot_reconstruction.py index 6909cf54b..9e3554b6b 100644 --- a/mini-apps/amr_reconstruction/plot_reconstruction.py +++ b/mini-apps/amr_reconstruction/plot_reconstruction.py @@ -1,10 +1,18 @@ from pylab import * from cycler import cycler +import argparse -fname = 'reconstruction_10000' +parser = argparse.ArgumentParser(description='Plot 1d reconstructions') +parser.add_argument('--step', metavar = 'N', type=int, nargs=1, + default=[0], help='Step to plot') +args = parser.parse_args() -dat = loadtxt('reconstructions_10000.dat') -figure() +#fname = 'reconstruction_100' +fname = 'reconstruction_{:03d}'.format(args.step[0]) + +#dat = loadtxt('reconstructions_010.dat') +dat = loadtxt('reconstructions_{:03d}.dat'.format(args.step[0])) +figure(1) clf() T = 5e5 m_p = 1.67262158e-27 @@ -24,4 +32,5 @@ xlim(vmax-5e5,vmax+5e5) savefig(fname+'.png') savefig(fname+'.eps') -show() +show(block=False) +#print(sum(dat[:,2])) From d819bbd7ec0c20e70f8af07711436cb2747be83a Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 29 May 2018 17:31:27 +0300 Subject: [PATCH 016/602] Finished propagation in map_test.cpp. Added python scripts to plot the result. 
--- .../fun_plot_reconstruction.py | 61 +++++++++++++++++++ mini-apps/amr_reconstruction/map_test.cpp | 56 +++++++++-------- .../plot_reconstructions.py | 15 +++++ 3 files changed, 108 insertions(+), 24 deletions(-) create mode 100644 mini-apps/amr_reconstruction/fun_plot_reconstruction.py create mode 100644 mini-apps/amr_reconstruction/plot_reconstructions.py diff --git a/mini-apps/amr_reconstruction/fun_plot_reconstruction.py b/mini-apps/amr_reconstruction/fun_plot_reconstruction.py new file mode 100644 index 000000000..9ecb01e71 --- /dev/null +++ b/mini-apps/amr_reconstruction/fun_plot_reconstruction.py @@ -0,0 +1,61 @@ +from cycler import cycler +import argparse +import matplotlib.animation as animation +import matplotlib.pyplot as plt +import matplotlib.mlab as mlab +import numpy as np + +def plot_reconstruction(step,scale='linear'): + + #parser = argparse.ArgumentParser(description='Plot 1d reconstructions') + #parser.add_argument('--step', metavar = 'N', type=int, nargs=1, + # default=[0], help='Step to plot') + #args = parser.parse_args() + + #fname = 'reconstruction_100' + fname = 'reconstruction_{:03d}'.format(step) + + #dat = loadtxt('reconstructions_010.dat') + dat = np.loadtxt('reconstructions_{:03d}.dat'.format(step)) + plt.figure(1,figsize=(8,6),dpi = 100) + plt.clf() + T = 5e5 + m_p = 1.67262158e-27 + k_B = 1.3806503e-23 + + imax = mlab.find(dat[:,1] == max(dat[:,1])) + rmax = dat[imax[0],0] + + r0 = 2e5 + dr = -500 + + r = dat[:,0] - (r0 + step * dr) + f = 1.0e18 * (m_p / (2.0 * np.pi * k_B * T)) ** 1.5 * np.exp(-m_p * r ** 2 / (2.0 * k_B * T)) + #f = 1.0e6 * (m_p / (2.0 * pi * k_B * T)) ** 1.5 * ( + # exp(-m_p * dat[:,0] ** 2 / (2.0 * k_B * T)) + + # exp(-m_p * (dat[:,0] + 2e5) ** 2 / (2.0 * k_B * T))) + plt.rc('axes', prop_cycle = cycler('color', ['c','m','y','k'])) + plt.plot(dat[:,0],f , '-', lw = 2, label = 'Analytic Function') + plt.plot(dat[:,0],dat[:,1], '-', lw = 2, label = 'Volume Average') + plt.plot(dat[:,0],dat[:,2], '-', lw = 2, label = 'Reconstruction') + plt.grid(True) + plt.legend(loc=0) + + if scale is 'log': + plt.ylim(1e-16,1e3) + plt.xlim(-8e5,8e5) + else: + plt.xlim(-5e5,5e5) + + ax = plt.gca() + ax.set_yscale(scale) + plt.savefig(fname+'.png') + #plt.savefig(fname+'.eps') + + #show(block=False) + #print(sum(dat[:,2])) + #print(np.trapz(dat[:,2],dat[:,0]),max(dat[:,2])) + print(sum(np.gradient(dat[:,0]) * dat[:,2]),max(dat[:,2])) + return sum(np.gradient(dat[:,0]) * dat[:,2]),max(dat[:,2]) + + #return dat diff --git a/mini-apps/amr_reconstruction/map_test.cpp b/mini-apps/amr_reconstruction/map_test.cpp index 00da2e19f..6556cf6c8 100644 --- a/mini-apps/amr_reconstruction/map_test.cpp +++ b/mini-apps/amr_reconstruction/map_test.cpp @@ -32,9 +32,11 @@ void propagate(Vec dr[], Vec values[], Real z_translation, uint blocks_per_dim ) // Vector buffer where we write data, initialized to 0*/ Vec targetValues[(blocks_per_dim + 2) * WID]; + Real maxdr = 1.0e-8; - auto it = max_element(std::begin(dr), std::end(dr)); - Real i_dz = 1.0 / (it) + for (uint i = 0; i < (blocks_per_dim + 2) * WID; i++) { + maxdr = max(maxdr,dr[i][0]); + } for (uint k_block = 0; k_block < blocks_per_dim; k_block++){ @@ -54,13 +56,15 @@ void propagate(Vec dr[], Vec values[], Real z_translation, uint blocks_per_dim ) // Calculate normalized coordinates in current cell. // The coordinates (scaled units from 0 to 1) between which we will - // integrate to put mass in the target neighboring cell. + // integrate to put mass in the target neighboring cell. 
+ // Normalize the coordinates to the origin cell. Then we scale with the difference + // in volume between target and origin later when adding the integrated value. Realv z_1,z_2; if ( z_translation < 0 ) { z_1 = 0; - z_2 = -z_translation * i_dz; + z_2 = -z_translation / dr[gid][0]; } else { - z_1 = 1.0 - z_translation * i_dz; + z_1 = 1.0 - z_translation / dr[gid][0]; z_2 = 1.0; } @@ -69,8 +73,6 @@ void propagate(Vec dr[], Vec values[], Real z_translation, uint blocks_per_dim ) std::cout << "Exiting\n"; std::exit(1); } - - //std::cout << z_1 << ", " << z_2 << "\n"; // Compute polynomial coefficients Vec a[3]; @@ -84,10 +86,9 @@ void propagate(Vec dr[], Vec values[], Real z_translation, uint blocks_per_dim ) // Store mapped density in two target cells // in the neighbor cell we will put this density - targetValues[gid + target_scell_index] += ngbr_target_density; + targetValues[gid + target_scell_index] += ngbr_target_density * dr[gid] / dr[gid + target_scell_index]; // in the current original cells we will put the rest of the original density targetValues[gid] += values[gid] - ngbr_target_density; - //std::cout << values[gid][0] << ", " << ngbr_target_density[0] << ", " << targetValues[gid][0] << "\n"; } } @@ -146,6 +147,16 @@ void print_reconstruction(int step, Vec dr[], Vec values[], uint blocks_per_dim fclose(fp); } +void refine(Vec dr[], int ir, int max_refinement, int cells_per_level) { + + for (uint k=0; k < max_refinement * cells_per_level; ++k) { + dr[ir + k] = dr[ir + k]/pow(2,(max_refinement - k / cells_per_level)); + if (k > 0) + dr[ir - k] = dr[ir - k]/pow(2,(max_refinement - k / cells_per_level)); + } + +} + int main(void) { const Real dr0 = 20000; @@ -171,41 +182,38 @@ int main(void) { //dr[k] = gen(); } - uint max_refinement = 1; - uint cells_per_level = 2; - for (uint k=0; k < max_refinement * cells_per_level; ++k) { - dr[(blocks_per_dim + 2) * WID / 2 + k] = dr[(blocks_per_dim + 2) * WID / 2 + k]/pow(2,(max_refinement - k / cells_per_level)); - if (k > 0) - dr[(blocks_per_dim + 2) * WID / 2 - k] = dr[(blocks_per_dim + 2) * WID / 2 - k]/pow(2,(max_refinement - k / cells_per_level)); - } + int ir = (blocks_per_dim + 2) * WID / 2; + int ir2 = (blocks_per_dim + 2) * WID / 3; + int max_refinement = 5; + int cells_per_level = 2; + refine(dr,ir,max_refinement,cells_per_level); + // refine(dr,ir2,max_refinement,cells_per_level); + Real r_min = 0.0; for (uint k=WID;k < (blocks_per_dim + 2) * WID / 2; ++k) { r_min -= dr[k][0]; } Real T = 500000; - Real rho = 1.0e6; + Real rho = 1.0e18; Real r = r_min; - Real const r1 = 10 * dr0; + Real r1 = 10.0 * dr0; for(uint i=0; i < blocks_per_dim * WID; i++){ // Evaluate the function at the middle of the cell r = r + 0.5 * dr[i + WID][0]; values[i + WID] = rho * pow(physicalconstants::MASS_PROTON / (2.0 * M_PI * physicalconstants::K_B * T), 1.5) * - exp(- physicalconstants::MASS_PROTON * r * r / (2.0 * physicalconstants::K_B * T)); - - //values[i + WID] = rho * pow(physicalconstants::MASS_PROTON / (2.0 * M_PI * physicalconstants::K_B * T), 1.5) * - // ( exp(- physicalconstants::MASS_PROTON * v * v / (2.0 * physicalconstants::K_B * T)) + - // exp(- physicalconstants::MASS_PROTON * (v+v1) * (v+v1) / (2.0 * physicalconstants::K_B * T))); + exp(- physicalconstants::MASS_PROTON * (r - r1) * (r - r1) / (2.0 * physicalconstants::K_B * T)); + // Move to the end of the cell for the next iteration r = r + 0.5 * dr[i + WID][0]; } print_reconstruction(0, dr, values, blocks_per_dim, r_min); - uint nstep = 200; + uint nstep = 900; Real step = -500.0; for 
(uint istep=0; istep < nstep; ++istep) { diff --git a/mini-apps/amr_reconstruction/plot_reconstructions.py b/mini-apps/amr_reconstruction/plot_reconstructions.py new file mode 100644 index 000000000..b0d80e5e9 --- /dev/null +++ b/mini-apps/amr_reconstruction/plot_reconstructions.py @@ -0,0 +1,15 @@ +import fun_plot_reconstruction +from pylab import * + +mass = list() +peak = list() + +for i in arange(0,910,10): + m,p = fun_plot_reconstruction.plot_reconstruction(i,'log') + mass.append(m) + peak.append(p) + pause(0.01) + +print +print('percentage of mass lost = {:5f} %'.format((mass[0]-mass[-1])/mass[0] * 100)) +print('percentage of peak lost = {:5f} %'.format((peak[0]-peak[-1])/peak[0] * 100)) From 912349c90f965793d58f70539f0335395da05156 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 7 Jun 2018 10:44:48 +0300 Subject: [PATCH 017/602] Added mini-app that builds pencils along a single dimension. Same logic as in the recursive python function. --- mini-apps/build_pencils/Makefile | 16 ++ mini-apps/build_pencils/grid_test.cpp | 375 ++++++++++++++++++++++++++ 2 files changed, 391 insertions(+) create mode 100644 mini-apps/build_pencils/Makefile create mode 100644 mini-apps/build_pencils/grid_test.cpp diff --git a/mini-apps/build_pencils/Makefile b/mini-apps/build_pencils/Makefile new file mode 100644 index 000000000..c05eede04 --- /dev/null +++ b/mini-apps/build_pencils/Makefile @@ -0,0 +1,16 @@ +ARCH=$(VLASIATOR_ARCH) +include ../../MAKE/Makefile.${ARCH} + +FLAGS = -W -Wall -Wextra -pedantic -std=c++11 -O0 +INCLUDES = ${INC_DCCRG} -L$/usr/lib/x86_64-linux-gnu -lboost_program_options -I$/usr/include/boost -L/home/tkoskela/lib/zoltan/Zoltan_v3.83/build/lib -lzoltan -I/home/tkoskela/lib/zoltan/Zoltan_v3.83/build/include + +default: grid_test + +clean: + rm -rf *.o grid_test + +grid_test.o: grid_test.cpp + ${CMP} ${FLAGS} ${INCLUDES} -c $^ + +grid_test: grid_test.o + $(CMP) ${FLAGS} $^ ${INCLUDES} -o $@ diff --git a/mini-apps/build_pencils/grid_test.cpp b/mini-apps/build_pencils/grid_test.cpp new file mode 100644 index 000000000..802351147 --- /dev/null +++ b/mini-apps/build_pencils/grid_test.cpp @@ -0,0 +1,375 @@ +#include "dccrg.hpp" +#include "mpi.h" +#include +#include +#include +#include +#include +#include +#include "../../definitions.h" +#include + +using namespace std; + +struct grid_data { + + int value = 0; + + std::tuple get_mpi_datatype() + { + return std::make_tuple(this, 0, MPI_BYTE); + } + +}; + + +struct setOfPencils { + + uint N; // Number of pencils in the set + std::vector lengthOfPencils; // Lengths of pencils + std::vector ids; // List of cells + std::vector x,y; // x,y - position (Maybe replace with uint width?) 
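+  // (Editor's note, not in the original patch: pencils are stored flattened.
+  // Pencil i occupies lengthOfPencils[i] consecutive entries of ids, so its
+  // start offset is the sum of the lengths of pencils 0..i-1.)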
+ + setOfPencils() { + N = 0; + } + + void addPencil(vector idsIn, Real xIn, Real yIn) { + + N += 1; + lengthOfPencils.push_back(idsIn.size()); + ids.insert(ids.end(),idsIn.begin(),idsIn.end()); + x.push_back(xIn); + y.push_back(yIn); + + } + +}; + + + +void insertVectorIntoVector(vector &v1,vector v2,uint i) { + + vector tmp(v1.begin(),v1.begin() + i); + tmp.insert(tmp.end(),v2.begin(),v2.end()); + tmp.insert(tmp.end(),v1.begin() + i, v1.end()); + v1.clear(); + v1 = tmp; + v2.clear(); + +} + +vector getMyChildren(vector &children, + uint dimension, bool up, bool left ) { + + bool down = !up; + bool right = !left; + + uint i1 = 999; + uint i2 = 999; + + switch(dimension) { + case 0 : + if (up && left) { + i1 = 0; + i2 = 1; + break; + } + if (down && left) { + i1 = 2; + i2 = 3; + break; + } + if (up && right) { + i1 = 4; + i2 = 5; + break; + } + if (down && right) { + i1 = 6; + i2 = 7; + break; + } + case 1 : + if (up && left) { + i1 = 0; + i2 = 2; + break; + } + if (down && left) { + i1 = 1; + i2 = 3; + break; + } + if (up && right) { + i1 = 4; + i2 = 6; + break; + } + if (down && right) { + i1 = 5; + i2 = 7; + break; + } + case 2 : + if (up && left) { + i1 = 0; + i2 = 4; + break; + } + if (down && left) { + i1 = 1; + i2 = 5; + break; + } + if (up && right) { + i1 = 2; + i2 = 6; + break; + } + if (down && right) { + i1 = 3; + i2 = 7; + break; + } + default: + break; + + } + + vector myChildren {children[i1],children[i2]}; + return myChildren; + +} + +void printVector(vector v) { + + for (auto k = v.begin(); k != v.end(); ++k) + std::cout << *k << ' '; + std::cout << "\n"; + +} + +setOfPencils buildPencils( dccrg::Dccrg grid, + setOfPencils &pencils, vector idsOut, + vector idsIn, int dimension, + vector> path) { + + // Not necessary since c++ passes a copy by default. + // Copy the input ids to a working set of ids + // vector ids( idsIn ); + // Copy the already computed pencil to the output list + // vector idsOut( idsInPencil ); + + uint i = 0; + uint length = idsIn.size(); + + // Walk along the input pencil. Initially length is equal to the length of the + // Unrefined pencil. When refined cells are encountered, the length is increased + // accordingly to go through the entire pencil. + while (i < length) { + + uint i1 = i + 1; + uint id = idsIn[i]; + + vector children = grid.get_all_children(id); + bool hasChildren = ( grid.get_parent(children[0]) == id ); + + // Check if the current cell contains refined cells + if (hasChildren) { + + // Check if we have encountered this refinement level before and stored + // the path this builder followed + if (path.size() > grid.get_refinement_level(id)) { + + // Get children using the stored path + vector myChildren = getMyChildren(children,dimension, + get<0>(path[grid.get_refinement_level(id)]), + get<1>(path[grid.get_refinement_level(id)])); + + // Add the children to the working set at index i1 + + insertVectorIntoVector(idsIn,myChildren,i1); + length += myChildren.size(); + + } else { + + // Spawn new builders to construct pencils at the new refinement level + + for (bool up : { true, false }) { + for (bool left : { true, false }) { + + // Store the path this builder has chosen + vector < tuple > myPath = path; + myPath.push_back(tuple(up,left)); + + // Get children along my path. + vector myChildren = getMyChildren(children,dimension,up,left); + // Get the ids that have not been processed yet. + vector remainingIds(idsIn.begin() + i1, idsIn.end()); + + // The current builder continues along the bottom-right path. 
+ // Other paths will spawn a new builder. + if (!up && !left) { + + // Add the children to the working set. Next iteration of the + // main loop (over idsIn) will start on the first child + + // Add the children to the working set at index i1 + insertVectorIntoVector(idsIn,myChildren,i1); + length += myChildren.size(); + path = myPath; + + } else { + + // Create a new working set by adding the remainder of the old + // working set to the end of the current children list + + myChildren.insert(myChildren.end(),remainingIds.begin(),remainingIds.end()); + + buildPencils(grid,pencils,idsOut,myChildren,dimension,myPath); + + }; + + }; + }; + }; + + } else { + + // Add unrefined cells to the pencil directly + + idsOut.push_back(id); + + }; // closes if(isRefined) + + // Move to the next cell + i++; + + }; // closes loop over ids + + pencils.addPencil(idsOut,0.0,0.0); + return pencils; + +} // closes function + +int main(int argc, char* argv[]) { + + if (MPI_Init(&argc, &argv) != MPI_SUCCESS) { + // cerr << "Coudln't initialize MPI." << endl; + abort(); + } + + MPI_Comm comm = MPI_COMM_WORLD; + + int rank = 0, comm_size = 0; + MPI_Comm_rank(comm, &rank); + MPI_Comm_size(comm, &comm_size); + + dccrg::Dccrg grid; + + const int xDim = 9; + const int yDim = 3; + const int zDim = 1; + const std::array grid_size = {{xDim,yDim,zDim}}; + + grid.initialize(grid_size, comm, "RANDOM", 1); + + grid.balance_load(); + + bool doRefine = true; + const std::array refinementIds = {{3,7,40,41}}; + if(doRefine) { + for(uint i = 0; i < refinementIds.size(); i++) { + if(refinementIds[i] > 0) { + grid.refine_completely(refinementIds[i]); + grid.stop_refining(); + } + } + } + + grid.balance_load(); + + auto cells = grid.cells; + sort(cells.begin(), cells.end()); + + vector ids; + + std::cout << "Grid size at 0 refinement is " << xDim << " x " << yDim << " x " << zDim << std::endl; + for (const auto& cell: cells) { + // std::cout << "Data of cell " << cell.id << " is stored at " << cell.data << std::endl; + // Collect a list of cell ids. + ids.push_back(cell.id); + + // Add parent cells of refined cells to the list of cell ids. 
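+    // (Editor's note, not in the original patch: grid.cells lists only leaf
+    // cells, so the coarse lattice positions of refined cells are recovered
+    // through their parents here; buildPencils later descends from each
+    // parent into the children it actually traverses.)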
+ CellID parent = grid.get_parent(cell.id); + if (parent > 0 && + !(std::find(ids.begin(), ids.end(), parent) != ids.end())) { + ids.push_back(parent); + } + } + + sort(ids.begin(),ids.end()); + + uint ibeg = 0; + uint iend = 0; + + list < vector < CellID >> unrefinedPencils; + + std::cout << "The unrefined pencils are :\n"; + for (uint i = 0; i myIds; + ibeg = i * xDim * yDim + j * xDim; + iend = i * xDim * yDim + (j + 1) * xDim; + for (uint k = ibeg; k < iend; k++) { + std::cout << ids[k] << " "; + myIds.push_back(ids[k]); + } + unrefinedPencils.push_back(myIds); + std::cout << "\n"; + } + } + + ibeg = 0; + iend = 0; + + setOfPencils pencilInitial; + vector idsInitial; + uint dimension = 0; + vector> path; + + setOfPencils pencils; + for ( auto &myIds : unrefinedPencils ) { + pencils = buildPencils(grid, pencilInitial, idsInitial, myIds, dimension, path); + } + + std::cout << "I have created the following pencils:\n"; + for (uint i = 0; i < pencils.N; i++) { + iend += pencils.lengthOfPencils[i]; + for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { + std::cout << *j << " "; + } + ibeg = iend; + std::cout << "\n"; + } + + std::ofstream outfile; + + grid.write_vtk_file("test.vtk"); + + outfile.open("test.vtk", std::ofstream::app); + // write each cells id + outfile << "CELL_DATA " << cells.size() << std::endl; + outfile << "SCALARS id int 1" << std::endl; + outfile << "LOOKUP_TABLE default" << std::endl; + for (const auto& cell: cells) { + outfile << cell.id << std::endl; + } + outfile.close(); + + MPI_Finalize(); + + return 0; + +} From 80c9d4104cea67ce8507dfc9d87475ed04c43885 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 7 Jun 2018 17:44:33 +0300 Subject: [PATCH 018/602] Added cpu_sort_ids.hpp to take care of the reordering for different dimensions. --- mini-apps/build_pencils/Makefile | 4 +- mini-apps/build_pencils/cpu_sort_ids.hpp | 137 +++++++++++++++++++++++ mini-apps/build_pencils/grid_test.cpp | 69 ++++++++---- 3 files changed, 186 insertions(+), 24 deletions(-) create mode 100644 mini-apps/build_pencils/cpu_sort_ids.hpp diff --git a/mini-apps/build_pencils/Makefile b/mini-apps/build_pencils/Makefile index c05eede04..7e290df3a 100644 --- a/mini-apps/build_pencils/Makefile +++ b/mini-apps/build_pencils/Makefile @@ -9,8 +9,8 @@ default: grid_test clean: rm -rf *.o grid_test -grid_test.o: grid_test.cpp +grid_test.o: grid_test.cpp cpu_sort_ids.hpp ${CMP} ${FLAGS} ${INCLUDES} -c $^ -grid_test: grid_test.o +grid_test: grid_test.o cpu_sort_ids.hpp $(CMP) ${FLAGS} $^ ${INCLUDES} -o $@ diff --git a/mini-apps/build_pencils/cpu_sort_ids.hpp b/mini-apps/build_pencils/cpu_sort_ids.hpp new file mode 100644 index 000000000..110387426 --- /dev/null +++ b/mini-apps/build_pencils/cpu_sort_ids.hpp @@ -0,0 +1,137 @@ +#include +#include +#include +#include + +#ifndef CPU_SORT_IDS_HPP +#define CPU_SORT_IDS_HPP + + +// Comparator function for sorting vector of pairs +template inline bool paircomparator( const std::pair & l, const std::pair & r ) { + return l.first < r.first; +} + + + +template inline bool sortIds(const uint dimension, + const LENGTH meshSize, + const std::vector& ids, + std::vector< std::pair >& sortedIds){ + + sortedIds.resize(ids.size()); + //TODO conditionally parallel version? 
+#pragma omp parallel for + for (uint i = 0; i < ids.size() ; ++i ) { + const ID id = ids[i]; + + switch( dimension ) { + case 0: { + const ID idMapped = id; // Mapping the block id to different coordinate system if dimension is not zero: + sortedIds[i] = std::make_pair( idMapped, id); + } + break; + case 1: { + // Do operation: + // id = x + y*x_max + z*y_max*x_max + //=> id' = id - (x + y*x_max) + y + x*y_max = x + y*x_max + z*y_max*x_max - (x + y*x_max) + y + x*y_max + // = y + x*y_max + z*y_max*x_max + const ID x_index = id % meshSize[0]; + const ID y_index = (id / meshSize[0]) % meshSize[1]; + + // Mapping the block id to different coordinate system if dimension is not zero: + const ID idMapped = id - (x_index + y_index*meshSize[0]) + y_index + x_index * meshSize[1]; + + sortedIds[i] = std::make_pair( idMapped, id ); + } + break; + case 2: { + // Do operation: + // id = x + y*x_max + z*y_max*x_max + //=> id' = z + y*z_max + x*z_max*y_max + const ID x_index = id % meshSize[0]; + const ID y_index = (id / meshSize[0]) % meshSize[1]; + const ID z_index = (id / (meshSize[0] * meshSize[1])); + + // Mapping the id id to different coordinate system if dimension is not zero: + //const uint idMapped + // = z_indice + // + y_indice * meshSize[2] + // + x_indice*meshSize[1]*meshSize[2]; + const ID idMapped + = z_index + + y_index*meshSize[2] + + x_index*meshSize[1]*meshSize[2]; + sortedIds[i] = std::make_pair( idMapped, id ); + } + break; + } + } + // Finally, sort the list of pairs + std::sort( sortedIds.begin(), sortedIds.end(), paircomparator ); + +} + + + + + + +template void sortIdlistByDimension(const uint dimension, const LENGTH meshSize, + std::vector & ids, + std::vector & columnIdOffsets, + std::vector & columnNumIds, + std::vector & setColumnOffsets, + std::vector & setNumColumns) { + + const uint nIds = ids.size(); + + //sort Ids + std::vector > sortedIdPairs; + sortIds(dimension, meshSize, ids, sortedIdPairs); + + + // Put in the sorted ids, and also compute column offsets and lengths: + columnIdOffsets.push_back(0); //first offset + setColumnOffsets.push_back(0); //first offset + uint prev_column_id, prev_dimension_id; + + for (uint i=0; i 0 && ( column_id != prev_column_id || dimension_id != (prev_dimension_id + 1) )){ + //encountered new column! For i=0, we already entered the correct offset (0). 
+ //We also identify it as a new column if there is a break in the column (e.g., gap between two populations) + /*add offset where the next column will begin*/ + columnIdOffsets.push_back(i); + /*add length of the current column that now ended*/ + columnNumIds.push_back(columnIdOffsets[columnIdOffsets.size()-1] - columnIdOffsets[columnIdOffsets.size()-2]); + + if (column_id != prev_column_id ){ + //encountered new set of columns, add offset to new set starting at present column + setColumnOffsets.push_back(columnIdOffsets.size() - 1); + /*add length of the previous column set that ended*/ + setNumColumns.push_back(setColumnOffsets[setColumnOffsets.size()-1] - setColumnOffsets[setColumnOffsets.size()-2]); + } + } + prev_column_id = column_id; + prev_dimension_id = dimension_id; + } + + columnNumIds.push_back(nIds - columnIdOffsets[columnIdOffsets.size()-1]); + setNumColumns.push_back(columnNumIds.size() - setColumnOffsets[setColumnOffsets.size()-1]); +} + + + + + + +#endif diff --git a/mini-apps/build_pencils/grid_test.cpp b/mini-apps/build_pencils/grid_test.cpp index 802351147..4f508ae4b 100644 --- a/mini-apps/build_pencils/grid_test.cpp +++ b/mini-apps/build_pencils/grid_test.cpp @@ -8,6 +8,8 @@ #include #include "../../definitions.h" #include +#include "cpu_sort_ids.hpp" +#include using namespace std; @@ -153,7 +155,7 @@ void printVector(vector v) { setOfPencils buildPencils( dccrg::Dccrg grid, setOfPencils &pencils, vector idsOut, vector idsIn, int dimension, - vector> path) { + vector> path) { // Not necessary since c++ passes a copy by default. // Copy the input ids to a working set of ids @@ -184,8 +186,8 @@ setOfPencils buildPencils( dccrg::Dccrg grid, // Get children using the stored path vector myChildren = getMyChildren(children,dimension, - get<0>(path[grid.get_refinement_level(id)]), - get<1>(path[grid.get_refinement_level(id)])); + path[grid.get_refinement_level(id)].first, + path[grid.get_refinement_level(id)].second); // Add the children to the working set at index i1 @@ -200,8 +202,8 @@ setOfPencils buildPencils( dccrg::Dccrg grid, for (bool left : { true, false }) { // Store the path this builder has chosen - vector < tuple > myPath = path; - myPath.push_back(tuple(up,left)); + vector < pair > myPath = path; + myPath.push_back(pair(up,left)); // Get children along my path. 
vector myChildren = getMyChildren(children,dimension,up,left); @@ -268,9 +270,9 @@ int main(int argc, char* argv[]) { dccrg::Dccrg grid; - const int xDim = 9; - const int yDim = 3; - const int zDim = 1; + const uint xDim = 9; + const uint yDim = 3; + const uint zDim = 1; const std::array grid_size = {{xDim,yDim,zDim}}; grid.initialize(grid_size, comm, "RANDOM", 1); @@ -314,19 +316,43 @@ int main(int argc, char* argv[]) { uint ibeg = 0; uint iend = 0; - list < vector < CellID >> unrefinedPencils; + uint dimension = 0; + vector dims; + + switch( dimension ) { + case 0 : { + dims = {zDim,yDim,xDim}; + } + break; + case 1 : { + dims = {zDim,xDim,yDim}; + } + break; + case 2 : { + dims = {yDim,xDim,zDim}; + } + break; + default : { + dims = {0,0,0}; + } + }; + map mapping; + sortIds< CellID, dccrg::Grid_Length::type >(dimension, grid_size, ids, mapping); + + list < vector < CellID >> unrefinedPencils; std::cout << "The unrefined pencils are :\n"; - for (uint i = 0; i myIds; - ibeg = i * xDim * yDim + j * xDim; - iend = i * xDim * yDim + (j + 1) * xDim; + for (uint i = 0; i < dims[0]; i++) { + for (uint j = 0; j < dims[1]; j++) { + vector unrefinedIds; + ibeg = 1 + i * dims[2] * dims[1] + j * dims[2]; + iend = 1 + i * dims[2] * dims[1] + (j + 1) * dims[2]; for (uint k = ibeg; k < iend; k++) { - std::cout << ids[k] << " "; - myIds.push_back(ids[k]); + std::cout << mapping[k] << " "; + unrefinedIds.push_back(mapping[k]); + //unrefinedIds.push_back(ids[k]); } - unrefinedPencils.push_back(myIds); + unrefinedPencils.push_back(unrefinedIds); std::cout << "\n"; } } @@ -336,12 +362,11 @@ int main(int argc, char* argv[]) { setOfPencils pencilInitial; vector idsInitial; - uint dimension = 0; - vector> path; - + vector> path; + setOfPencils pencils; - for ( auto &myIds : unrefinedPencils ) { - pencils = buildPencils(grid, pencilInitial, idsInitial, myIds, dimension, path); + for ( auto &unrefinedIds : unrefinedPencils ) { + pencils = buildPencils(grid, pencilInitial, idsInitial, unrefinedIds, dimension, path); } std::cout << "I have created the following pencils:\n"; From 350625bdb1ab2038ce4d86da875b9380437066eb Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 8 Jun 2018 14:50:07 +0300 Subject: [PATCH 019/602] Changes to default values and file format. 
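
[Editor's note, not part of the original commit] The id sorting used by the pencil builders
(cpu_sort_ids.hpp, patches 018 and 020) relies on dccrg's unrefined-cell numbering being
1-based and x-fastest, id = 1 + x + y*x_max + z*x_max*y_max, and rewrites each id so that the
chosen dimension becomes the fastest-running index. A minimal sketch of the dimension-2
mapping follows; it is an editorial illustration with hypothetical mesh sizes, using the
1-based (id-1) indexing that the bug-fix patch below adopts.

    #include <array>
    #include <cassert>
    #include <cstdint>

    using ID = std::uint64_t;

    // Rewrite id so that z runs fastest and x slowest (dimension = 2 case).
    ID mapForDimension2(ID id, const std::array<ID, 3>& meshSize) {
      const ID x = (id - 1) % meshSize[0];
      const ID y = ((id - 1) / meshSize[0]) % meshSize[1];
      const ID z = (id - 1) / (meshSize[0] * meshSize[1]);
      return 1 + z + y * meshSize[2] + x * meshSize[1] * meshSize[2];
    }

    int main() {
      const std::array<ID, 3> meshSize = {4, 3, 2};  // x_max, y_max, z_max
      // Cells (0,0,0) and (0,0,1) are x_max*y_max = 12 ids apart in the
      // original numbering but adjacent after mapping along z.
      assert(mapForDimension2(1, meshSize) == 1);
      assert(mapForDimension2(13, meshSize) == 2);
      return 0;
    }

Sorting the mapped ids (or inserting them as std::map keys, as the bug-fix patch below does)
then yields cell lists that are contiguous along the chosen dimension.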
--- .../amr_reconstruction/fun_plot_reconstruction.py | 13 +++++++------ mini-apps/amr_reconstruction/map_test.cpp | 14 ++++++++------ .../amr_reconstruction/plot_reconstruction.py | 6 +++--- .../amr_reconstruction/plot_reconstructions.py | 2 +- 4 files changed, 19 insertions(+), 16 deletions(-) diff --git a/mini-apps/amr_reconstruction/fun_plot_reconstruction.py b/mini-apps/amr_reconstruction/fun_plot_reconstruction.py index 9ecb01e71..b1d407948 100644 --- a/mini-apps/amr_reconstruction/fun_plot_reconstruction.py +++ b/mini-apps/amr_reconstruction/fun_plot_reconstruction.py @@ -13,10 +13,10 @@ def plot_reconstruction(step,scale='linear'): #args = parser.parse_args() #fname = 'reconstruction_100' - fname = 'reconstruction_{:03d}'.format(step) + fname = 'reconstruction_{:05d}'.format(step) #dat = loadtxt('reconstructions_010.dat') - dat = np.loadtxt('reconstructions_{:03d}.dat'.format(step)) + dat = np.loadtxt('reconstructions_{:05d}.dat'.format(step)) plt.figure(1,figsize=(8,6),dpi = 100) plt.clf() T = 5e5 @@ -26,8 +26,8 @@ def plot_reconstruction(step,scale='linear'): imax = mlab.find(dat[:,1] == max(dat[:,1])) rmax = dat[imax[0],0] - r0 = 2e5 - dr = -500 + r0 = -2e5 + dr = 500 r = dat[:,0] - (r0 + step * dr) f = 1.0e18 * (m_p / (2.0 * np.pi * k_B * T)) ** 1.5 * np.exp(-m_p * r ** 2 / (2.0 * k_B * T)) @@ -43,9 +43,10 @@ def plot_reconstruction(step,scale='linear'): if scale is 'log': plt.ylim(1e-16,1e3) - plt.xlim(-8e5,8e5) + plt.xlim(-1e6,1e6) else: - plt.xlim(-5e5,5e5) + pass + plt.xlim(0e6,2e6) ax = plt.gca() ax.set_yscale(scale) diff --git a/mini-apps/amr_reconstruction/map_test.cpp b/mini-apps/amr_reconstruction/map_test.cpp index 6556cf6c8..68638e455 100644 --- a/mini-apps/amr_reconstruction/map_test.cpp +++ b/mini-apps/amr_reconstruction/map_test.cpp @@ -53,6 +53,7 @@ void propagate(Vec dr[], Vec values[], Real z_translation, uint blocks_per_dim ) for (uint k_cell=0; k_cell < WID; ++k_cell){ uint gid = k_block * WID + k_cell + WID; + //uint gid = (blocks_per_dim + 2) * WID - (k_block * WID + k_cell + WID); // Calculate normalized coordinates in current cell. // The coordinates (scaled units from 0 to 1) between which we will @@ -97,7 +98,8 @@ void propagate(Vec dr[], Vec values[], Real z_translation, uint blocks_per_dim ) for (uint k_cell=0; k_cell Date: Fri, 8 Jun 2018 14:50:38 +0300 Subject: [PATCH 020/602] Bug fixes, modifications to be compatible with map_test --- mini-apps/build_pencils/cpu_sort_ids.hpp | 103 ++++++++++++----------- 1 file changed, 54 insertions(+), 49 deletions(-) diff --git a/mini-apps/build_pencils/cpu_sort_ids.hpp b/mini-apps/build_pencils/cpu_sort_ids.hpp index 110387426..9b430eb9b 100644 --- a/mini-apps/build_pencils/cpu_sort_ids.hpp +++ b/mini-apps/build_pencils/cpu_sort_ids.hpp @@ -2,6 +2,8 @@ #include #include #include +#include +#include #ifndef CPU_SORT_IDS_HPP #define CPU_SORT_IDS_HPP @@ -14,62 +16,65 @@ template inline bool paircomparator( const std::pair & l, -template inline bool sortIds(const uint dimension, +template inline void sortIds(const uint dimension, const LENGTH meshSize, const std::vector& ids, - std::vector< std::pair >& sortedIds){ + std::map& mapping){ - sortedIds.resize(ids.size()); + //sortedIds.resize(ids.size()); //TODO conditionally parallel version? 
#pragma omp parallel for for (uint i = 0; i < ids.size() ; ++i ) { - const ID id = ids[i]; - - switch( dimension ) { - case 0: { - const ID idMapped = id; // Mapping the block id to different coordinate system if dimension is not zero: - sortedIds[i] = std::make_pair( idMapped, id); - } - break; - case 1: { - // Do operation: - // id = x + y*x_max + z*y_max*x_max - //=> id' = id - (x + y*x_max) + y + x*y_max = x + y*x_max + z*y_max*x_max - (x + y*x_max) + y + x*y_max - // = y + x*y_max + z*y_max*x_max - const ID x_index = id % meshSize[0]; - const ID y_index = (id / meshSize[0]) % meshSize[1]; - - // Mapping the block id to different coordinate system if dimension is not zero: - const ID idMapped = id - (x_index + y_index*meshSize[0]) + y_index + x_index * meshSize[1]; - - sortedIds[i] = std::make_pair( idMapped, id ); - } - break; - case 2: { - // Do operation: - // id = x + y*x_max + z*y_max*x_max - //=> id' = z + y*z_max + x*z_max*y_max - const ID x_index = id % meshSize[0]; - const ID y_index = (id / meshSize[0]) % meshSize[1]; - const ID z_index = (id / (meshSize[0] * meshSize[1])); - - // Mapping the id id to different coordinate system if dimension is not zero: - //const uint idMapped - // = z_indice - // + y_indice * meshSize[2] - // + x_indice*meshSize[1]*meshSize[2]; - const ID idMapped - = z_index - + y_index*meshSize[2] - + x_index*meshSize[1]*meshSize[2]; - sortedIds[i] = std::make_pair( idMapped, id ); - } - break; - } + const ID id = ids[i]; + + if (id > meshSize[0] * meshSize[1] * meshSize[2]) + continue; + + switch( dimension ) { + case 0: { + const ID idMapped = id; // Mapping the block id to different coordinate system if dimension is not zero: + mapping[idMapped] = id; + } + break; + case 1: { + // Do operation: + // id = x + y*x_max + z*y_max*x_max + //=> id' = id - (x + y*x_max) + y + x*y_max = x + y*x_max + z*y_max*x_max - (x + y*x_max) + y + x*y_max + // = y + x*y_max + z*y_max*x_max + const ID x_index = (id-1) % meshSize[0]; + const ID y_index = ((id-1) / meshSize[0]) % meshSize[1]; + + // Mapping the block id to different coordinate system if dimension is not zero: + const ID idMapped = id - (x_index + y_index*meshSize[0]) + y_index + x_index * meshSize[1]; + + mapping[idMapped] = id; + } + break; + case 2: { + // Do operation: + // id = x + y*x_max + z*y_max*x_max + //=> id' = z + y*z_max + x*z_max*y_max + const ID x_index = (id-1) % meshSize[0]; + const ID y_index = ((id-1) / meshSize[0]) % meshSize[1]; + const ID z_index = ((id-1) / (meshSize[0] * meshSize[1])); + + // Mapping the id id to different coordinate system if dimension is not zero: + //const uint idMapped + // = z_indice + // + y_indice * meshSize[2] + // + x_indice*meshSize[1]*meshSize[2]; + const ID idMapped = 1 + z_index + y_index*meshSize[2] + x_index*meshSize[1]*meshSize[2]; + mapping[idMapped] = id; } - // Finally, sort the list of pairs - std::sort( sortedIds.begin(), sortedIds.end(), paircomparator ); - + break; + } +} +// Finally, sort the list of pairs +// std::sort( sortedIds.begin(), sortedIds.end(), paircomparator ); + +//for (auto j = 1; j <= mapping.size(); j++) +// std::cout << j << ", " << mapping[j] << "\n"; + } From c8e74ee8d8085c73761e47ce6b857757cabc0fb7 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 8 Jun 2018 14:51:31 +0300 Subject: [PATCH 021/602] Changes to default values for comparison with amr_reconstruction/map_test --- mini-apps/acceleration-vlasiator/map_test.cpp | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git 
a/mini-apps/acceleration-vlasiator/map_test.cpp b/mini-apps/acceleration-vlasiator/map_test.cpp index af9b56f37..b30ff83ff 100644 --- a/mini-apps/acceleration-vlasiator/map_test.cpp +++ b/mini-apps/acceleration-vlasiator/map_test.cpp @@ -166,7 +166,7 @@ void print_reconstruction(int step, Vec values[], uint blocks_per_dim, Real v_m uint i_block, uint j_block, uint j_cell, Real intersection, Real intersection_di, Real intersection_dj, Real intersection_dk){ char name[256]; - sprintf(name,"reconstructions_%03d.dat",step); + sprintf(name,"reconstructions_%05d.dat",step); FILE* fp=fopen(name,"w"); @@ -263,7 +263,7 @@ int main(void) { Real intersection_dj = 0.0 * dv; //does not matter here, fixed j. - const int iterations = 10000; + const int iterations = 1000; /*clear target & values array*/ for (uint k=0; k Date: Tue, 12 Jun 2018 14:13:28 +0300 Subject: [PATCH 022/602] Improved outputs. Enabled compiler optimization. --- mini-apps/build_pencils/Makefile | 2 +- mini-apps/build_pencils/cpu_sort_ids.hpp | 2 +- mini-apps/build_pencils/grid_test.cpp | 18 +++++++++++------- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/mini-apps/build_pencils/Makefile b/mini-apps/build_pencils/Makefile index 7e290df3a..25a33855f 100644 --- a/mini-apps/build_pencils/Makefile +++ b/mini-apps/build_pencils/Makefile @@ -1,7 +1,7 @@ ARCH=$(VLASIATOR_ARCH) include ../../MAKE/Makefile.${ARCH} -FLAGS = -W -Wall -Wextra -pedantic -std=c++11 -O0 +FLAGS = -W -Wall -Wextra -pedantic -std=c++11 -O3 INCLUDES = ${INC_DCCRG} -L$/usr/lib/x86_64-linux-gnu -lboost_program_options -I$/usr/include/boost -L/home/tkoskela/lib/zoltan/Zoltan_v3.83/build/lib -lzoltan -I/home/tkoskela/lib/zoltan/Zoltan_v3.83/build/include default: grid_test diff --git a/mini-apps/build_pencils/cpu_sort_ids.hpp b/mini-apps/build_pencils/cpu_sort_ids.hpp index 9b430eb9b..f25396275 100644 --- a/mini-apps/build_pencils/cpu_sort_ids.hpp +++ b/mini-apps/build_pencils/cpu_sort_ids.hpp @@ -23,7 +23,7 @@ template inline void sortIds(const uint dimension, //sortedIds.resize(ids.size()); //TODO conditionally parallel version? 
-#pragma omp parallel for + //#pragma omp parallel for for (uint i = 0; i < ids.size() ; ++i ) { const ID id = ids[i]; diff --git a/mini-apps/build_pencils/grid_test.cpp b/mini-apps/build_pencils/grid_test.cpp index 4f508ae4b..fc14827e0 100644 --- a/mini-apps/build_pencils/grid_test.cpp +++ b/mini-apps/build_pencils/grid_test.cpp @@ -2,7 +2,6 @@ #include "mpi.h" #include #include -#include #include #include #include @@ -198,8 +197,8 @@ setOfPencils buildPencils( dccrg::Dccrg grid, // Spawn new builders to construct pencils at the new refinement level - for (bool up : { true, false }) { - for (bool left : { true, false }) { + for (bool left : { true, false }) { + for (bool up : { true, false }) { // Store the path this builder has chosen vector < pair > myPath = path; @@ -270,9 +269,9 @@ int main(int argc, char* argv[]) { dccrg::Dccrg grid; - const uint xDim = 9; + const uint xDim = 3; const uint yDim = 3; - const uint zDim = 1; + const uint zDim = 3; const std::array grid_size = {{xDim,yDim,zDim}}; grid.initialize(grid_size, comm, "RANDOM", 1); @@ -280,7 +279,7 @@ int main(int argc, char* argv[]) { grid.balance_load(); bool doRefine = true; - const std::array refinementIds = {{3,7,40,41}}; + const std::array refinementIds = {{3,7,38,58}}; if(doRefine) { for(uint i = 0; i < refinementIds.size(); i++) { if(refinementIds[i] > 0) { @@ -308,6 +307,11 @@ int main(int argc, char* argv[]) { if (parent > 0 && !(std::find(ids.begin(), ids.end(), parent) != ids.end())) { ids.push_back(parent); + std::cout << "Cell " << parent << " at refinement level " << grid.get_refinement_level(parent) << " has been refined into "; + for (const auto& child: grid.get_all_children(parent)) { + std::cout << child << " "; + } + std::cout << "\n"; } } @@ -341,7 +345,7 @@ int main(int argc, char* argv[]) { sortIds< CellID, dccrg::Grid_Length::type >(dimension, grid_size, ids, mapping); list < vector < CellID >> unrefinedPencils; - std::cout << "The unrefined pencils are :\n"; + std::cout << "The unrefined pencils along dimension " << dimension << " are :\n"; for (uint i = 0; i < dims[0]; i++) { for (uint j = 0; j < dims[1]; j++) { vector unrefinedIds; From 5532f384eca35dbf8b3dd6bcf3dc8cac86d51565 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 13 Jun 2018 15:02:34 +0300 Subject: [PATCH 023/602] Re-wrote the pencil builder by using the get_face_neighbors_of - function of dccrg. This has the benefit of requiring no a priori information about the shape or dimensions of the local mesh, which will not be easily available when running with mpi and load balancing. --- .../build_pencils/grid_test_neighbors.cpp | 325 ++++++++++++++++++ 1 file changed, 325 insertions(+) create mode 100644 mini-apps/build_pencils/grid_test_neighbors.cpp diff --git a/mini-apps/build_pencils/grid_test_neighbors.cpp b/mini-apps/build_pencils/grid_test_neighbors.cpp new file mode 100644 index 000000000..7962d715d --- /dev/null +++ b/mini-apps/build_pencils/grid_test_neighbors.cpp @@ -0,0 +1,325 @@ +#include "dccrg.hpp" +#include "mpi.h" +#include +#include +#include +#include +#include +#include "../../definitions.h" +#include "cpu_sort_ids.hpp" + +using namespace std; + +struct grid_data { + + int value = 0; + + std::tuple get_mpi_datatype() + { + return std::make_tuple(this, 0, MPI_BYTE); + } + +}; + +struct setOfPencils { + + uint N; // Number of pencils in the set + std::vector lengthOfPencils; // Lengths of pencils + std::vector ids; // List of cells + std::vector x,y; // x,y - position (Maybe replace with uint width?) 
+ + setOfPencils() { + N = 0; + } + + void addPencil(std::vector idsIn, Real xIn, Real yIn) { + + N += 1; + lengthOfPencils.push_back(idsIn.size()); + ids.insert(ids.end(),idsIn.begin(),idsIn.end()); + x.push_back(xIn); + y.push_back(yIn); + + } + +}; + + +CellID selectNeighbor(dccrg::Dccrg grid, CellID id, int dimension = 0, int path = 0) { + + const auto neighbors = grid.get_face_neighbors_of(id); + + vector < CellID > myNeighbors; + + for (const auto cell : neighbors) { + if (cell.second == dimension + 1) + myNeighbors.push_back(cell.first); + } + + CellID retVal; + + switch( myNeighbors.size() ) { + case 0 : { + // did not find neighbors + retVal = 0; + } + break; + case 1 : { + retVal = myNeighbors[0]; + } + break; + case 4 : { + retVal = myNeighbors[path]; + } + break; + default: { + // something is wrong + retVal = 0; + } + break; + } + + return retVal; + +} + +setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg grid, + setOfPencils &pencils, CellID startingId, + vector ids, int dimension, + vector path) { + + CellID nextNeighbor; + int id = startingId; + int startingRefLvl = grid.get_refinement_level(id); + + if( ids.size() == 0 ) + ids.push_back(startingId); + + // If the cell where we start is refined, we need to figure out which path + // to follow in future refined cells. This is a bit hacky but we have to + // use the order or the children of the parent cell to figure out which + // corner we are in. + // Maybe you could use physical coordinates here? + if( startingRefLvl > 0 ) { + for ( uint i = 0; i < startingRefLvl; i++) { + auto parent = grid.get_parent(id); + auto children = grid.get_all_children(parent); + auto it = std::find(children.begin(),children.end(),id); + auto index = std::distance(children.begin(),it); + auto index2 = index; + switch( dimension ) { + case 0: { + index2 = index / 2; + } + break; + case 1: { + index2 = index - index / 2; + } + break; + case 2: { + index2 = index % 4; + } + break; + } + path.insert(path.begin(),index2); + id = parent; + } + } + + id = startingId; + + while (id > 0) { + + // Find the refinement level in the neighboring cell + nextNeighbor = selectNeighbor(grid,id,dimension); + + if (nextNeighbor == 0) + break; + + int refLvl = grid.get_refinement_level(nextNeighbor); + + //std::cout << "I am cell " << id << ". Next neighbor is " << nextNeighbor << " at refinement level " << refLvl << std::endl; + + if (refLvl == 0 ) { + + // If the neighbor is unrefined, add it to the pencil + ids.push_back(nextNeighbor); + + } else { + + // Check if we have encountered this refinement level before and stored + // the path this builder follows. + if ( path.size() >= refLvl ) { + + // std::cout << " I have seen refinement level " << refLvl << " before. Path is "; + // for (auto k = path.begin(); k != path.end(); ++k) + // std::cout << *k << ' '; + // std::cout << std::endl; + + nextNeighbor = selectNeighbor(grid,id,dimension,path[refLvl-1]); + if (nextNeighbor == 0) + break; + + ids.push_back(nextNeighbor); + + } else { + + // std::cout << " I have NOT seen refinement level " << refLvl << " before. 
Path is "; + // for (auto k = path.begin(); k != path.end(); ++k) + // std::cout << *k << ' '; + // std::cout << std::endl; + + + // New refinement level, create a path through each neighbor cell + for ( uint i : {0,1,2,3} ) { + + vector < int > myPath = path; + myPath.push_back(i); + + nextNeighbor = selectNeighbor(grid,id,dimension,myPath.back()); + if (nextNeighbor == 0) + break; + + if ( i == 3 ) { + + // This builder continues along neighbor 3 + ids.push_back(nextNeighbor); + path = myPath; + + } else { + + // Spawn new builders for neighbors 0,1,2 + buildPencilsWithNeighbors(grid,pencils,id,ids,dimension,myPath); + + } + + } + + } + + } // Closes if (refLvl == 0) + + id = nextNeighbor; + + } // Closes while loop + + pencils.addPencil(ids,0.0,0.0); + return pencils; + +} + + +void printVector(vector v) { + + for (auto k = v.begin(); k != v.end(); ++k) + std::cout << *k << ' '; + std::cout << "\n"; + +} + +int main(int argc, char* argv[]) { + + if (MPI_Init(&argc, &argv) != MPI_SUCCESS) { + // cerr << "Coudln't initialize MPI." << endl; + abort(); + } + + MPI_Comm comm = MPI_COMM_WORLD; + + int rank = 0, comm_size = 0; + MPI_Comm_rank(comm, &rank); + MPI_Comm_size(comm, &comm_size); + + dccrg::Dccrg grid; + + const uint xDim = 9; + const uint yDim = 3; + const uint zDim = 1; + const std::array grid_size = {{xDim,yDim,zDim}}; + + grid.initialize(grid_size, comm, "RANDOM", 1); + + grid.balance_load(); + + bool doRefine = true; + const std::array refinementIds = {{10,14,64,72}}; + if(doRefine) { + for(uint i = 0; i < refinementIds.size(); i++) { + if(refinementIds[i] > 0) { + grid.refine_completely(refinementIds[i]); + grid.stop_refining(); + } + } + } + + grid.balance_load(); + + auto cells = grid.cells; + sort(cells.begin(), cells.end()); + + vector ids; + vector startingIds; + + uint dimension = 0; + + for (const auto& cell: cells) { + // std::cout << "Data of cell " << cell.id << " is stored at " << cell.data << std::endl; + // Collect a list of cell ids. 
+ ids.push_back(cell.id); + + // Collect a list of cell ids that do not have a neighbor on the negative direction + vector negativeNeighbors; + for (auto neighbor : grid.get_face_neighbors_of(cell.id)) { + + if (neighbor.second == - (dimension + 1)) + negativeNeighbors.push_back(neighbor.first); + } + if (negativeNeighbors.size() == 0) + startingIds.push_back(cell.id); + } + + std::cout << "Starting cell ids for pencils are "; + printVector(startingIds); + + sort(ids.begin(),ids.end()); + + vector idsInitial; + vector path; + setOfPencils pencils; + + for (auto id : startingIds) { + pencils = buildPencilsWithNeighbors(grid,pencils,id,idsInitial,dimension,path); + } + + uint ibeg = 0; + uint iend = 0; + + std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; + for (uint i = 0; i < pencils.N; i++) { + iend += pencils.lengthOfPencils[i]; + for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { + std::cout << *j << " "; + } + ibeg = iend; + std::cout << "\n"; + } + + std::ofstream outfile; + + grid.write_vtk_file("test.vtk"); + + outfile.open("test.vtk", std::ofstream::app); + // write each cells id + outfile << "CELL_DATA " << cells.size() << std::endl; + outfile << "SCALARS id int 1" << std::endl; + outfile << "LOOKUP_TABLE default" << std::endl; + for (const auto& cell: cells) { + outfile << cell.id << std::endl; + } + outfile.close(); + + MPI_Finalize(); + + return 0; + +} From 7f64334ce4d4468c173abca5826ca3b025bda9bd Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 13 Jun 2018 15:05:12 +0300 Subject: [PATCH 024/602] Added build options into Makefile --- mini-apps/build_pencils/Makefile | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/mini-apps/build_pencils/Makefile b/mini-apps/build_pencils/Makefile index 25a33855f..6ec287be9 100644 --- a/mini-apps/build_pencils/Makefile +++ b/mini-apps/build_pencils/Makefile @@ -4,7 +4,7 @@ include ../../MAKE/Makefile.${ARCH} FLAGS = -W -Wall -Wextra -pedantic -std=c++11 -O3 INCLUDES = ${INC_DCCRG} -L$/usr/lib/x86_64-linux-gnu -lboost_program_options -I$/usr/include/boost -L/home/tkoskela/lib/zoltan/Zoltan_v3.83/build/lib -lzoltan -I/home/tkoskela/lib/zoltan/Zoltan_v3.83/build/include -default: grid_test +default: grid_test_neighbors clean: rm -rf *.o grid_test @@ -14,3 +14,9 @@ grid_test.o: grid_test.cpp cpu_sort_ids.hpp grid_test: grid_test.o cpu_sort_ids.hpp $(CMP) ${FLAGS} $^ ${INCLUDES} -o $@ + +grid_test_neighbors.o: grid_test_neighbors.cpp cpu_sort_ids.hpp + ${CMP} ${FLAGS} ${INCLUDES} -c $^ + +grid_test_neighbors: grid_test_neighbors.o cpu_sort_ids.hpp + $(CMP) ${FLAGS} $^ ${INCLUDES} -o $@ From b74f454cbf0b018b1216b2e37f129eeb4eb73c74 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 13 Jun 2018 15:06:08 +0300 Subject: [PATCH 025/602] Refined output formatting. 
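The pencil printout walks the flattened pencils.ids storage using the
per-pencil lengths. For reference, the same indexing written as a
stand-alone helper (a sketch only; getPencilIds is a hypothetical name,
not part of this commit):

    // Return the cell ids of pencil i from the flattened storage, in which
    // the ids of all pencils are concatenated back to back.
    std::vector<CellID> getPencilIds(const setOfPencils &pencils, uint i) {
       uint ibeg = 0;
       for (uint j = 0; j < i; ++j) {
          ibeg += pencils.lengthOfPencils[j];
       }
       return std::vector<CellID>(pencils.ids.begin() + ibeg,
                                  pencils.ids.begin() + ibeg + pencils.lengthOfPencils[i]);
    }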
--- mini-apps/build_pencils/grid_test.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mini-apps/build_pencils/grid_test.cpp b/mini-apps/build_pencils/grid_test.cpp index fc14827e0..1e07c0088 100644 --- a/mini-apps/build_pencils/grid_test.cpp +++ b/mini-apps/build_pencils/grid_test.cpp @@ -269,9 +269,9 @@ int main(int argc, char* argv[]) { dccrg::Dccrg grid; - const uint xDim = 3; + const uint xDim = 9; const uint yDim = 3; - const uint zDim = 3; + const uint zDim = 1; const std::array grid_size = {{xDim,yDim,zDim}}; grid.initialize(grid_size, comm, "RANDOM", 1); @@ -279,7 +279,7 @@ int main(int argc, char* argv[]) { grid.balance_load(); bool doRefine = true; - const std::array refinementIds = {{3,7,38,58}}; + const std::array refinementIds = {{10,14,64,72}}; if(doRefine) { for(uint i = 0; i < refinementIds.size(); i++) { if(refinementIds[i] > 0) { @@ -345,7 +345,7 @@ int main(int argc, char* argv[]) { sortIds< CellID, dccrg::Grid_Length::type >(dimension, grid_size, ids, mapping); list < vector < CellID >> unrefinedPencils; - std::cout << "The unrefined pencils along dimension " << dimension << " are :\n"; + std::cout << "The unrefined pencils along dimension " << dimension << " are:\n"; for (uint i = 0; i < dims[0]; i++) { for (uint j = 0; j < dims[1]; j++) { vector unrefinedIds; @@ -373,7 +373,7 @@ int main(int argc, char* argv[]) { pencils = buildPencils(grid, pencilInitial, idsInitial, unrefinedIds, dimension, path); } - std::cout << "I have created the following pencils:\n"; + std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; for (uint i = 0; i < pencils.N; i++) { iend += pencils.lengthOfPencils[i]; for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { From 2863ae6211be3a171842205b33bc1e1a2e92e400 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 13 Jun 2018 15:12:49 +0300 Subject: [PATCH 026/602] Shut up compiler warnings. --- mini-apps/build_pencils/grid_test_neighbors.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/mini-apps/build_pencils/grid_test_neighbors.cpp b/mini-apps/build_pencils/grid_test_neighbors.cpp index 7962d715d..9bb3a780c 100644 --- a/mini-apps/build_pencils/grid_test_neighbors.cpp +++ b/mini-apps/build_pencils/grid_test_neighbors.cpp @@ -85,12 +85,12 @@ CellID selectNeighbor(dccrg::Dccrg grid, CellID id, int dimension = 0 setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg grid, setOfPencils &pencils, CellID startingId, - vector ids, int dimension, + vector ids, uint dimension, vector path) { CellID nextNeighbor; - int id = startingId; - int startingRefLvl = grid.get_refinement_level(id); + uint id = startingId; + uint startingRefLvl = grid.get_refinement_level(id); if( ids.size() == 0 ) ids.push_back(startingId); @@ -136,7 +136,7 @@ setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg grid, if (nextNeighbor == 0) break; - int refLvl = grid.get_refinement_level(nextNeighbor); + uint refLvl = grid.get_refinement_level(nextNeighbor); //std::cout << "I am cell " << id << ". Next neighbor is " << nextNeighbor << " at refinement level " << refLvl << std::endl; @@ -260,14 +260,14 @@ int main(int argc, char* argv[]) { vector ids; vector startingIds; - uint dimension = 0; + int dimension = 0; for (const auto& cell: cells) { // std::cout << "Data of cell " << cell.id << " is stored at " << cell.data << std::endl; // Collect a list of cell ids. 
ids.push_back(cell.id); - // Collect a list of cell ids that do not have a neighbor on the negative direction + // Collect a list of cell ids that do not have a neighbor in the negative direction vector negativeNeighbors; for (auto neighbor : grid.get_face_neighbors_of(cell.id)) { From 5b50697a5e9f001f8979aef813ed0e7e0e8c837e Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 13 Jun 2018 15:44:31 +0300 Subject: [PATCH 027/602] Fixed bug that appended path twice when entering a higher refinement level. --- .../build_pencils/grid_test_neighbors.cpp | 69 ++++++++++--------- 1 file changed, 36 insertions(+), 33 deletions(-) diff --git a/mini-apps/build_pencils/grid_test_neighbors.cpp b/mini-apps/build_pencils/grid_test_neighbors.cpp index 9bb3a780c..f21944b57 100644 --- a/mini-apps/build_pencils/grid_test_neighbors.cpp +++ b/mini-apps/build_pencils/grid_test_neighbors.cpp @@ -88,6 +88,7 @@ setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg grid, vector ids, uint dimension, vector path) { + const bool debug = true; CellID nextNeighbor; uint id = startingId; uint startingRefLvl = grid.get_refinement_level(id); @@ -100,8 +101,8 @@ setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg grid, // use the order or the children of the parent cell to figure out which // corner we are in. // Maybe you could use physical coordinates here? - if( startingRefLvl > 0 ) { - for ( uint i = 0; i < startingRefLvl; i++) { + if( startingRefLvl > path.size() ) { + for ( uint i = path.size(); i < startingRefLvl; i++) { auto parent = grid.get_parent(id); auto children = grid.get_all_children(parent); auto it = std::find(children.begin(),children.end(),id); @@ -138,37 +139,31 @@ setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg grid, uint refLvl = grid.get_refinement_level(nextNeighbor); - //std::cout << "I am cell " << id << ". Next neighbor is " << nextNeighbor << " at refinement level " << refLvl << std::endl; + if (refLvl > 0) { - if (refLvl == 0 ) { - - // If the neighbor is unrefined, add it to the pencil - ids.push_back(nextNeighbor); - - } else { - // Check if we have encountered this refinement level before and stored // the path this builder follows. if ( path.size() >= refLvl ) { + + if(debug) { + std::cout << "I am cell " << id << ". "; + std::cout << "I have seen refinement level " << refLvl << " before. Path is "; + for (auto k = path.begin(); k != path.end(); ++k) + std::cout << *k << " "; + std::cout << std::endl; + } - // std::cout << " I have seen refinement level " << refLvl << " before. Path is "; - // for (auto k = path.begin(); k != path.end(); ++k) - // std::cout << *k << ' '; - // std::cout << std::endl; - - nextNeighbor = selectNeighbor(grid,id,dimension,path[refLvl-1]); - if (nextNeighbor == 0) - break; - - ids.push_back(nextNeighbor); + nextNeighbor = selectNeighbor(grid,id,dimension,path[refLvl-1]); } else { - - // std::cout << " I have NOT seen refinement level " << refLvl << " before. Path is "; - // for (auto k = path.begin(); k != path.end(); ++k) - // std::cout << *k << ' '; - // std::cout << std::endl; - + + if(debug) { + std::cout << "I am cell " << id << ". "; + std::cout << "I have NOT seen refinement level " << refLvl << " before. 
Path is "; + for (auto k = path.begin(); k != path.end(); ++k) + std::cout << *k << ' '; + std::cout << std::endl; + } // New refinement level, create a path through each neighbor cell for ( uint i : {0,1,2,3} ) { @@ -177,17 +172,15 @@ setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg grid, myPath.push_back(i); nextNeighbor = selectNeighbor(grid,id,dimension,myPath.back()); - if (nextNeighbor == 0) - break; if ( i == 3 ) { - + // This builder continues along neighbor 3 ids.push_back(nextNeighbor); path = myPath; } else { - + // Spawn new builders for neighbors 0,1,2 buildPencilsWithNeighbors(grid,pencils,id,ids,dimension,myPath); @@ -197,8 +190,18 @@ setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg grid, } - } // Closes if (refLvl == 0) - + } else { + if(debug) { + std::cout << "I am cell " << id << ". "; + std::cout << " I am on refinement level 0." << std::endl; + } + }// Closes if (refLvl == 0) + if(nextNeighbor > 0) { + if (debug) { + std::cout << " Next neighbor is " << nextNeighbor << "." << std::endl; + } + ids.push_back(nextNeighbor); + } id = nextNeighbor; } // Closes while loop @@ -242,7 +245,7 @@ int main(int argc, char* argv[]) { grid.balance_load(); bool doRefine = true; - const std::array refinementIds = {{10,14,64,72}}; + const std::array refinementIds = {{14,64,72}}; if(doRefine) { for(uint i = 0; i < refinementIds.size(); i++) { if(refinementIds[i] > 0) { From 947c22a613553690b582cd75ed8979ed9b1d1e98 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 14 Jun 2018 11:17:50 +0300 Subject: [PATCH 028/602] Minor cleanups: using INVALID_CELLID when neighbors are not found, using uints instead of ints for indices, etc. --- .../build_pencils/grid_test_neighbors.cpp | 55 +++++++++++-------- 1 file changed, 33 insertions(+), 22 deletions(-) diff --git a/mini-apps/build_pencils/grid_test_neighbors.cpp b/mini-apps/build_pencils/grid_test_neighbors.cpp index f21944b57..0cadfc1e2 100644 --- a/mini-apps/build_pencils/grid_test_neighbors.cpp +++ b/mini-apps/build_pencils/grid_test_neighbors.cpp @@ -6,7 +6,7 @@ #include #include #include "../../definitions.h" -#include "cpu_sort_ids.hpp" +#include "../../parameters.h" using namespace std; @@ -26,7 +26,7 @@ struct setOfPencils { uint N; // Number of pencils in the set std::vector lengthOfPencils; // Lengths of pencils std::vector ids; // List of cells - std::vector x,y; // x,y - position (Maybe replace with uint width?) + std::vector x,y; // x,y - position (Maybe replace with uint refinement level or Real width?) setOfPencils() { N = 0; @@ -45,50 +45,53 @@ struct setOfPencils { }; -CellID selectNeighbor(dccrg::Dccrg grid, CellID id, int dimension = 0, int path = 0) { +CellID selectNeighbor(dccrg::Dccrg &grid, CellID id, int dimension = 0, uint path = 0) { const auto neighbors = grid.get_face_neighbors_of(id); vector < CellID > myNeighbors; - + // Collect neighbor ids in the positive direction of the chosen dimension. + // Note that dimension indexing starts from 1 (of course it does) for (const auto cell : neighbors) { if (cell.second == dimension + 1) myNeighbors.push_back(cell.first); } - CellID retVal; + CellID neighbor; switch( myNeighbors.size() ) { + // Since refinement can only increase by 1 level the only possibilities + // Should be no neighbors, 1 neighbor or 4 neighbors. 
case 0 : { // did not find neighbors - retVal = 0; + neighbor = INVALID_CELLID; } break; case 1 : { - retVal = myNeighbors[0]; + neighbor = myNeighbors[0]; } break; case 4 : { - retVal = myNeighbors[path]; + neighbor = myNeighbors[path]; } break; default: { // something is wrong - retVal = 0; + neighbor = INVALID_CELLID; } break; } - return retVal; + return neighbor; } -setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg grid, +setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg &grid, setOfPencils &pencils, CellID startingId, vector ids, uint dimension, - vector path) { + vector path) { - const bool debug = true; + const bool debug = false; CellID nextNeighbor; uint id = startingId; uint startingRefLvl = grid.get_refinement_level(id); @@ -108,6 +111,7 @@ setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg grid, auto it = std::find(children.begin(),children.end(),id); auto index = std::distance(children.begin(),it); auto index2 = index; + switch( dimension ) { case 0: { index2 = index / 2; @@ -131,9 +135,11 @@ setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg grid, while (id > 0) { - // Find the refinement level in the neighboring cell + // Find the refinement level in the neighboring cell. Any neighbor will do + // since refinement level can only increase by 1 between neighbors. nextNeighbor = selectNeighbor(grid,id,dimension); + // If there are no neighbors, we can stop. if (nextNeighbor == 0) break; @@ -141,8 +147,9 @@ setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg grid, if (refLvl > 0) { - // Check if we have encountered this refinement level before and stored - // the path this builder follows. + // If we have encountered this refinement level before and stored + // the path this builder follows, we will just take the same path + // again. if ( path.size() >= refLvl ) { if(debug) { @@ -168,14 +175,14 @@ setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg grid, // New refinement level, create a path through each neighbor cell for ( uint i : {0,1,2,3} ) { - vector < int > myPath = path; + vector < uint > myPath = path; myPath.push_back(i); nextNeighbor = selectNeighbor(grid,id,dimension,myPath.back()); if ( i == 3 ) { - // This builder continues along neighbor 3 + // This builder continues with neighbor 3 ids.push_back(nextNeighbor); path = myPath; @@ -196,12 +203,16 @@ setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg grid, std::cout << " I am on refinement level 0." << std::endl; } }// Closes if (refLvl == 0) - if(nextNeighbor > 0) { + + // If we found a neighbor, add it to the list of ids for this pencil. + if(nextNeighbor != INVALID_CELLID) { if (debug) { std::cout << " Next neighbor is " << nextNeighbor << "." << std::endl; } ids.push_back(nextNeighbor); } + + // Move to the next cell. 
id = nextNeighbor; } // Closes while loop @@ -245,7 +256,7 @@ int main(int argc, char* argv[]) { grid.balance_load(); bool doRefine = true; - const std::array refinementIds = {{14,64,72}}; + const std::array refinementIds = {{10,14,64,72}}; if(doRefine) { for(uint i = 0; i < refinementIds.size(); i++) { if(refinementIds[i] > 0) { @@ -263,7 +274,7 @@ int main(int argc, char* argv[]) { vector ids; vector startingIds; - int dimension = 0; + int dimension = 1; for (const auto& cell: cells) { // std::cout << "Data of cell " << cell.id << " is stored at " << cell.data << std::endl; @@ -287,7 +298,7 @@ int main(int argc, char* argv[]) { sort(ids.begin(),ids.end()); vector idsInitial; - vector path; + vector path; setOfPencils pencils; for (auto id : startingIds) { From afe9e4e3328e3ff28a6dbb81252b00ae223ed9b7 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 14 Jun 2018 11:31:22 +0300 Subject: [PATCH 029/602] Added error handling to unexpected neighbors size. Cleaned up extra dependency in Makefile. --- mini-apps/build_pencils/Makefile | 4 ++-- mini-apps/build_pencils/grid_test_neighbors.cpp | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/mini-apps/build_pencils/Makefile b/mini-apps/build_pencils/Makefile index 6ec287be9..aef782704 100644 --- a/mini-apps/build_pencils/Makefile +++ b/mini-apps/build_pencils/Makefile @@ -15,8 +15,8 @@ grid_test.o: grid_test.cpp cpu_sort_ids.hpp grid_test: grid_test.o cpu_sort_ids.hpp $(CMP) ${FLAGS} $^ ${INCLUDES} -o $@ -grid_test_neighbors.o: grid_test_neighbors.cpp cpu_sort_ids.hpp +grid_test_neighbors.o: grid_test_neighbors.cpp ${CMP} ${FLAGS} ${INCLUDES} -c $^ -grid_test_neighbors: grid_test_neighbors.o cpu_sort_ids.hpp +grid_test_neighbors: grid_test_neighbors.o $(CMP) ${FLAGS} $^ ${INCLUDES} -o $@ diff --git a/mini-apps/build_pencils/grid_test_neighbors.cpp b/mini-apps/build_pencils/grid_test_neighbors.cpp index 0cadfc1e2..9328261f2 100644 --- a/mini-apps/build_pencils/grid_test_neighbors.cpp +++ b/mini-apps/build_pencils/grid_test_neighbors.cpp @@ -61,7 +61,7 @@ CellID selectNeighbor(dccrg::Dccrg &grid, CellID id, int dimension = switch( myNeighbors.size() ) { // Since refinement can only increase by 1 level the only possibilities - // Should be no neighbors, 1 neighbor or 4 neighbors. + // Should be 0 neighbors, 1 neighbor or 4 neighbors. case 0 : { // did not find neighbors neighbor = INVALID_CELLID; @@ -78,6 +78,7 @@ CellID selectNeighbor(dccrg::Dccrg &grid, CellID id, int dimension = default: { // something is wrong neighbor = INVALID_CELLID; + throw "Invalid neighbor count!"; } break; } @@ -274,7 +275,7 @@ int main(int argc, char* argv[]) { vector ids; vector startingIds; - int dimension = 1; + int dimension = 0; for (const auto& cell: cells) { // std::cout << "Data of cell " << cell.id << " is stored at " << cell.data << std::endl; From a5feb00d10451004df49c3c143972d8f4d9426d9 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 14 Jun 2018 11:32:12 +0300 Subject: [PATCH 030/602] Changed datatype of maxdr from Real to Realv to compile when precision is changed in Makefile. Default precision is now SP. 
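For context, the precision flags select the scalar typedefs roughly as
follows. This is a simplified sketch of what definitions.h ends up with;
the exact preprocessor symbols are assumed rather than quoted:

    // Selected by FP_PRECISION in the Makefile (assumed to pass e.g. -DDP or -DSP):
    #ifdef DP
    typedef double Real;   // double precision for mesh/field quantities
    #else
    typedef float Real;    // single precision
    #endif
    // Realv is the scalar type of one lane of the Vec vector backend, so the
    // running maximum of dr[i][0] must be declared Realv; with mixed precision
    // (e.g. Real = double, Vec lanes = float) the old 'Real maxdr' did not
    // compile cleanly.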
---
 mini-apps/amr_reconstruction/Makefile     | 4 ++--
 mini-apps/amr_reconstruction/map_test.cpp | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/mini-apps/amr_reconstruction/Makefile b/mini-apps/amr_reconstruction/Makefile
index 4e5866cac..b8c4a5f2e 100644
--- a/mini-apps/amr_reconstruction/Makefile
+++ b/mini-apps/amr_reconstruction/Makefile
@@ -3,10 +3,10 @@ ARCH = $(VLASIATOR_ARCH)
 include ../../MAKE/Makefile.${ARCH}

 #set FP precision to SP (single) or DP (double)
-FP_PRECISION = DP
+FP_PRECISION = SP

 #Set floating point precision for distribution function to SPF (single) or DPF (double)
-DISTRIBUTION_FP_PRECISION = DPF
+DISTRIBUTION_FP_PRECISION = SPF

 #Set vector backend type for vlasov solvers, sets precision and length.
 #This mini-app only supports length 4(!)
diff --git a/mini-apps/amr_reconstruction/map_test.cpp b/mini-apps/amr_reconstruction/map_test.cpp
index 68638e455..25bca82c9 100644
--- a/mini-apps/amr_reconstruction/map_test.cpp
+++ b/mini-apps/amr_reconstruction/map_test.cpp
@@ -32,7 +32,7 @@ void propagate(Vec dr[], Vec values[], Real z_translation, uint blocks_per_dim )

   // Vector buffer where we write data, initialized to 0
   Vec targetValues[(blocks_per_dim + 2) * WID];

-  Real maxdr = 1.0e-8;
+  Realv maxdr = 1.0e-8;

   for (uint i = 0; i < (blocks_per_dim + 2) * WID; i++) {
     maxdr = max(maxdr,dr[i][0]);

From 1648e1769a67f2150be9234afe1e1a944b2d33e0 Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Thu, 14 Jun 2018 12:18:12 +0300
Subject: [PATCH 031/602] Removed some unused code.

---
 mini-apps/amr_reconstruction/map_test.cpp | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/mini-apps/amr_reconstruction/map_test.cpp b/mini-apps/amr_reconstruction/map_test.cpp
index 25bca82c9..83cfc9b16 100644
--- a/mini-apps/amr_reconstruction/map_test.cpp
+++ b/mini-apps/amr_reconstruction/map_test.cpp
@@ -31,12 +31,6 @@ void propagate(Vec dr[], Vec values[], Real z_translation, uint blocks_per_dim )

   // Vector buffer where we write data, initialized to 0
   Vec targetValues[(blocks_per_dim + 2) * WID];
-
-  Realv maxdr = 1.0e-8;
-
-  for (uint i = 0; i < (blocks_per_dim + 2) * WID; i++) {
-    maxdr = max(maxdr,dr[i][0]);
-  }

From a56b806576851890b6679d7ba8a3eefd83c60028 Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Thu, 14 Jun 2018 12:18:25 +0300
Subject: [PATCH 032/602] Added real space coordinates to pencils.
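The transverse coordinate pair stored for each pencil follows a cyclic
index mapping; the explicit switch in the diff below is equivalent to the
following sketch (the committed code keeps the switch for readability):

    // For a pencil along dimension d the transverse directions are the other
    // two axes in cyclic order: d=0 stores (y,z), d=1 stores (z,x), d=2 stores (x,y).
    const uint ix = (dimension + 1) % 3;
    const uint iy = (dimension + 2) % 3;
    x = coordinates[ix];
    y = coordinates[iy];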
--- .../build_pencils/grid_test_neighbors.cpp | 58 ++++++++++++++----- 1 file changed, 43 insertions(+), 15 deletions(-) diff --git a/mini-apps/build_pencils/grid_test_neighbors.cpp b/mini-apps/build_pencils/grid_test_neighbors.cpp index 9328261f2..fd587a52c 100644 --- a/mini-apps/build_pencils/grid_test_neighbors.cpp +++ b/mini-apps/build_pencils/grid_test_neighbors.cpp @@ -65,23 +65,23 @@ CellID selectNeighbor(dccrg::Dccrg &grid, CellID id, int dimension = case 0 : { // did not find neighbors neighbor = INVALID_CELLID; - } break; + } case 1 : { neighbor = myNeighbors[0]; - } break; + } case 4 : { neighbor = myNeighbors[path]; - } break; + } default: { // something is wrong neighbor = INVALID_CELLID; throw "Invalid neighbor count!"; - } break; } + } return neighbor; @@ -116,17 +116,17 @@ setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg &grid, switch( dimension ) { case 0: { index2 = index / 2; - } break; + } case 1: { index2 = index - index / 2; - } break; + } case 2: { index2 = index % 4; - } break; } + } path.insert(path.begin(),index2); id = parent; } @@ -217,8 +217,33 @@ setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg &grid, id = nextNeighbor; } // Closes while loop - - pencils.addPencil(ids,0.0,0.0); + + // Get the x,y - coordinates of the pencil (in the direction perpendicular to the pencil) + const auto coordinates = grid.get_center(ids[0]); + double x,y; + switch(dimension) { + case 0: { + x = coordinates[1]; + y = coordinates[2]; + break; + } + case 1: { + x = coordinates[0]; + y = coordinates[2]; + break; + } + case 2: { + x = coordinates[0]; + y = coordinates[1]; + break; + } + default: { + x = 0.0; + y = 0.0; + break; + } + } + pencils.addPencil(ids,x,y); return pencils; } @@ -247,17 +272,19 @@ int main(int argc, char* argv[]) { dccrg::Dccrg grid; + // paremeters const uint xDim = 9; const uint yDim = 3; const uint zDim = 1; const std::array grid_size = {{xDim,yDim,zDim}}; + const int dimension = 0; + const bool doRefine = true; + const std::array refinementIds = {{10,14,64,72}}; grid.initialize(grid_size, comm, "RANDOM", 1); grid.balance_load(); - bool doRefine = true; - const std::array refinementIds = {{10,14,64,72}}; if(doRefine) { for(uint i = 0; i < refinementIds.size(); i++) { if(refinementIds[i] > 0) { @@ -273,9 +300,7 @@ int main(int argc, char* argv[]) { sort(cells.begin(), cells.end()); vector ids; - vector startingIds; - - int dimension = 0; + vector startingIds; for (const auto& cell: cells) { // std::cout << "Data of cell " << cell.id << " is stored at " << cell.data << std::endl; @@ -310,13 +335,16 @@ int main(int argc, char* argv[]) { uint iend = 0; std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; + std::cout << "(x, y): indices " << std::endl; + std::cout << "-----------------------------------------------------------------" << std::endl; for (uint i = 0; i < pencils.N; i++) { iend += pencils.lengthOfPencils[i]; + std::cout << "(" << pencils.x[i] << ", " << pencils.y[i] << "): "; for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { std::cout << *j << " "; } ibeg = iend; - std::cout << "\n"; + std::cout << std::endl; } std::ofstream outfile; From 45204f66d5728cc39d1495dfb0735d53bc42990f Mon Sep 17 00:00:00 2001 From: tkoskela Date: Sat, 23 Jun 2018 18:34:42 +0300 Subject: [PATCH 033/602] Added a incomplete draft for cpu_trans_map_amr.cpp --- vlasovsolver/cpu_trans_map_amr.cpp | 162 +++++++++++++++++++++++++++++ 1 file changed, 162 insertions(+) create mode 100644 
vlasovsolver/cpu_trans_map_amr.cpp diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp new file mode 100644 index 000000000..b14effc94 --- /dev/null +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -0,0 +1,162 @@ +bool trans_map_1d_amr(const dccrg::Dccrg& mpiGrid, + const vector& localPropagatedCells, + const vector& remoteTargetCells, + const uint dimension, + const Realv dt, + const uint popID) { + + + // values used with an stencil in 1 dimension, initialized to 0. + // Contains a block, and its spatial neighbours in one dimension. + vector dz; + Realv z_min, dvz,vz_min; + uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ + unsigned char cellid_transpose[WID3]; /*< defines the transpose for the solver internal (transposed) id: i + j*WID + k*WID2 to actual one*/ + + if(localPropagatedCells.size() == 0) + return true; + //vector with all cells + vector allCells(localPropagatedCells); + allCells.insert(allCells.end(), remoteTargetCells.begin(), remoteTargetCells.end()); + + const uint nSourceNeighborsPerCell = 1 + 2 * VLASOV_STENCIL_WIDTH; + std::vector allCellsPointer(allCells.size()); + std::vector sourceNeighbors(localPropagatedCells.size() * nSourceNeighborsPerCell); + std::vector targetNeighbors(3 * localPropagatedCells.size() ); + + +#pragma omp parallel for + for(uint celli = 0; celli < allCells.size(); celli++){ + allCellsPointer[celli] = mpiGrid[allCells[celli]]; + } + + + vector seedIds; + +#pragma omp parallel for + for(uint celli = 0; celli < localPropagatedCells.size(); celli++){ + // compute spatial neighbors, separately for targets and source. In + // source cells we have a wider stencil and take into account + // boundaries. For targets we only have actual cells as we do not + // want to propagate boundary cells (array may contain + // INVALID_CELLIDs at boundaries). + compute_spatial_source_neighbors(mpiGrid, localPropagatedCells[celli], dimension, + sourceNeighbors.data() + celli * nSourceNeighborsPerCell); + compute_spatial_target_neighbors(mpiGrid, localPropagatedCells[celli], dimension, + targetNeighbors.data() + celli * 3); + + // Collect a list of cell ids that do not have a neighbor in the negative direction + // These are the seed ids for the pencils. + vector negativeNeighbors; + for (const auto neighbor : grid.get_face_neighbors_of(celli)) { + + if (neighbor.second == - (dimension + 1)) + negativeNeighbors.push_back(neighbor.first); + } + if (negativeNeighbors.size() == 0) + seedIds.push_back(celli); + } + + // compute pencils => set of pencils (shared datastructure) + vector ids; + vector path; + setOfPencils pencils; + + for (const auto id : seedIds) { + pencils = buildPencilsWithNeighbors(mpiGrid, pencils, id, ids, dimension, path); + } + + // Get a unique sorted list of blockids that are in any of the + // propagated cells. 
First use set for this, then add to vector (may not + // be the most nice way to do this and in any case we could do it along + // dimension for data locality reasons => copy acc map column code, TODO: FIXME + std::unordered_set unionOfBlocksSet; + + for(uint celli = 0; celli < allCellsPointer.size(); celli++) { + vmesh::VelocityMesh& vmesh = allCellsPointer[celli]->get_velocity_mesh(popID); + for (vmesh::LocalID block_i=0; block_i< vmesh.size(); ++block_i) { + unionOfBlocksSet.insert(vmesh.getGlobalID(block_i)); + } + } + + std::vector unionOfBlocks; + unionOfBlocks.reserve(unionOfBlocksSet.size()); + for(const auto blockGID: unionOfBlocksSet) { + unionOfBlocks.push_back(blockGID); + } + + // Fiddle indices x,y,z + + const uint8_t VMESH_REFLEVEL=0; + const vmesh::VelocityMesh& vmesh = allCellsPointer[0]->get_velocity_mesh(popID); + // set cell size in dimension direction + dvz = vmesh.getCellSize(VMESH_REFLEVEL)[dimension]; + vz_min = vmesh.getMeshMinLimits()[dimension]; + switch (dimension) { + case 0: + // TODO: Query the pencil for the dz's? Do this inside the loop over pencils + dz = ; + z_min = P::xmin; + // set values in array that is used to convert block indices + // to global ID using a dot product. + cell_indices_to_id[0]=WID2; + cell_indices_to_id[1]=WID; + cell_indices_to_id[2]=1; + break; + case 1: + z_min = P::ymin; + // set values in array that is used to convert block indices + // to global ID using a dot product + cell_indices_to_id[0]=1; + cell_indices_to_id[1]=WID2; + cell_indices_to_id[2]=WID; + break; + case 2: + z_min = P::zmin; + // set values in array that is used to convert block indices + // to global id using a dot product. + cell_indices_to_id[0]=1; + cell_indices_to_id[1]=WID; + cell_indices_to_id[2]=WID2; + break; + default: + cerr << __FILE__ << ":"<< __LINE__ << " Wrong dimension, abort"< targetBlockData(3 * localPropagatedCells.size() * WID3); + std::vector targetsValid(localPropagatedCells.size()); + std::vector allCellsBlockLocalID(allCells.size()); + +#pragma omp for schedule(guided) + // Loop over unionOfBlocks + for(uint blocki = 0; blocki < unionOfBlocks.size(); blocki++){ + vmesh::GlobalID blockGID = unionOfBlocks[blocki]; + phiprof::start(t1); + + // Loop over sets of pencils + + // Loop over pencils + + } + } +} From 86625ab5ae90e1603a09be03f07e9b24f05fb1b6 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 2 Aug 2018 14:05:01 +0300 Subject: [PATCH 034/602] Removed unused code, modified axis limits --- mini-apps/amr_reconstruction/fun_plot_reconstruction.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/mini-apps/amr_reconstruction/fun_plot_reconstruction.py b/mini-apps/amr_reconstruction/fun_plot_reconstruction.py index b1d407948..12651f72f 100644 --- a/mini-apps/amr_reconstruction/fun_plot_reconstruction.py +++ b/mini-apps/amr_reconstruction/fun_plot_reconstruction.py @@ -23,9 +23,6 @@ def plot_reconstruction(step,scale='linear'): m_p = 1.67262158e-27 k_B = 1.3806503e-23 - imax = mlab.find(dat[:,1] == max(dat[:,1])) - rmax = dat[imax[0],0] - r0 = -2e5 dr = 500 @@ -46,7 +43,7 @@ def plot_reconstruction(step,scale='linear'): plt.xlim(-1e6,1e6) else: pass - plt.xlim(0e6,2e6) + plt.xlim(-0.6e6,0.6e6) ax = plt.gca() ax.set_yscale(scale) From 5d99c61830a28b9a109a903f887249b3753b3c8d Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 2 Aug 2018 14:06:28 +0300 Subject: [PATCH 035/602] Added extremum conserving limiter to ppm evaluation. The map_test - mini-app can be used to experiment with it. 
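The PPM coefficients a[0..2] written by compute_ppm_coeff_nonuniform carry
the integration factors of White (2008) folded in, so the mass between
normalized coordinates 0 and t of a cell is

    integral(t) = a[0]*t + a[1]*t^2 + a[2]*t^3

A quick consistency check, writing m = m_face, p = p_face and v = values[k]:

    integral(1) = a[0] + a[1] + a[2]
                = m + (3v - 2m - p) + (m + p - 2v)
                = v

so integrating the reconstruction over the whole cell always recovers the
cell average, and the remap stays conservative regardless of which face
values the new limiter selects.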
--- mini-apps/amr_reconstruction/map_test.cpp | 13 +- .../cpu_1d_ppm_nonuniform_conserving.hpp | 72 ++++++++++ vlasovsolver/cpu_face_estimates.hpp | 128 ++++++++++++++++++ 3 files changed, 210 insertions(+), 3 deletions(-) create mode 100644 vlasovsolver/cpu_1d_ppm_nonuniform_conserving.hpp diff --git a/mini-apps/amr_reconstruction/map_test.cpp b/mini-apps/amr_reconstruction/map_test.cpp index 83cfc9b16..4cb261938 100644 --- a/mini-apps/amr_reconstruction/map_test.cpp +++ b/mini-apps/amr_reconstruction/map_test.cpp @@ -2,7 +2,8 @@ #include "common.h" #include "vlasovsolver/vec.h" //#include "vlasovsolver/cpu_1d_ppm.hpp" -#include "vlasovsolver/cpu_1d_ppm_nonuniform.hpp" +//#include "vlasovsolver/cpu_1d_ppm_nonuniform.hpp" +#include "vlasovsolver/cpu_1d_ppm_nonuniform_conserving.hpp" #include #include #include @@ -134,7 +135,7 @@ void print_reconstruction(int step, Vec dr[], Vec values[], uint blocks_per_dim 3.0 * r_norm * r_norm * a[2]; #endif - fprintf(fp,"%20.12g %20.12g %20.12g\n", r[0], values[k_block * WID + k_cell + WID][0], target[0]); + fprintf(fp,"%20.12g %20.12e %20.12e\n", r[0], values[k_block * WID + k_cell + WID][0], target[0]); } //fprintf(fp,"\n"); //empty line to deay wgments in gnuplot } @@ -201,7 +202,13 @@ int main(void) { // Evaluate the function at the middle of the cell r = r + 0.5 * dr[i + WID][0]; values[i + WID] = rho * pow(physicalconstants::MASS_PROTON / (2.0 * M_PI * physicalconstants::K_B * T), 1.5) * - exp(- physicalconstants::MASS_PROTON * (r - r1) * (r - r1) / (2.0 * physicalconstants::K_B * T)); + exp(- physicalconstants::MASS_PROTON * (r - r1) * (r - r1) / (2.0 * physicalconstants::K_B * T)); + + // if (r < 0.0 && r_min - 10.0 * r < 0.0) { + // values[i + WID] = abs(r_min - 10.0 * r); + // } else { + // values[i + WID] = 0.0; + // } // Move r to the end of the cell for the next iteration r = r + 0.5 * dr[i + WID][0]; diff --git a/vlasovsolver/cpu_1d_ppm_nonuniform_conserving.hpp b/vlasovsolver/cpu_1d_ppm_nonuniform_conserving.hpp new file mode 100644 index 000000000..3e30e6d32 --- /dev/null +++ b/vlasovsolver/cpu_1d_ppm_nonuniform_conserving.hpp @@ -0,0 +1,72 @@ +/* + * This file is part of Vlasiator. + * Copyright 2010-2016 Finnish Meteorological Institute + * + * For details of usage, see the COPYING file and read the "Rules of the Road" + * at http://www.physics.helsinki.fi/vlasiator/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#ifndef CPU_1D_PPM_H +#define CPU_1D_PPM_H + +#include +#include "vec.h" +#include "algorithm" +#include "cmath" +#include "cpu_slope_limiters.hpp" +#include "cpu_face_estimates.hpp" + +using namespace std; + +/* + Compute parabolic reconstruction with an explicit scheme +*/ +inline void compute_ppm_coeff_nonuniform(const Vec * const dv, const Vec * const values, face_estimate_order order, uint k, Vec a[3]){ + Vec fv_l; /*left face value*/ + Vec fv_r; /*right face value*/ + compute_filtered_face_values_nonuniform_conserving(dv, values, k, order, fv_l, fv_r); + + //Coella et al, check for monotonicity + Vec m_face = fv_l; + Vec p_face = fv_r; + + //std::cout << "value = " << values[k][0] << ", m_face = " << m_face[0] << ", p_face = " << p_face[0] << "\n"; + //std::cout << values[k][0] - m_face[0] << ", " << values[k][0] - p_face[0] << "\n"; + + // m_face = select((p_face - m_face) * (values[k] - 0.5 * (m_face + p_face)) > + // (p_face - m_face)*(p_face - m_face) * one_sixth, + // 3 * values[k] - 2 * p_face, + // m_face); + // p_face = select(-(p_face - m_face) * (p_face - m_face) * one_sixth > + // (p_face - m_face) * (values[k] - 0.5 * (m_face + p_face)), + // 3 * values[k] - 2 * m_face, + // p_face); + + //Fit a second order polynomial for reconstruction see, e.g., White + //2008 (PQM article) (note additional integration factors built in, + //contrary to White (2008) eq. 4 + a[0] = m_face; + a[1] = 3.0 * values[k] - 2.0 * m_face - p_face; + a[2] = (m_face + p_face - 2.0 * values[k]); + + //std::cout << "value = " << values[k][0] << ", m_face = " << m_face[0] << ", p_face = " << p_face[0] << "\n"; + //std::cout << values[k][0] - m_face[0] << ", " << values[k][0] - p_face[0] << "\n"; + + //std::cout << values[k][0] << " " << m_face[0] << " " << p_face[0] << "\n"; +} + +#endif diff --git a/vlasovsolver/cpu_face_estimates.hpp b/vlasovsolver/cpu_face_estimates.hpp index cfa1e96ca..f45f2e194 100644 --- a/vlasovsolver/cpu_face_estimates.hpp +++ b/vlasovsolver/cpu_face_estimates.hpp @@ -370,4 +370,132 @@ inline void compute_filtered_face_values_nonuniform(const Vec * const dv, const } +inline pair constrain_face_values(const Vec * h,const Vec * values,uint k,Vec & fv_l, Vec & fv_r) { + // Colella & Sekora, eq. 18 + + const Vec C = 1.25; + Vec invh2 = 1.0 / (h[k] * h[k]); + Vec d2a = invh2 * 3.0 * (values[k] - 2.0 * fv_r + values[k + 1]); + Vec d2aL = invh2 * (values[k - 1] - 2.0 * values[k] + values[k + 1]); + Vec d2aR = invh2 * (values[k] - 2.0 * values[k + 1] + values[k + 2]); + Vec d2aLim; + if ( (horizontal_or(d2a * d2aL >= 0)) && (horizontal_or(d2a * d2aR >= 0)) && + horizontal_and(d2a != 0)) { + d2aLim = d2a / abs(d2a) * min(abs(d2a),min(C*abs(d2aL),C*abs(d2aR))); + } else { + d2aLim = 0.0; + } + + // Colella & Sekora, eq 19 + Vec p_face = 0.5 * (values[k] + values[k + 1]) - h[k] * h[k] / 3.0 * d2aLim; + + // Calculate negative face value. 
TODO: Fix copy-paste + uint l = k - 1; + invh2 = 1.0 / (h[l] * h[l]); + d2a = invh2 * 3.0 * (values[l] - 2.0 * fv_l + values[l + 1]); + d2aL = invh2 * (values[l - 1] - 2.0 * values[l] + values[l + 1]); + d2aR = invh2 * (values[l] - 2.0 * values[l + 1] + values[l + 2]); + if ( (horizontal_or(d2a * d2aL >= 0)) && (horizontal_or(d2a * d2aR >= 0)) && + horizontal_and(d2a != 0)) { + d2aLim = d2a / abs(d2a) * min(abs(d2a),min(C*abs(d2aL),C*abs(d2aR))); + } else { + d2aLim = 0.0; + } + + Vec m_face = 0.5 * (values[l] + values[l + 1]) - h[l] * h[l] / 3.0 * d2aLim; + + // Colella & Sekora, eq 22 + d2a = -2.0 * invh2 * 6.0 * (values[k] - 3.0 * (m_face + p_face)); // a6,j from eq. 7 + Vec d2aC = invh2 * (values[k - 1] - 2.0 * values[k] + values[k + 1]); + //d2aL = invh2 * (values[k-2] - 2.0 * values[k] + values[k]); + d2aL = invh2 * (values[k-2] - 2.0 * values[k-1] + values[k]); + d2aR = invh2 * (values[k] - 2.0 * values[k+1] + values[k+2]); + if ( (horizontal_or(d2a * d2aL >= 0)) && (horizontal_or(d2a * d2aR >= 0)) && + (horizontal_or(d2a * d2aC >= 0)) && horizontal_and(d2a != 0)) { + + d2aLim = d2a / abs(d2a) * min(C * abs(d2aL), min(C * abs(d2aR), min(C * abs(d2aC), abs(d2a)))); + } else { + d2aLim = 0.0; + if( horizontal_or(d2a == 0.0)) { + // Set a non-zero value for the divison in eq. 23. + // According to the paper the ratio should be 0.0 + d2a = 1.0; + } + } + + // Colella & Sekora, eq 23 + Vec p_face_interpolant = values[k] + (p_face - values[k]) * d2aLim / d2a; + Vec m_face_interpolant = values[k] + (m_face - values[k]) * d2aLim / d2a; + + pair faceInterpolants; + faceInterpolants = make_pair(m_face_interpolant,p_face_interpolant); + + // if(horizontal_and(values[k] > 1.0)) + // std::cout << "k, a-, a+, : " << k << ", " << p_face_interpolant[0] << ", " << m_face_interpolant[0] << ", " << values[k][0] << endl; + + return faceInterpolants; +} + +inline void compute_filtered_face_values_nonuniform_conserving(const Vec * const dv, const Vec * const values,uint k, face_estimate_order order, Vec &fv_l, Vec &fv_r){ + switch(order){ + case h4: + compute_h4_left_face_value_nonuniform(dv, values, k, fv_l); + compute_h4_left_face_value_nonuniform(dv, values, k + 1, fv_r); + break; + // case h5: + // compute_h5_face_values(dv, values, k, fv_l, fv_r); + // break; + // case h6: + // compute_h6_left_face_value(dv, values, k, fv_l); + // compute_h6_left_face_value(dv, values, k + 1, fv_r); + // break; + // case h8: + // compute_h8_left_face_value(dv, values, k, fv_l); + // compute_h8_left_face_value(dv, values, k + 1, fv_r); + // break; + default: + std::cout << "Order " << order << " has not been implemented (yet)\n"; + break; + } + + Vec slope_abs,slope_sign; + slope_limiter(values[k -1], values[k], values[k + 1], slope_abs, slope_sign); + + //check for extrema + Vecb is_extrema = (slope_abs == Vec(0.0)); + Vecb filter_l = (values[k - 1] - fv_l) * (fv_l - values[k]) < 0 ; + Vecb filter_r = (values[k + 1] - fv_r) * (fv_r - values[k]) < 0; + // if(horizontal_or(is_extrema) || horizontal_or(filter_l) || horizontal_or(filter_r)) { + // Colella & Sekora, eq. 
20 + if(horizontal_or((fv_r - values[k]) * (values[k] - fv_l) <= Vec(0.0)) + && horizontal_or((values[k - 1] - values[k]) * (values[k] - values[k + 1]) <= Vec(0.0))) { + auto faces = constrain_face_values(dv, values, k, fv_l, fv_r); + + fv_l = faces.first; + fv_r = faces.second; + + // fv_r = select(is_extrema, values[k], fv_r); + // fv_l = select(is_extrema, values[k], fv_l); + } + + + + // //Fix left face if needed; boundary value is not bounded + // Vecb filter = (values[k -1] - fv_l) * (fv_l - values[k]) < 0 ; + // if(horizontal_or (filter)) { + // //Go to linear (PLM) estimates if not ok (this is always ok!) + // fv_l=select(filter, values[k ] - slope_sign * 0.5 * slope_abs, fv_l); + // } + + // //Fix face if needed; boundary value is not bounded + // filter = (values[k + 1] - fv_r) * (fv_r - values[k]) < 0; + // if(horizontal_or (filter)) { + // //Go to linear (PLM) estimates if not ok (this is always ok!) + // fv_r=select(filter, values[k] + slope_sign * 0.5 * slope_abs, fv_r); + // } + + +} + + #endif From f2a4873cd111adf93f9465cfba9fee8010517b1e Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 6 Aug 2018 12:08:40 +0300 Subject: [PATCH 036/602] Moved copy-pasted code segment into a function. --- vlasovsolver/cpu_face_estimates.hpp | 97 ++++++++++++++--------------- 1 file changed, 48 insertions(+), 49 deletions(-) diff --git a/vlasovsolver/cpu_face_estimates.hpp b/vlasovsolver/cpu_face_estimates.hpp index f45f2e194..01553fced 100644 --- a/vlasovsolver/cpu_face_estimates.hpp +++ b/vlasovsolver/cpu_face_estimates.hpp @@ -369,13 +369,11 @@ inline void compute_filtered_face_values_nonuniform(const Vec * const dv, const } } +inline Vec get_D2aLim(const Vec * h, const Vec * values, uint k, const Vec C, Vec & fv) { -inline pair constrain_face_values(const Vec * h,const Vec * values,uint k,Vec & fv_l, Vec & fv_r) { // Colella & Sekora, eq. 18 - - const Vec C = 1.25; Vec invh2 = 1.0 / (h[k] * h[k]); - Vec d2a = invh2 * 3.0 * (values[k] - 2.0 * fv_r + values[k + 1]); + Vec d2a = invh2 * 3.0 * (values[k] - 2.0 * fv + values[k + 1]); Vec d2aL = invh2 * (values[k - 1] - 2.0 * values[k] + values[k + 1]); Vec d2aR = invh2 * (values[k] - 2.0 * values[k + 1] + values[k + 2]); Vec d2aLim; @@ -385,55 +383,56 @@ inline pair constrain_face_values(const Vec * h,const Vec * values,uint } else { d2aLim = 0.0; } - - // Colella & Sekora, eq 19 - Vec p_face = 0.5 * (values[k] + values[k + 1]) - h[k] * h[k] / 3.0 * d2aLim; - - // Calculate negative face value. TODO: Fix copy-paste - uint l = k - 1; - invh2 = 1.0 / (h[l] * h[l]); - d2a = invh2 * 3.0 * (values[l] - 2.0 * fv_l + values[l + 1]); - d2aL = invh2 * (values[l - 1] - 2.0 * values[l] + values[l + 1]); - d2aR = invh2 * (values[l] - 2.0 * values[l + 1] + values[l + 2]); - if ( (horizontal_or(d2a * d2aL >= 0)) && (horizontal_or(d2a * d2aR >= 0)) && - horizontal_and(d2a != 0)) { - d2aLim = d2a / abs(d2a) * min(abs(d2a),min(C*abs(d2aL),C*abs(d2aR))); - } else { - d2aLim = 0.0; - } - Vec m_face = 0.5 * (values[l] + values[l + 1]) - h[l] * h[l] / 3.0 * d2aLim; - - // Colella & Sekora, eq 22 - d2a = -2.0 * invh2 * 6.0 * (values[k] - 3.0 * (m_face + p_face)); // a6,j from eq. 
7 - Vec d2aC = invh2 * (values[k - 1] - 2.0 * values[k] + values[k + 1]); - //d2aL = invh2 * (values[k-2] - 2.0 * values[k] + values[k]); - d2aL = invh2 * (values[k-2] - 2.0 * values[k-1] + values[k]); - d2aR = invh2 * (values[k] - 2.0 * values[k+1] + values[k+2]); - if ( (horizontal_or(d2a * d2aL >= 0)) && (horizontal_or(d2a * d2aR >= 0)) && - (horizontal_or(d2a * d2aC >= 0)) && horizontal_and(d2a != 0)) { - - d2aLim = d2a / abs(d2a) * min(C * abs(d2aL), min(C * abs(d2aR), min(C * abs(d2aC), abs(d2a)))); - } else { - d2aLim = 0.0; - if( horizontal_or(d2a == 0.0)) { - // Set a non-zero value for the divison in eq. 23. - // According to the paper the ratio should be 0.0 - d2a = 1.0; - } - } + return d2aLim; + +} - // Colella & Sekora, eq 23 - Vec p_face_interpolant = values[k] + (p_face - values[k]) * d2aLim / d2a; - Vec m_face_interpolant = values[k] + (m_face - values[k]) * d2aLim / d2a; +inline pair constrain_face_values(const Vec * h,const Vec * values,uint k,Vec & fv_l, Vec & fv_r) { - pair faceInterpolants; - faceInterpolants = make_pair(m_face_interpolant,p_face_interpolant); + const Vec C = 1.25; + Vec invh2 = 1.0 / (h[k] * h[k]); + + // Colella & Sekora, eq 19 + Vec p_face = 0.5 * (values[k] + values[k + 1]) + - h[k] * h[k] / 3.0 * get_D2aLim(h,values,k ,C,fv_r); + Vec m_face = 0.5 * (values[k-1] + values[k]) + - h[k-1] * h[k-1] / 3.0 * get_D2aLim(h,values,k-1,C,fv_l); + + // Colella & Sekora, eq 21 + Vec d2a = -2.0 * invh2 * 6.0 * (values[k] - 3.0 * (m_face + p_face)); // a6,j from eq. 7 + Vec d2aC = invh2 * (values[k - 1] - 2.0 * values[k ] + values[k + 1]); + // Note: Corrected the index of 2nd term in d2aL to k - 1. + // In the paper it is k but that is almost certainly an error. + Vec d2aL = invh2 * (values[k - 2] - 2.0 * values[k - 1] + values[k ]); + Vec d2aR = invh2 * (values[k ] - 2.0 * values[k + 1] + values[k + 2]); + Vec d2aLim; - // if(horizontal_and(values[k] > 1.0)) - // std::cout << "k, a-, a+, : " << k << ", " << p_face_interpolant[0] << ", " << m_face_interpolant[0] << ", " << values[k][0] << endl; - - return faceInterpolants; + // Colella & Sekora, eq 22 + if ( (horizontal_or(d2a * d2aL >= 0)) && (horizontal_or(d2a * d2aR >= 0)) && + (horizontal_or(d2a * d2aC >= 0)) && horizontal_and(d2a != 0)) { + + d2aLim = d2a / abs(d2a) * min(C * abs(d2aL), min(C * abs(d2aR), min(C * abs(d2aC), abs(d2a)))); + } else { + d2aLim = 0.0; + if( horizontal_or(d2a == 0.0)) { + // Set a non-zero value for the denominator in eq. 23. + // According to the paper the ratio d2aLim/d2a should be 0 + d2a = 1.0; + } + } + + // Colella & Sekora, eq 23 + Vec p_face_interpolant = values[k] + (p_face - values[k]) * d2aLim / d2a; + Vec m_face_interpolant = values[k] + (m_face - values[k]) * d2aLim / d2a; + + pair faceInterpolants; + faceInterpolants = make_pair(m_face_interpolant,p_face_interpolant); + + // if(horizontal_and(values[k] > 1.0)) + // std::cout << "k, a-, a+, : " << k << ", " << p_face_interpolant[0] << ", " << m_face_interpolant[0] << ", " << values[k][0] << endl; + + return faceInterpolants; } inline void compute_filtered_face_values_nonuniform_conserving(const Vec * const dv, const Vec * const values,uint k, face_estimate_order order, Vec &fv_l, Vec &fv_r){ From 2787196f5e5137538a58339b71c2946aadb397de Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 9 Aug 2018 11:12:13 +0300 Subject: [PATCH 037/602] Intermediate version of cpu_trans_map_amr.cpp. Not linked in yet. 
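For orientation, the mapping step this file builds toward follows the usual
semi-Lagrangian pattern for each cell of a pencil. A minimal per-cell sketch
in scalar form, assuming |z_translation| <= dr (mass moves at most one cell
per step) and the integrated PPM reconstruction integral(t) = a[0]*t +
a[1]*t^2 + a[2]*t^3; the draft in this commit leaves the loop bodies as
TODOs, and Realv scalars are used here where the solver works on Vec lanes:

    // Direction of translation decides which neighbor receives mass.
    const int target_scell_index = (z_translation > 0) ? 1 : -1;
    // Normalized departing slab inside the source cell: the top slab
    // [1 - d, 1] for positive translation, the bottom slab [0, d] otherwise.
    const Realv d   = fabs(z_translation) / dr[gid];
    const Realv z_1 = (z_translation > 0) ? 1.0 - d : 0.0;
    const Realv z_2 = (z_translation > 0) ? 1.0 : d;
    // Mass crossing the face, from the integrated reconstruction.
    const Realv crossing = z_2 * (a[0] + z_2 * (a[1] + z_2 * a[2]))
                         - z_1 * (a[0] + z_1 * (a[1] + z_1 * a[2]));
    targetValues[gid + target_scell_index] += crossing;
    targetValues[gid] += values[gid] - crossing;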
--- vlasovsolver/cpu_face_estimates.hpp | 57 ++- vlasovsolver/cpu_trans_map_amr.cpp | 762 ++++++++++++++++++++++------ 2 files changed, 654 insertions(+), 165 deletions(-) diff --git a/vlasovsolver/cpu_face_estimates.hpp b/vlasovsolver/cpu_face_estimates.hpp index 01553fced..b71447b46 100644 --- a/vlasovsolver/cpu_face_estimates.hpp +++ b/vlasovsolver/cpu_face_estimates.hpp @@ -178,19 +178,19 @@ inline void compute_h4_left_face_value(const Vec * const values, uint k, Vec &fv \param h Array with cell widths. Can be in abritrary units since they always cancel. Maybe 1/refinement ratio? */ inline void compute_h4_left_face_value_nonuniform(const Vec * const h, const Vec * const u, uint k, Vec &fv_l) { - - fv_l = ( - 1.0 / ( h[k - 2] + h[k - 1] + h[k] + h[k + 1] ) - * ( ( h[k - 2] + h[k - 1] ) * ( h[k] + h[k + 1] ) / ( h[k - 1] + h[k] ) - * ( u[k - 1] * h[k] + u[k] * h[k - 1] ) - * (1.0 / ( h[k - 2] + h[k - 1] + h[k] ) + 1.0 / ( h[k - 1] + h[k] + h[k + 1] ) ) - + ( h[k] * ( h[k] + h[k + 1] ) ) / ( ( h[k - 2] + h[k - 1] + h[k] ) * (h[k - 2] + h[k - 1] ) ) - * ( u[k - 1] * (h[k - 2] + 2.0 * h[k - 1] ) - ( u[k - 2] * h[k - 1] ) ) - + h[k - 1] * ( h[k - 2] + h[k - 1] ) / ( ( h[k - 1] + h[k] + h[k + 1] ) * ( h[k] + h[k + 1] ) ) - * ( u[k] * ( 2.0 * h[k] + h[k + 1] ) - u[k + 1] * h[k] ) ) - ); + + fv_l = ( + 1.0 / ( h[k - 2] + h[k - 1] + h[k] + h[k + 1] ) + * ( ( h[k - 2] + h[k - 1] ) * ( h[k] + h[k + 1] ) / ( h[k - 1] + h[k] ) + * ( u[k - 1] * h[k] + u[k] * h[k - 1] ) + * (1.0 / ( h[k - 2] + h[k - 1] + h[k] ) + 1.0 / ( h[k - 1] + h[k] + h[k + 1] ) ) + + ( h[k] * ( h[k] + h[k + 1] ) ) / ( ( h[k - 2] + h[k - 1] + h[k] ) * (h[k - 2] + h[k - 1] ) ) + * ( u[k - 1] * (h[k - 2] + 2.0 * h[k - 1] ) - ( u[k - 2] * h[k - 1] ) ) + + h[k - 1] * ( h[k - 2] + h[k - 1] ) / ( ( h[k - 1] + h[k] + h[k + 1] ) * ( h[k] + h[k + 1] ) ) + * ( u[k] * ( 2.0 * h[k] + h[k + 1] ) - u[k + 1] * h[k] ) ) + ); } - + /*! Compute left face value based on the explicit h4 estimate. @@ -325,10 +325,10 @@ inline void compute_filtered_face_values(const Vec * const values,uint k, face_e inline void compute_filtered_face_values_nonuniform(const Vec * const dv, const Vec * const values,uint k, face_estimate_order order, Vec &fv_l, Vec &fv_r){ switch(order){ - case h4: - compute_h4_left_face_value_nonuniform(dv, values, k, fv_l); - compute_h4_left_face_value_nonuniform(dv, values, k + 1, fv_r); - break; + case h4: + compute_h4_left_face_value_nonuniform(dv, values, k, fv_l); + compute_h4_left_face_value_nonuniform(dv, values, k + 1, fv_r); + break; // case h5: // compute_h5_face_values(dv, values, k, fv_l, fv_r); // break; @@ -475,22 +475,35 @@ inline void compute_filtered_face_values_nonuniform_conserving(const Vec * const // fv_r = select(is_extrema, values[k], fv_r); // fv_l = select(is_extrema, values[k], fv_l); - } + } else { - + //Fix left face if needed; boundary value is not bounded + Vecb filter = (values[k -1] - fv_l) * (fv_l - values[k]) < 0 ; + if(horizontal_or (filter)) { + //Go to linear (PLM) estimates if not ok (this is always ok!) + fv_l=select(filter, values[k ] - slope_sign * 0.5 * slope_abs, fv_l); + } + + //Fix face if needed; boundary value is not bounded + filter = (values[k + 1] - fv_r) * (fv_r - values[k]) < 0; + if(horizontal_or (filter)) { + //Go to linear (PLM) estimates if not ok (this is always ok!) 
+ fv_r=select(filter, values[k] + slope_sign * 0.5 * slope_abs, fv_r); + } + } // //Fix left face if needed; boundary value is not bounded // Vecb filter = (values[k -1] - fv_l) * (fv_l - values[k]) < 0 ; // if(horizontal_or (filter)) { - // //Go to linear (PLM) estimates if not ok (this is always ok!) - // fv_l=select(filter, values[k ] - slope_sign * 0.5 * slope_abs, fv_l); + // //Go to linear (PLM) estimates if not ok (this is always ok!) + // fv_l=select(filter, values[k ] - slope_sign * 0.5 * slope_abs, fv_l); // } // //Fix face if needed; boundary value is not bounded // filter = (values[k + 1] - fv_r) * (fv_r - values[k]) < 0; // if(horizontal_or (filter)) { - // //Go to linear (PLM) estimates if not ok (this is always ok!) - // fv_r=select(filter, values[k] + slope_sign * 0.5 * slope_abs, fv_r); + // //Go to linear (PLM) estimates if not ok (this is always ok!) + // fv_r=select(filter, values[k] + slope_sign * 0.5 * slope_abs, fv_r); // } diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index b14effc94..ad76ef643 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1,162 +1,638 @@ -bool trans_map_1d_amr(const dccrg::Dccrg& mpiGrid, - const vector& localPropagatedCells, - const vector& remoteTargetCells, - const uint dimension, - const Realv dt, - const uint popID) { +//#include "vlasovsolver/cpu_1d_ppm_nonuniform.hpp" +#include "vlasovsolver/cpu_1d_ppm_nonuniform_conserving.hpp" +struct grid_data { - // values used with an stencil in 1 dimension, initialized to 0. - // Contains a block, and its spatial neighbours in one dimension. - vector dz; - Realv z_min, dvz,vz_min; - uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ - unsigned char cellid_transpose[WID3]; /*< defines the transpose for the solver internal (transposed) id: i + j*WID + k*WID2 to actual one*/ + int value = 0; + + std::tuple get_mpi_datatype() + { + return std::make_tuple(this, 0, MPI_BYTE); + } + +}; + +struct setOfPencils { + + uint N; // Number of pencils in the set + uint sumOfLengths; + std::vector lengthOfPencils; // Lengths of pencils + std::vector ids; // List of cells + std::vector x,y; // x,y - position + + setOfPencils() { + N = 0; + sumOfLengths = 0; + } + + void addPencil(std::vector idsIn, Real xIn, Real yIn, vector zIn) { + + N += 1; + sumOfLengths += idsIn.size(); + lengthOfPencils.push_back(idsIn.size()); + ids.insert(ids.end(),idsIn.begin(),idsIn.end()); + x.push_back(xIn); + y.push_back(yIn); + z.insert(z.end(),zIn.begin(),zIn.end()); - if(localPropagatedCells.size() == 0) - return true; - //vector with all cells - vector allCells(localPropagatedCells); - allCells.insert(allCells.end(), remoteTargetCells.begin(), remoteTargetCells.end()); + } + + std::vector getIds(uint pencilId) { + + if (pencilId > N) { + return; + } + + CellID ibeg = 0; + for (uint i = 0; i < pencilId; i++) { + ibeg += lengthOfPencils[i]; + } + CellID iend = ibeg + lengthOfPencils[pencilId]; + + vector idsOut; + + for (uint i = ibeg; i <= iend; i++) { + idsOut.push_back(ids[i]); + } + + return idsOut; + } + +}; + +CellID selectNeighbor(dccrg::Dccrg &grid, CellID id, int dimension = 0, uint path = 0) { + + const auto neighbors = grid.get_face_neighbors_of(id); + + vector < CellID > myNeighbors; + // Collect neighbor ids in the positive direction of the chosen dimension. 
+ // Note that dimension indexing starts from 1 (of course it does) + for (const auto cell : neighbors) { + if (cell.second == dimension + 1) + myNeighbors.push_back(cell.first); + } + + CellID neighbor; - const uint nSourceNeighborsPerCell = 1 + 2 * VLASOV_STENCIL_WIDTH; - std::vector allCellsPointer(allCells.size()); - std::vector sourceNeighbors(localPropagatedCells.size() * nSourceNeighborsPerCell); - std::vector targetNeighbors(3 * localPropagatedCells.size() ); + switch( myNeighbors.size() ) { + // Since refinement can only increase by 1 level the only possibilities + // Should be 0 neighbors, 1 neighbor or 4 neighbors. + case 0 : { + // did not find neighbors + neighbor = INVALID_CELLID; + break; + } + case 1 : { + neighbor = myNeighbors[0]; + break; + } + case 4 : { + neighbor = myNeighbors[path]; + break; + } + default: { + // something is wrong + neighbor = INVALID_CELLID; + throw "Invalid neighbor count!"; + break; + } + } + + return neighbor; +} + +setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg &grid, + setOfPencils &pencils, CellID startingId, + vector ids, uint dimension, + vector path) { + + const bool debug = false; + CellID nextNeighbor; + uint id = startingId; + uint startingRefLvl = grid.get_refinement_level(id); + + if( ids.size() == 0 ) + ids.push_back(startingId); + + // If the cell where we start is refined, we need to figure out which path + // to follow in future refined cells. This is a bit hacky but we have to + // use the order or the children of the parent cell to figure out which + // corner we are in. + // Maybe you could use physical coordinates here? + if( startingRefLvl > path.size() ) { + for ( uint i = path.size(); i < startingRefLvl; i++) { + auto parent = grid.get_parent(id); + auto children = grid.get_all_children(parent); + auto it = std::find(children.begin(),children.end(),id); + auto index = std::distance(children.begin(),it); + auto index2 = index; + + switch( dimension ) { + case 0: { + index2 = index / 2; + break; + } + case 1: { + index2 = index - index / 2; + break; + } + case 2: { + index2 = index % 4; + break; + } + } + path.insert(path.begin(),index2); + id = parent; + } + } + + id = startingId; -#pragma omp parallel for - for(uint celli = 0; celli < allCells.size(); celli++){ - allCellsPointer[celli] = mpiGrid[allCells[celli]]; - } + while (id > 0) { + + // Find the refinement level in the neighboring cell. Any neighbor will do + // since refinement level can only increase by 1 between neighbors. + nextNeighbor = selectNeighbor(grid,id,dimension); + + // If there are no neighbors, we can stop. + if (nextNeighbor == 0) + break; + + uint refLvl = grid.get_refinement_level(nextNeighbor); + + if (refLvl > 0) { + + // If we have encountered this refinement level before and stored + // the path this builder follows, we will just take the same path + // again. + if ( path.size() >= refLvl ) { + + if(debug) { + std::cout << "I am cell " << id << ". "; + std::cout << "I have seen refinement level " << refLvl << " before. Path is "; + for (auto k = path.begin(); k != path.end(); ++k) + std::cout << *k << " "; + std::cout << std::endl; + } + + nextNeighbor = selectNeighbor(grid,id,dimension,path[refLvl-1]); + + } else { + + if(debug) { + std::cout << "I am cell " << id << ". "; + std::cout << "I have NOT seen refinement level " << refLvl << " before. 
Path is "; + for (auto k = path.begin(); k != path.end(); ++k) + std::cout << *k << ' '; + std::cout << std::endl; + } + + // New refinement level, create a path through each neighbor cell + for ( uint i : {0,1,2,3} ) { + + vector < uint > myPath = path; + myPath.push_back(i); + + nextNeighbor = selectNeighbor(grid,id,dimension,myPath.back()); + + if ( i == 3 ) { + + // This builder continues with neighbor 3 + ids.push_back(nextNeighbor); + path = myPath; + + } else { + + // Spawn new builders for neighbors 0,1,2 + buildPencilsWithNeighbors(grid,pencils,id,ids,dimension,myPath); + + } + + } + + } + + } else { + if(debug) { + std::cout << "I am cell " << id << ". "; + std::cout << " I am on refinement level 0." << std::endl; + } + }// Closes if (refLvl == 0) + + // If we found a neighbor, add it to the list of ids for this pencil. + if(nextNeighbor != INVALID_CELLID) { + if (debug) { + std::cout << " Next neighbor is " << nextNeighbor << "." << std::endl; + } + ids.push_back(nextNeighbor); + } + + // Move to the next cell. + id = nextNeighbor; + + } // Closes while loop + + // Get the x,y - coordinates of the pencil (in the direction perpendicular to the pencil) + const auto coordinates = grid.get_center(ids[0]); + double x,y; + uint ix,iy,iz + switch(dimension) { + case 0: { + ix = 1; + iy = 2; + iz = 0; + break; + } + case 1: { + ix = 2; + iy = 0; + iz = 1; + break; + } + case 2: { + ix = 0; + iy = 1; + iz = 2; + break; + } + default: { + ix = 0; + iy = 1; + iz = 2; + break; + } + } + + x = coordinates[ix]; + y = coordinates[iy]; + + pencils.addPencil(ids,x,y); + return pencils; +} - vector seedIds; +void propagatePencil(Vec dr[], Vec values[], Vec z_translation, uint blocks_per_dim ) { -#pragma omp parallel for - for(uint celli = 0; celli < localPropagatedCells.size(); celli++){ - // compute spatial neighbors, separately for targets and source. In - // source cells we have a wider stencil and take into account - // boundaries. For targets we only have actual cells as we do not - // want to propagate boundary cells (array may contain - // INVALID_CELLIDs at boundaries). - compute_spatial_source_neighbors(mpiGrid, localPropagatedCells[celli], dimension, - sourceNeighbors.data() + celli * nSourceNeighborsPerCell); - compute_spatial_target_neighbors(mpiGrid, localPropagatedCells[celli], dimension, - targetNeighbors.data() + celli * 3); + // Determine direction of translation + // part of density goes here (cell index change along spatial direcion) + const int target_scell_index = (z_translation > 0) ? 1: -1; + + // Vector buffer where we write data, initialized to 0*/ + Vec targetValues[(blocks_per_dim + 2) * WID]; + + for (uint k_block = 0; k_block < blocks_per_dim; k_block++){ + + for (uint k_cell=0; k_cell < WID; ++k_cell) { + + uint gid = k_block * WID + k_cell + WID; + // init target_values + targetValues[gid] = 0.0; + + } + } + for (uint k_block = 0; k_block < blocks_per_dim; k_block++){ - // Collect a list of cell ids that do not have a neighbor in the negative direction - // These are the seed ids for the pencils. - vector negativeNeighbors; - for (const auto neighbor : grid.get_face_neighbors_of(celli)) { + for (uint k_cell=0; k_cell < WID; ++k_cell){ + + uint gid = k_block * WID + k_cell + WID; + //uint gid = (blocks_per_dim + 2) * WID - (k_block * WID + k_cell + WID); + + // Calculate normalized coordinates in current cell. + // The coordinates (scaled units from 0 to 1) between which we will + // integrate to put mass in the target neighboring cell. 
+ // Normalize the coordinates to the origin cell. Then we scale with the difference + // in volume between target and origin later when adding the integrated value. + Realv z_1,z_2; + if ( z_translation < 0 ) { + z_1 = 0; + z_2 = -z_translation / dr[gid]; + } else { + z_1 = 1.0 - z_translation / dr[gid]; + z_2 = 1.0; + } + + if( abs(z_1) > 1.0 || abs(z_2) > 1.0 ) { + std::cout << "Error, CFL condition violated\n"; + std::cout << "Exiting\n"; + std::exit(1); + } - if (neighbor.second == - (dimension + 1)) - negativeNeighbors.push_back(neighbor.first); - } - if (negativeNeighbors.size() == 0) - seedIds.push_back(celli); - } - - // compute pencils => set of pencils (shared datastructure) - vector ids; - vector path; - setOfPencils pencils; - - for (const auto id : seedIds) { - pencils = buildPencilsWithNeighbors(mpiGrid, pencils, id, ids, dimension, path); - } - - // Get a unique sorted list of blockids that are in any of the - // propagated cells. First use set for this, then add to vector (may not - // be the most nice way to do this and in any case we could do it along - // dimension for data locality reasons => copy acc map column code, TODO: FIXME - std::unordered_set unionOfBlocksSet; - - for(uint celli = 0; celli < allCellsPointer.size(); celli++) { - vmesh::VelocityMesh& vmesh = allCellsPointer[celli]->get_velocity_mesh(popID); - for (vmesh::LocalID block_i=0; block_i< vmesh.size(); ++block_i) { - unionOfBlocksSet.insert(vmesh.getGlobalID(block_i)); - } - } - - std::vector unionOfBlocks; - unionOfBlocks.reserve(unionOfBlocksSet.size()); - for(const auto blockGID: unionOfBlocksSet) { - unionOfBlocks.push_back(blockGID); - } - - // Fiddle indices x,y,z - - const uint8_t VMESH_REFLEVEL=0; - const vmesh::VelocityMesh& vmesh = allCellsPointer[0]->get_velocity_mesh(popID); - // set cell size in dimension direction - dvz = vmesh.getCellSize(VMESH_REFLEVEL)[dimension]; - vz_min = vmesh.getMeshMinLimits()[dimension]; - switch (dimension) { - case 0: - // TODO: Query the pencil for the dz's? Do this inside the loop over pencils - dz = ; - z_min = P::xmin; - // set values in array that is used to convert block indices - // to global ID using a dot product. - cell_indices_to_id[0]=WID2; - cell_indices_to_id[1]=WID; - cell_indices_to_id[2]=1; - break; - case 1: - z_min = P::ymin; - // set values in array that is used to convert block indices - // to global ID using a dot product - cell_indices_to_id[0]=1; - cell_indices_to_id[1]=WID2; - cell_indices_to_id[2]=WID; - break; - case 2: - z_min = P::zmin; - // set values in array that is used to convert block indices - // to global id using a dot product. 
- cell_indices_to_id[0]=1; - cell_indices_to_id[1]=WID; - cell_indices_to_id[2]=WID2; - break; - default: - cerr << __FILE__ << ":"<< __LINE__ << " Wrong dimension, abort"<& mpiGrid, + const vector& localPropagatedCells, + const vector& remoteTargetCells, + const uint dimension, + const Realv dt, + const uint popID) { + + vector dz; /*< cell size in the dimension of the pencil */ + Realv dvz,vz_min; + uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ + unsigned char cellid_transpose[WID3]; /*< defines the transpose for the solver internal (transposed) id: i + j*WID + k*WID2 to actual one*/ + + // return if there's no cells to propagate + if(localPropagatedCells.size() == 0) + return true; + + // Vector with all cell ids + vector allCells(localPropagatedCells); + allCells.insert(allCells.end(), remoteTargetCells.begin(), remoteTargetCells.end()); + + const uint nSourceNeighborsPerCell = 1 + 2 * VLASOV_STENCIL_WIDTH; + + // Vectors of pointers to the cell structs + std::vector allCellsPointer(allCells.size()); + std::vector sourceNeighbors(localPropagatedCells.size() * nSourceNeighborsPerCell); + std::vector targetNeighbors(3 * localPropagatedCells.size() ); + + // Initialize allCellsPointer +#pragma omp parallel for + for(uint celli = 0; celli < allCells.size(); celli++){ + allCellsPointer[celli] = mpiGrid[allCells[celli]]; + } + + // **************************************************************************** + + // compute pencils => set of pencils (shared datastructure) + vector seedIds; + +#pragma omp parallel for + for(auto celli: localPropagatedCells){ + // Collect a list of cell ids that do not have a neighbor in the negative direction + // These are the seed ids for the pencils. + vector negativeNeighbors; + // Returns all neighbors as (id, direction-dimension) pairs. + for ( const auto neighbor : grid.get_face_neighbors_of(allCellsPointer[celli]) ) { + + // select the neighbor in the negative dimension of the propagation + if (neighbor.second == - (dimension + 1)) + + // add the id of the neighbor to a list + negativeNeighbors.push_back(neighbor.first); + } + // if no neighbors were found in the negative direction, add this cell id to the seed cells + if (negativeNeighbors.size() == 0) + seedIds.push_back(celli); + } + + // Empty vectors for internal use of buildPencilsWithNeighbors. Could be default values but + // default vectors are complicated. Should overload buildPencilsWithNeighbors like suggested here + // https://stackoverflow.com/questions/3147274/c-default-argument-for-vectorint + vector ids; + vector path; + + // Output vectors for ready pencils + setOfPencils pencils; + vector pencilSets; + + for (const auto seedId : seedIds) { + // Construct pencils from the seedIds into a set of pencils. + pencils = buildPencilsWithNeighbors(mpiGrid, pencils, seedId, ids, dimension, path); + } + // Add the final set of pencils to the pencilSets - vector. + // Only one set is created for now but we retain support for multiple sets + pencilSets.push_back(pencils); + // **************************************************************************** + + // Fiddle indices x,y,z + switch (dimension) { + case 0: + // set values in array that is used to convert block indices + // to global ID using a dot product. 
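+      // The transposed cell id is the dot product
+      // i * cell_indices_to_id[0] + j * cell_indices_to_id[1] + k * cell_indices_to_id[2],
+      // so for dimension 0 the propagated index k runs fastest in memory.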
+ cell_indices_to_id[0]=WID2; + cell_indices_to_id[1]=WID; + cell_indices_to_id[2]=1; + break; + case 1: + // set values in array that is used to convert block indices + // to global ID using a dot product + cell_indices_to_id[0]=1; + cell_indices_to_id[1]=WID2; + cell_indices_to_id[2]=WID; + break; + case 2: + // set values in array that is used to convert block indices + // to global id using a dot product. + cell_indices_to_id[0]=1; + cell_indices_to_id[1]=WID; + cell_indices_to_id[2]=WID2; + break; + default: + cerr << __FILE__ << ":"<< __LINE__ << " Wrong dimension, abort"<& vmesh = allCellsPointer[0]->get_velocity_mesh(popID); + + // set cell size in dimension direction + dvz = vmesh.getCellSize(VMESH_REFLEVEL)[dimension]; + vz_min = vmesh.getMeshMinLimits()[dimension]; + + // Get a unique sorted list of blockids that are in any of the + // propagated cells. First use set for this, then add to vector (may not + // be the most nice way to do this and in any case we could do it along + // dimension for data locality reasons => copy acc map column code, TODO: FIXME + // TODO: Do this separately for each pencil? + std::unordered_set unionOfBlocksSet; + + for(auto cell : allCellsPointer) { + vmesh::VelocityMesh& vmesh = cell->get_velocity_mesh(popID); + for (vmesh::LocalID block_i=0; block_i< vmesh.size(); ++block_i) { + unionOfBlocksSet.insert(vmesh.getGlobalID(block_i)); + } + } + + std::vector unionOfBlocks; + unionOfBlocks.reserve(unionOfBlocksSet.size()); + for(const auto blockGID: unionOfBlocksSet) { + unionOfBlocks.push_back(blockGID); + } + // **************************************************************************** + + int t1 = phiprof::initializeTimer("mappingAndStore"); + #pragma omp parallel - { - std::vector targetBlockData(3 * localPropagatedCells.size() * WID3); - std::vector targetsValid(localPropagatedCells.size()); - std::vector allCellsBlockLocalID(allCells.size()); + { + //std::vector targetsValid(localPropagatedCells.size()); + //std::vector allCellsBlockLocalID(allCells.size()); #pragma omp for schedule(guided) - // Loop over unionOfBlocks - for(uint blocki = 0; blocki < unionOfBlocks.size(); blocki++){ - vmesh::GlobalID blockGID = unionOfBlocks[blocki]; - phiprof::start(t1); + // Loop over velocity space blocks. Thread this loop (over vspace blocks) with OpenMP. + for(uint blocki = 0; blocki < unionOfBlocks.size(); blocki++){ + + phiprof::start(t1); - // Loop over sets of pencils + // Get global id of the velocity block + vmesh::GlobalID blockGID = unionOfBlocks[blocki]; - // Loop over pencils + velocity_block_indices_t block_indices; + uint8_t vRefLevel; + vmesh.getIndices(blockGID,vRefLevel, block_indices[0], + block_indices[1], block_indices[2]); - } - } -} + // Loop over sets of pencils + // This loop only has one iteration for now + for ( auto pencils: pencilSets ) { + + // Allocate targetdata sum(lengths of pencils)*WID3) + Vec targetData[pencils.sumOfLengths * WID3]; + + // Initialize targetdata to 0 + for( uint i = 0; i sourcedata) / (proper xy reconstruction in future) + // copied from regular code, should work? + + // TODO: Does the index of sourceData need adjustments for vector length? + copy_trans_block_data(sourceNeighbors.data() + celli * nSourceNeighborsPerCell, + blockGID, sourceData[celli], cellid_transpose, popID); + + // At the same time, calculate dz's and store them in a vector. 
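+                  // NB: ** is not a C++ operator; the intent is presumably
+                  // dz_ini / pow(2, mpiGrid.get_refinement_level(celli)), i.e. the
+                  // cell width halves with each refinement level.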
+ dz.push_back(dz_ini / 2.0 ** mpiGrid.get_refinement_level(celli)); + + } + + // Calculate cell centered velocity + const Realv cell_vz = (block_indices[dimension] * WID + k + 0.5) * dvz + vz_min; + + const Realv z_translation = dt * cell_vz / dz[celli]; + // propagate pencil(blockid = velocities, pencil-ids = dzs ), + propagatePencil(dz, sourceData, z_translation, blocks_per_dim); + + // sourcedata => targetdata[this pencil]) + for (auto value: sourceData) { + targetData[targetDataIndex] = value; + targetDataindex++; + } + + // dealloc source data -- Should be automatic since it's declared in this iteration? + + } + + // Loop over pencils again + for(uint pencili = 0; pencili < pencils.N; pencili++){ + + // store_data(target_data =>) :Aggregate data for blockid to original location + + //store values from target_values array to the actual blocks + for(auto celli: pencils.ids) { + //TODO: Figure out validity check later + //if(targetsValid[celli]) { + for(uint ti = 0; ti < 3; ti++) { + SpatialCell* spatial_cell = targetNeighbors[celli * 3 + ti]; + if(spatial_cell ==NULL) { + //invalid target spatial cell + continue; + } + + // Get local ID of the velocity block + const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); + + if (blockLID == vmesh::VelocityMesh::invalidLocalID()) { + // block does not exist. If so, we do not create it and add stuff to it here. + // We have already created blocks around blocks with content in + // spatial sense, so we have no need to create even more blocks here + // TODO add loss counter + continue; + } + // Pointer to the data field of the velocity block + Realf* blockData = spatial_cell->get_data(blockLID, popID); + for(int i = 0; i < WID3 ; i++) { + + // Write data into target block + blockData[i] += targetData[(celli * 3 + ti) * WID3 + i]; + } + } + //} + + } + + // dealloc target data -- Should be automatic again? + } + } + } + } + + return true; + } From d103e41c6da2ee5e3b0bd7f1e71bd7ea93f84cce Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 9 Aug 2018 11:16:42 +0300 Subject: [PATCH 038/602] Complying to coding style guide --- mini-apps/amr_reconstruction/map_test.cpp | 332 +++++++++++----------- 1 file changed, 166 insertions(+), 166 deletions(-) diff --git a/mini-apps/amr_reconstruction/map_test.cpp b/mini-apps/amr_reconstruction/map_test.cpp index 4cb261938..b030785c9 100644 --- a/mini-apps/amr_reconstruction/map_test.cpp +++ b/mini-apps/amr_reconstruction/map_test.cpp @@ -13,216 +13,216 @@ /*print all values in the vector valued values array. 
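 Each output line of dist_%03d.dat holds the cell-centre velocity v followed
 by the four vector lanes of that cell.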
In this array there are blocks_per_dim blocks with a width of WID*/ void print_values(int step, Vec *values, uint blocks_per_dim, Real v_min, Real dv){ - char name[256]; - sprintf(name,"dist_%03d.dat",step); - - FILE* fp=fopen(name,"w"); - for(uint i=0; i < blocks_per_dim * WID; i++){ - Real v=v_min + (i + 0.5)*dv; - fprintf(fp,"%20.12g %20.12g %20.12g %20.12g %20.12g\n", v, values[i + WID][0], values[i + WID][1], values[i + WID][2], values[i + WID][3]); - } - fclose(fp); + char name[256]; + sprintf(name,"dist_%03d.dat",step); + + FILE* fp=fopen(name,"w"); + for(uint i=0; i < blocks_per_dim * WID; i++){ + Real v=v_min + (i + 0.5)*dv; + fprintf(fp,"%20.12g %20.12g %20.12g %20.12g %20.12g\n", v, values[i + WID][0], values[i + WID][1], values[i + WID][2], values[i + WID][3]); + } + fclose(fp); } void propagate(Vec dr[], Vec values[], Real z_translation, uint blocks_per_dim ) { - - // Determine direction of translation - // part of density goes here (cell index change along spatial direcion) - const int target_scell_index = (z_translation > 0) ? 1: -1; - - // Vector buffer where we write data, initialized to 0*/ - Vec targetValues[(blocks_per_dim + 2) * WID]; + + // Determine direction of translation + // part of density goes here (cell index change along spatial direcion) + const int target_scell_index = (z_translation > 0) ? 1: -1; - for (uint k_block = 0; k_block < blocks_per_dim; k_block++){ - - for (uint k_cell=0; k_cell < WID; ++k_cell) { - - uint gid = k_block * WID + k_cell + WID; - // init target_values - targetValues[gid] = 0.0; + // Vector buffer where we write data, initialized to 0*/ + Vec targetValues[(blocks_per_dim + 2) * WID]; + + for (uint k_block = 0; k_block < blocks_per_dim; k_block++){ - } - } - for (uint k_block = 0; k_block < blocks_per_dim; k_block++){ - - for (uint k_cell=0; k_cell < WID; ++k_cell){ - - uint gid = k_block * WID + k_cell + WID; - //uint gid = (blocks_per_dim + 2) * WID - (k_block * WID + k_cell + WID); + for (uint k_cell=0; k_cell < WID; ++k_cell) { + + uint gid = k_block * WID + k_cell + WID; + // init target_values + targetValues[gid] = 0.0; + + } + } + for (uint k_block = 0; k_block < blocks_per_dim; k_block++){ - // Calculate normalized coordinates in current cell. - // The coordinates (scaled units from 0 to 1) between which we will - // integrate to put mass in the target neighboring cell. - // Normalize the coordinates to the origin cell. Then we scale with the difference - // in volume between target and origin later when adding the integrated value. - Realv z_1,z_2; - if ( z_translation < 0 ) { - z_1 = 0; - z_2 = -z_translation / dr[gid][0]; - } else { - z_1 = 1.0 - z_translation / dr[gid][0]; - z_2 = 1.0; + for (uint k_cell=0; k_cell < WID; ++k_cell){ + + uint gid = k_block * WID + k_cell + WID; + //uint gid = (blocks_per_dim + 2) * WID - (k_block * WID + k_cell + WID); + + // Calculate normalized coordinates in current cell. + // The coordinates (scaled units from 0 to 1) between which we will + // integrate to put mass in the target neighboring cell. + // Normalize the coordinates to the origin cell. Then we scale with the difference + // in volume between target and origin later when adding the integrated value. 
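+         // In normalized coordinates the PPM profile is p(z) = a[0] + 2 a[1] z + 3 a[2] z^2,
+         // with primitive P(z) = a[0] z + a[1] z^2 + a[2] z^3; the transported density
+         // computed below is the exact integral P(z_2) - P(z_1).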
+ Realv z_1,z_2; + if ( z_translation < 0 ) { + z_1 = 0; + z_2 = -z_translation / dr[gid][0]; + } else { + z_1 = 1.0 - z_translation / dr[gid][0]; + z_2 = 1.0; + } + + if( abs(z_1) > 1.0 || abs(z_2) > 1.0 ) { + std::cout << "Error, CFL condition violated\n"; + std::cout << "Exiting\n"; + std::exit(1); + } + + // Compute polynomial coefficients + Vec a[3]; + //compute_ppm_coeff_nonuniform(dr, values, h4, gid + target_scell_index, a); + compute_ppm_coeff_nonuniform(dr, values, h4, gid, a); + + // Compute integral + const Vec ngbr_target_density = + z_2 * ( a[0] + z_2 * ( a[1] + z_2 * a[2] ) ) - + z_1 * ( a[0] + z_1 * ( a[1] + z_1 * a[2] ) ); + + // Store mapped density in two target cells + // in the neighbor cell we will put this density + targetValues[gid + target_scell_index] += ngbr_target_density * dr[gid] / dr[gid + target_scell_index]; + // in the current original cells we will put the rest of the original density + targetValues[gid] += values[gid] - ngbr_target_density; } + } - if( abs(z_1) > 1.0 || abs(z_2) > 1.0 ) { - std::cout << "Error, CFL condition violated\n"; - std::cout << "Exiting\n"; - std::exit(1); - } - - // Compute polynomial coefficients - Vec a[3]; - //compute_ppm_coeff_nonuniform(dr, values, h4, gid + target_scell_index, a); - compute_ppm_coeff_nonuniform(dr, values, h4, gid, a); - - // Compute integral - const Vec ngbr_target_density = - z_2 * ( a[0] + z_2 * ( a[1] + z_2 * a[2] ) ) - - z_1 * ( a[0] + z_1 * ( a[1] + z_1 * a[2] ) ); - - // Store mapped density in two target cells - // in the neighbor cell we will put this density - targetValues[gid + target_scell_index] += ngbr_target_density * dr[gid] / dr[gid + target_scell_index]; - // in the current original cells we will put the rest of the original density - targetValues[gid] += values[gid] - ngbr_target_density; - } - } - - // Store target data into source data - for (uint k_block = 0; k_block 0) - r0 += dr[iend-1+WID]; + int iend = k_block * WID + k_cell; + if (iend > 0) + r0 += dr[iend-1+WID]; - for (uint k_subcell=0; k_subcell< subcells; ++k_subcell){ - Vec r_norm = (Real)(k_subcell + 0.5)/subcells; //normalized r of subcell in source cell - Vec r = r0 + r_norm * dr[k_block * WID + k_cell + WID]; + for (uint k_subcell=0; k_subcell< subcells; ++k_subcell){ + Vec r_norm = (Real)(k_subcell + 0.5)/subcells; //normalized r of subcell in source cell + Vec r = r0 + r_norm * dr[k_block * WID + k_cell + WID]; #ifdef ACC_SEMILAG_PPM - Vec target = - a[0] + - 2.0 * r_norm * a[1] + - 3.0 * r_norm * r_norm * a[2]; + Vec target = + a[0] + + 2.0 * r_norm * a[1] + + 3.0 * r_norm * r_norm * a[2]; #endif - fprintf(fp,"%20.12g %20.12e %20.12e\n", r[0], values[k_block * WID + k_cell + WID][0], target[0]); + fprintf(fp,"%20.12g %20.12e %20.12e\n", r[0], values[k_block * WID + k_cell + WID][0], target[0]); + } + //fprintf(fp,"\n"); //empty line to deay wgments in gnuplot } - //fprintf(fp,"\n"); //empty line to deay wgments in gnuplot - } - } + } - fclose(fp); + fclose(fp); } void refine(Vec dr[], int ir, int max_refinement, int cells_per_level) { for (uint k=0; k < max_refinement * cells_per_level; ++k) { - dr[ir + k] = dr[ir + k]/pow(2,(max_refinement - k / cells_per_level)); - if (k > 0) - dr[ir - k] = dr[ir - k]/pow(2,(max_refinement - k / cells_per_level)); - } + dr[ir + k] = dr[ir + k]/pow(2,(max_refinement - k / cells_per_level)); + if (k > 0) + dr[ir - k] = dr[ir - k]/pow(2,(max_refinement - k / cells_per_level)); + } } int main(void) { - const Real dr0 = 20000; - const int blocks_per_dim = 100; - const int i_block = 0; 
//x index of block, fixed in this simple test - const int j_block = 0; //y index of block, fixed in this simple test - const int j_cell = 0; // y index of cell within block (0..WID-1) - - Vec dr[(blocks_per_dim+2)*WID]; - Vec values[(blocks_per_dim+2)*WID]; - - boost::mt19937 rng; - boost::uniform_real u(0.0, 2.0 * dr0); - boost::variate_generator > gen(rng, u); - gen.distribution().reset(); - gen.engine().seed(12345); + const Real dr0 = 20000; + const int blocks_per_dim = 100; + const int i_block = 0; //x index of block, fixed in this simple test + const int j_block = 0; //y index of block, fixed in this simple test + const int j_cell = 0; // y index of cell within block (0..WID-1) + + Vec dr[(blocks_per_dim+2)*WID]; + Vec values[(blocks_per_dim+2)*WID]; + + boost::mt19937 rng; + boost::uniform_real u(0.0, 2.0 * dr0); + boost::variate_generator > gen(rng, u); + gen.distribution().reset(); + gen.engine().seed(12345); - /*initial values*/ - /*clear target & values array*/ - for (uint k=0; k Date: Thu, 9 Aug 2018 11:17:22 +0300 Subject: [PATCH 039/602] Complying to coding style guide --- .../build_pencils/grid_test_neighbors.cpp | 570 ++++++++++-------- 1 file changed, 305 insertions(+), 265 deletions(-) diff --git a/mini-apps/build_pencils/grid_test_neighbors.cpp b/mini-apps/build_pencils/grid_test_neighbors.cpp index fd587a52c..a415f41ea 100644 --- a/mini-apps/build_pencils/grid_test_neighbors.cpp +++ b/mini-apps/build_pencils/grid_test_neighbors.cpp @@ -12,78 +12,103 @@ using namespace std; struct grid_data { - int value = 0; + int value = 0; - std::tuple get_mpi_datatype() - { - return std::make_tuple(this, 0, MPI_BYTE); - } + std::tuple get_mpi_datatype() + { + return std::make_tuple(this, 0, MPI_BYTE); + } }; struct setOfPencils { - uint N; // Number of pencils in the set - std::vector lengthOfPencils; // Lengths of pencils - std::vector ids; // List of cells - std::vector x,y; // x,y - position (Maybe replace with uint refinement level or Real width?) + uint N; // Number of pencils in the set + uint sumOfLengths; + std::vector lengthOfPencils; // Lengths of pencils + std::vector ids; // List of cells + std::vector x,y; // x,y - position + + setOfPencils() { + N = 0; + sumOfLengths = 0; + } + + void addPencil(std::vector idsIn, Real xIn, Real yIn, vector zIn) { + + N += 1; + sumOfLengths += idsIn.size(); + lengthOfPencils.push_back(idsIn.size()); + ids.insert(ids.end(),idsIn.begin(),idsIn.end()); + x.push_back(xIn); + y.push_back(yIn); + z.insert(z.end(),zIn.begin(),zIn.end()); + + } - setOfPencils() { - N = 0; - } + std::vector getIds(uint pencilId) { - void addPencil(std::vector idsIn, Real xIn, Real yIn) { + if (pencilId > N) { + return; + } - N += 1; - lengthOfPencils.push_back(idsIn.size()); - ids.insert(ids.end(),idsIn.begin(),idsIn.end()); - x.push_back(xIn); - y.push_back(yIn); - - } + CellID ibeg = 0; + for (uint i = 0; i < pencilId; i++) { + ibeg += lengthOfPencils[i]; + } + CellID iend = ibeg + lengthOfPencils[pencilId]; + + vector idsOut; + for (uint i = ibeg; i <= iend; i++) { + idsOut.push_back(ids[i]); + } + + return idsOut; + } + }; CellID selectNeighbor(dccrg::Dccrg &grid, CellID id, int dimension = 0, uint path = 0) { - const auto neighbors = grid.get_face_neighbors_of(id); + const auto neighbors = grid.get_face_neighbors_of(id); - vector < CellID > myNeighbors; - // Collect neighbor ids in the positive direction of the chosen dimension. 
- // Note that dimension indexing starts from 1 (of course it does) - for (const auto cell : neighbors) { - if (cell.second == dimension + 1) - myNeighbors.push_back(cell.first); - } + vector < CellID > myNeighbors; + // Collect neighbor ids in the positive direction of the chosen dimension. + // Note that dimension indexing starts from 1 (of course it does) + for (const auto cell : neighbors) { + if (cell.second == dimension + 1) + myNeighbors.push_back(cell.first); + } - CellID neighbor; + CellID neighbor; - switch( myNeighbors.size() ) { - // Since refinement can only increase by 1 level the only possibilities - // Should be 0 neighbors, 1 neighbor or 4 neighbors. - case 0 : { - // did not find neighbors - neighbor = INVALID_CELLID; - break; - } - case 1 : { - neighbor = myNeighbors[0]; - break; - } - case 4 : { - neighbor = myNeighbors[path]; - break; - } - default: { - // something is wrong - neighbor = INVALID_CELLID; - throw "Invalid neighbor count!"; - break; - } - } - - return neighbor; + switch( myNeighbors.size() ) { + // Since refinement can only increase by 1 level the only possibilities + // Should be 0 neighbors, 1 neighbor or 4 neighbors. + case 0 : { + // did not find neighbors + neighbor = INVALID_CELLID; + break; + } + case 1 : { + neighbor = myNeighbors[0]; + break; + } + case 4 : { + neighbor = myNeighbors[path]; + break; + } + default: { + // something is wrong + neighbor = INVALID_CELLID; + throw "Invalid neighbor count!"; + break; + } + } + + return neighbor; } @@ -92,277 +117,292 @@ setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg &grid, vector ids, uint dimension, vector path) { - const bool debug = false; - CellID nextNeighbor; - uint id = startingId; - uint startingRefLvl = grid.get_refinement_level(id); - - if( ids.size() == 0 ) - ids.push_back(startingId); - - // If the cell where we start is refined, we need to figure out which path - // to follow in future refined cells. This is a bit hacky but we have to - // use the order or the children of the parent cell to figure out which - // corner we are in. - // Maybe you could use physical coordinates here? - if( startingRefLvl > path.size() ) { - for ( uint i = path.size(); i < startingRefLvl; i++) { - auto parent = grid.get_parent(id); - auto children = grid.get_all_children(parent); - auto it = std::find(children.begin(),children.end(),id); - auto index = std::distance(children.begin(),it); - auto index2 = index; + const bool debug = false; + CellID nextNeighbor; + uint id = startingId; + uint startingRefLvl = grid.get_refinement_level(id); + + if( ids.size() == 0 ) + ids.push_back(startingId); + + // If the cell where we start is refined, we need to figure out which path + // to follow in future refined cells. This is a bit hacky but we have to + // use the order or the children of the parent cell to figure out which + // corner we are in. + // Maybe you could use physical coordinates here? 
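+   // Assuming dccrg orders the eight children x-fastest (index = i + 2j + 4k),
+   // index / 2 = j + 2k labels the four (y,z) corners for dimension 0 and
+   // index % 4 = i + 2j the (x,y) corners for dimension 2; the dimension 1
+   // branch (index - index / 2) does not look consistent with that ordering.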
+ if( startingRefLvl > path.size() ) { + for ( uint i = path.size(); i < startingRefLvl; i++) { + auto parent = grid.get_parent(id); + auto children = grid.get_all_children(parent); + auto it = std::find(children.begin(),children.end(),id); + auto index = std::distance(children.begin(),it); + auto index2 = index; - switch( dimension ) { - case 0: { - index2 = index / 2; - break; - } - case 1: { - index2 = index - index / 2; - break; - } - case 2: { - index2 = index % 4; - break; - } + switch( dimension ) { + case 0: { + index2 = index / 2; + break; + } + case 1: { + index2 = index - index / 2; + break; + } + case 2: { + index2 = index % 4; + break; + } + } + path.insert(path.begin(),index2); + id = parent; } - path.insert(path.begin(),index2); - id = parent; - } - } + } - id = startingId; + id = startingId; - while (id > 0) { + while (id > 0) { - // Find the refinement level in the neighboring cell. Any neighbor will do - // since refinement level can only increase by 1 between neighbors. - nextNeighbor = selectNeighbor(grid,id,dimension); + // Find the refinement level in the neighboring cell. Any neighbor will do + // since refinement level can only increase by 1 between neighbors. + nextNeighbor = selectNeighbor(grid,id,dimension); - // If there are no neighbors, we can stop. - if (nextNeighbor == 0) - break; + // If there are no neighbors, we can stop. + if (nextNeighbor == 0) + break; - uint refLvl = grid.get_refinement_level(nextNeighbor); + uint refLvl = grid.get_refinement_level(nextNeighbor); - if (refLvl > 0) { + if (refLvl > 0) { - // If we have encountered this refinement level before and stored - // the path this builder follows, we will just take the same path - // again. - if ( path.size() >= refLvl ) { + // If we have encountered this refinement level before and stored + // the path this builder follows, we will just take the same path + // again. + if ( path.size() >= refLvl ) { - if(debug) { - std::cout << "I am cell " << id << ". "; - std::cout << "I have seen refinement level " << refLvl << " before. Path is "; - for (auto k = path.begin(); k != path.end(); ++k) - std::cout << *k << " "; - std::cout << std::endl; - } + if(debug) { + std::cout << "I am cell " << id << ". "; + std::cout << "I have seen refinement level " << refLvl << " before. Path is "; + for (auto k = path.begin(); k != path.end(); ++k) + std::cout << *k << " "; + std::cout << std::endl; + } - nextNeighbor = selectNeighbor(grid,id,dimension,path[refLvl-1]); + nextNeighbor = selectNeighbor(grid,id,dimension,path[refLvl-1]); - } else { + } else { - if(debug) { - std::cout << "I am cell " << id << ". "; - std::cout << "I have NOT seen refinement level " << refLvl << " before. Path is "; - for (auto k = path.begin(); k != path.end(); ++k) - std::cout << *k << ' '; - std::cout << std::endl; - } + if(debug) { + std::cout << "I am cell " << id << ". "; + std::cout << "I have NOT seen refinement level " << refLvl << " before. 
Path is "; + for (auto k = path.begin(); k != path.end(); ++k) + std::cout << *k << ' '; + std::cout << std::endl; + } - // New refinement level, create a path through each neighbor cell - for ( uint i : {0,1,2,3} ) { + // New refinement level, create a path through each neighbor cell + for ( uint i : {0,1,2,3} ) { - vector < uint > myPath = path; - myPath.push_back(i); + vector < uint > myPath = path; + myPath.push_back(i); - nextNeighbor = selectNeighbor(grid,id,dimension,myPath.back()); + nextNeighbor = selectNeighbor(grid,id,dimension,myPath.back()); - if ( i == 3 ) { + if ( i == 3 ) { - // This builder continues with neighbor 3 - ids.push_back(nextNeighbor); - path = myPath; + // This builder continues with neighbor 3 + ids.push_back(nextNeighbor); + path = myPath; - } else { + } else { - // Spawn new builders for neighbors 0,1,2 - buildPencilsWithNeighbors(grid,pencils,id,ids,dimension,myPath); + // Spawn new builders for neighbors 0,1,2 + buildPencilsWithNeighbors(grid,pencils,id,ids,dimension,myPath); - } + } - } + } - } + } - } else { - if(debug) { - std::cout << "I am cell " << id << ". "; - std::cout << " I am on refinement level 0." << std::endl; + } else { + if(debug) { + std::cout << "I am cell " << id << ". "; + std::cout << " I am on refinement level 0." << std::endl; + } + }// Closes if (refLvl == 0) + + // If we found a neighbor, add it to the list of ids for this pencil. + if(nextNeighbor != INVALID_CELLID) { + if (debug) { + std::cout << " Next neighbor is " << nextNeighbor << "." << std::endl; + } + ids.push_back(nextNeighbor); } - }// Closes if (refLvl == 0) - // If we found a neighbor, add it to the list of ids for this pencil. - if(nextNeighbor != INVALID_CELLID) { - if (debug) { - std::cout << " Next neighbor is " << nextNeighbor << "." << std::endl; + // Move to the next cell. + id = nextNeighbor; + + } // Closes while loop + + // Get the x,y - coordinates of the pencil (in the direction perpendicular to the pencil) + const auto coordinates = grid.get_center(ids[0]); + double x,y; + uint ix,iy,iz + switch(dimension) { + case 0: { + ix = 1; + iy = 2; + iz = 0; + break; + } + case 1: { + ix = 2; + iy = 0; + iz = 1; + break; + } + case 2: { + ix = 0; + iy = 1; + iz = 2; + break; + } + default: { + ix = 0; + iy = 1; + iz = 2; + break; + } } - ids.push_back(nextNeighbor); - } - // Move to the next cell. - id = nextNeighbor; - - } // Closes while loop - - // Get the x,y - coordinates of the pencil (in the direction perpendicular to the pencil) - const auto coordinates = grid.get_center(ids[0]); - double x,y; - switch(dimension) { - case 0: { - x = coordinates[1]; - y = coordinates[2]; - break; - } - case 1: { - x = coordinates[0]; - y = coordinates[2]; - break; - } - case 2: { - x = coordinates[0]; - y = coordinates[1]; - break; - } - default: { - x = 0.0; - y = 0.0; - break; - } - } - pencils.addPencil(ids,x,y); - return pencils; + x = coordinates[ix]; + y = coordinates[iy]; + // z = vector; + + // for( auto id: ids ) { + // coordinates = grid.get_center(id); + // z.push_back(coordinates[iz]) + // } + + pencils.addPencil(ids,x,y); + return pencils; } void printVector(vector v) { - for (auto k = v.begin(); k != v.end(); ++k) - std::cout << *k << ' '; - std::cout << "\n"; + for (auto k = v.begin(); k != v.end(); ++k) + std::cout << *k << ' '; + std::cout << "\n"; } int main(int argc, char* argv[]) { - if (MPI_Init(&argc, &argv) != MPI_SUCCESS) { - // cerr << "Coudln't initialize MPI." 
<< endl; - abort(); - } + if (MPI_Init(&argc, &argv) != MPI_SUCCESS) { + // cerr << "Coudln't initialize MPI." << endl; + abort(); + } - MPI_Comm comm = MPI_COMM_WORLD; + MPI_Comm comm = MPI_COMM_WORLD; - int rank = 0, comm_size = 0; - MPI_Comm_rank(comm, &rank); - MPI_Comm_size(comm, &comm_size); + int rank = 0, comm_size = 0; + MPI_Comm_rank(comm, &rank); + MPI_Comm_size(comm, &comm_size); - dccrg::Dccrg grid; - - // paremeters - const uint xDim = 9; - const uint yDim = 3; - const uint zDim = 1; - const std::array grid_size = {{xDim,yDim,zDim}}; - const int dimension = 0; - const bool doRefine = true; - const std::array refinementIds = {{10,14,64,72}}; + dccrg::Dccrg grid; + + // paremeters + const uint xDim = 9; + const uint yDim = 3; + const uint zDim = 1; + const std::array grid_size = {{xDim,yDim,zDim}}; + const int dimension = 0; + const bool doRefine = true; + const std::array refinementIds = {{10,14,64,72}}; - grid.initialize(grid_size, comm, "RANDOM", 1); + grid.initialize(grid_size, comm, "RANDOM", 1); - grid.balance_load(); + grid.balance_load(); - if(doRefine) { - for(uint i = 0; i < refinementIds.size(); i++) { - if(refinementIds[i] > 0) { - grid.refine_completely(refinementIds[i]); - grid.stop_refining(); + if(doRefine) { + for(uint i = 0; i < refinementIds.size(); i++) { + if(refinementIds[i] > 0) { + grid.refine_completely(refinementIds[i]); + grid.stop_refining(); + } } - } - } + } - grid.balance_load(); + grid.balance_load(); - auto cells = grid.cells; - sort(cells.begin(), cells.end()); + auto cells = grid.cells; + sort(cells.begin(), cells.end()); - vector ids; - vector startingIds; + vector ids; + vector startingIds; - for (const auto& cell: cells) { - // std::cout << "Data of cell " << cell.id << " is stored at " << cell.data << std::endl; - // Collect a list of cell ids. - ids.push_back(cell.id); - - // Collect a list of cell ids that do not have a neighbor in the negative direction - vector negativeNeighbors; - for (auto neighbor : grid.get_face_neighbors_of(cell.id)) { + for (const auto& cell: cells) { + // std::cout << "Data of cell " << cell.id << " is stored at " << cell.data << std::endl; + // Collect a list of cell ids. 
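+      // (Every local cell id goes into ids; the subset with no negative-side
+      // neighbor, collected into startingIds below, provides the pencil seeds.)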
+ ids.push_back(cell.id); + + // Collect a list of cell ids that do not have a neighbor in the negative direction + vector negativeNeighbors; + for (auto neighbor : grid.get_face_neighbors_of(cell.id)) { - if (neighbor.second == - (dimension + 1)) - negativeNeighbors.push_back(neighbor.first); - } - if (negativeNeighbors.size() == 0) - startingIds.push_back(cell.id); - } + if (neighbor.second == - (dimension + 1)) + negativeNeighbors.push_back(neighbor.first); + } + if (negativeNeighbors.size() == 0) + startingIds.push_back(cell.id); + } - std::cout << "Starting cell ids for pencils are "; - printVector(startingIds); + std::cout << "Starting cell ids for pencils are "; + printVector(startingIds); - sort(ids.begin(),ids.end()); + sort(ids.begin(),ids.end()); - vector idsInitial; - vector path; - setOfPencils pencils; + vector idsInitial; + vector path; + setOfPencils pencils; - for (auto id : startingIds) { - pencils = buildPencilsWithNeighbors(grid,pencils,id,idsInitial,dimension,path); - } + for (auto id : startingIds) { + pencils = buildPencilsWithNeighbors(grid,pencils,id,idsInitial,dimension,path); + } - uint ibeg = 0; - uint iend = 0; + uint ibeg = 0; + uint iend = 0; - std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; - std::cout << "(x, y): indices " << std::endl; - std::cout << "-----------------------------------------------------------------" << std::endl; - for (uint i = 0; i < pencils.N; i++) { - iend += pencils.lengthOfPencils[i]; - std::cout << "(" << pencils.x[i] << ", " << pencils.y[i] << "): "; - for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { - std::cout << *j << " "; - } - ibeg = iend; - std::cout << std::endl; - } + std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; + std::cout << "(x, y): indices " << std::endl; + std::cout << "-----------------------------------------------------------------" << std::endl; + for (uint i = 0; i < pencils.N; i++) { + iend += pencils.lengthOfPencils[i]; + std::cout << "(" << pencils.x[i] << ", " << pencils.y[i] << "): "; + for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { + std::cout << *j << " "; + } + ibeg = iend; + std::cout << std::endl; + } - std::ofstream outfile; + std::ofstream outfile; - grid.write_vtk_file("test.vtk"); - - outfile.open("test.vtk", std::ofstream::app); - // write each cells id - outfile << "CELL_DATA " << cells.size() << std::endl; - outfile << "SCALARS id int 1" << std::endl; - outfile << "LOOKUP_TABLE default" << std::endl; - for (const auto& cell: cells) { - outfile << cell.id << std::endl; - } - outfile.close(); + grid.write_vtk_file("test.vtk"); + + outfile.open("test.vtk", std::ofstream::app); + // write each cells id + outfile << "CELL_DATA " << cells.size() << std::endl; + outfile << "SCALARS id int 1" << std::endl; + outfile << "LOOKUP_TABLE default" << std::endl; + for (const auto& cell: cells) { + outfile << cell.id << std::endl; + } + outfile.close(); - MPI_Finalize(); + MPI_Finalize(); - return 0; + return 0; } From 2db8caef3e99d3facefbfdff67f9d157e538b672 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 20 Aug 2018 11:20:17 +0300 Subject: [PATCH 040/602] Compiling version of cpu_trans_map_amr.cpp. not linked into Vlasovsolver in this revision. 
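Note that setOfPencils::getIds still has an off-by-one in this revision: the
loop runs i <= iend over what is a half-open range, and pencilId == N is
accepted although valid ids are 0..N-1. A minimal corrected sketch, assuming
the same flattened storage, would be:

std::vector<CellID> getIds(uint pencilId) const {
   std::vector<CellID> idsOut;
   // Valid pencil ids are 0..N-1.
   if (pencilId >= N) {
      return idsOut;
   }
   // Offset of this pencil in the flattened ids vector.
   uint ibeg = 0;
   for (uint i = 0; i < pencilId; ++i) {
      ibeg += lengthOfPencils[i];
   }
   const uint iend = ibeg + lengthOfPencils[pencilId];
   // Half-open range: i < iend, not i <= iend.
   for (uint i = ibeg; i < iend; ++i) {
      idsOut.push_back(ids[i]);
   }
   return idsOut;
}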
--- MAKE/Makefile.appa | 5 +- Makefile | 9 +- vlasovsolver/cpu_trans_map.cpp | 2 +- vlasovsolver/cpu_trans_map.hpp | 25 +- vlasovsolver/cpu_trans_map_amr.cpp | 572 ++++++++++++++++++++++------- 5 files changed, 465 insertions(+), 148 deletions(-) diff --git a/MAKE/Makefile.appa b/MAKE/Makefile.appa index 02c84110a..10d7665bf 100644 --- a/MAKE/Makefile.appa +++ b/MAKE/Makefile.appa @@ -48,7 +48,8 @@ FLAGS = #GNU flags: CC_BRAND = gcc CC_BRAND_VERSION = 5.4.0 -CXXFLAGS += -O3 -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2 +#CXXFLAGS += -O3 -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2 +CXXFLAGS += -g -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2 testpackage: CXXFLAGS = -O2 -fopenmp -funroll-loops -std=c++0x -fabi-version=0 -mavx MATHFLAGS = -ffast-math @@ -90,7 +91,7 @@ LIB_PAPI = -L$(LIBRARY_PREFIX)/papi/lib -lpapi INC_EIGEN = -I$(LIBRARY_PREFIX)/eigen/ INC_DCCRG = -I$(LIBRARY_PREFIX)/dccrg/ INC_VECTORCLASS = -I$(LIBRARY_PREFIX)/vectorclass - +INC_FSGRID = -I$(LIBRARY_PREFIX)/fsgrid diff --git a/Makefile b/Makefile index 35749972e..3bbdc52fa 100644 --- a/Makefile +++ b/Makefile @@ -169,9 +169,11 @@ DEPS_CPU_MOMENTS = ${DEPS_COMMON} ${DEPS_CELL} vlasovmover.h vlasovsolver/cpu_mo DEPS_CPU_TRANS_MAP = ${DEPS_COMMON} ${DEPS_CELL} grid.h vlasovsolver/vec.h vlasovsolver/cpu_trans_map.hpp vlasovsolver/cpu_trans_map.cpp +DEPS_CPU_TRANS_MAP_AMR = ${DEPS_COMMON} ${DEPS_CELL} grid.h vlasovsolver/vec.h vlasovsolver/cpu_trans_map.hpp vlasovsolver/cpu_trans_map.cpp vlasovsolver/cpu_trans_map_amr.hpp vlasovsolver/cpu_trans_map_amr.cpp + DEPS_VLSVMOVER = ${DEPS_CELL} vlasovsolver/vlasovmover.cpp vlasovsolver/cpu_acc_map.hpp vlasovsolver/cpu_acc_intersections.hpp \ vlasovsolver/cpu_acc_intersections.hpp vlasovsolver/cpu_acc_semilag.hpp vlasovsolver/cpu_acc_transform.hpp \ - vlasovsolver/cpu_moments.h vlasovsolver/cpu_trans_map.hpp + vlasovsolver/cpu_moments.h vlasovsolver/cpu_trans_map.hpp vlasovsolver/cpu_trans_map_amr.hpp DEPS_VLSVMOVER_AMR = ${DEPS_CELL} vlasovsolver_amr/vlasovmover.cpp vlasovsolver_amr/cpu_acc_map.hpp vlasovsolver_amr/cpu_acc_intersections.hpp \ vlasovsolver_amr/cpu_acc_intersections.hpp vlasovsolver_amr/cpu_acc_semilag.hpp vlasovsolver_amr/cpu_acc_transform.hpp \ @@ -199,7 +201,7 @@ ifeq ($(MESH),AMR) OBJS += cpu_moments.o else OBJS += cpu_acc_intersections.o cpu_acc_map.o cpu_acc_sort_blocks.o cpu_acc_load_blocks.o cpu_acc_semilag.o cpu_acc_transform.o \ - cpu_moments.o cpu_trans_map.o + cpu_moments.o cpu_trans_map.o cpu_trans_map_amr.o endif # Add field solver objects @@ -422,6 +424,9 @@ cpu_acc_transform.o: ${DEPS_CPU_ACC_TRANSFORM} cpu_trans_map.o: ${DEPS_CPU_TRANS_MAP} ${CMP} ${CXXFLAGS} ${FLAG_OPENMP} ${MATHFLAGS} ${FLAGS} -c vlasovsolver/cpu_trans_map.cpp ${INC_EIGEN} ${INC_DCCRG} ${INC_FSGRID} ${INC_PROFILE} ${INC_VECTORCLASS} ${INC_ZOLTAN} ${INC_VLSV} ${INC_BOOST} +cpu_trans_map_amr.o: ${DEPS_CPU_TRANS_MAP} + ${CMP} ${CXXFLAGS} ${FLAG_OPENMP} ${MATHFLAGS} ${FLAGS} -c vlasovsolver/cpu_trans_map_amr.cpp ${INC_EIGEN} ${INC_DCCRG} ${INC_FSGRID} ${INC_PROFILE} ${INC_VECTORCLASS} ${INC_ZOLTAN} ${INC_VLSV} ${INC_BOOST} + vlasovmover.o: ${DEPS_VLSVMOVER} ${CMP} ${CXXFLAGS} ${FLAG_OPENMP} ${MATHFLAGS} ${FLAGS} -c vlasovsolver/vlasovmover.cpp -I$(CURDIR) ${INC_BOOST} ${INC_EIGEN} ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_PROFILE} ${INC_VECTORCLASS} ${INC_EIGEN} ${INC_VLSV} endif diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index 462f5f7d3..a459f6054 
100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -257,7 +257,7 @@ void compute_spatial_target_neighbors(const dccrg::Dccrg& mpiGrid, - const uint dimension,int direction,const uint popID); +void update_remote_mapping_contribution(dccrg::Dccrg& mpiGrid, + const uint dimension, + int direction, + const uint popID); + +void compute_spatial_source_neighbors(const dccrg::Dccrg& mpiGrid, + const CellID& cellID, + const uint dimension, + SpatialCell **neighbors); + +void compute_spatial_target_neighbors(const dccrg::Dccrg& mpiGrid, + const CellID& cellID, + const uint dimension, + SpatialCell **neighbors); +void copy_trans_block_data(SpatialCell** source_neighbors, + const vmesh::GlobalID blockGID, + Vec* values, + const unsigned char* const cellid_transpose, + const uint popID); + #endif diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index ad76ef643..abc7ce60a 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1,16 +1,301 @@ //#include "vlasovsolver/cpu_1d_ppm_nonuniform.hpp" -#include "vlasovsolver/cpu_1d_ppm_nonuniform_conserving.hpp" +#include "cpu_1d_ppm_nonuniform_conserving.hpp" +#include "vec.h" +#include "../grid.h" +#include "../object_wrapper.h" +#include "cpu_trans_map_amr.hpp" +#include "cpu_trans_map.hpp" + +using namespace std; +using namespace spatial_cell; + +// void compute_spatial_source_neighbors(const dccrg::Dccrg& mpiGrid, +// const CellID& cellID,const uint dimension,SpatialCell **neighbors); +// void compute_spatial_target_neighbors(const dccrg::Dccrg& mpiGrid, +// const CellID& cellID,const uint dimension,SpatialCell **neighbors); +// void copy_trans_block_data(SpatialCell** source_neighbors,const vmesh::GlobalID blockGID, +// Vec* values,const unsigned char* const cellid_transpose,const uint popID); +// CellID get_spatial_neighbor(const dccrg::Dccrg& mpiGrid, +// const CellID& cellID,const bool include_first_boundary_layer, +// const int spatial_di,const int spatial_dj,const int spatial_dk); +// SpatialCell* get_spatial_neighbor_pointer(const dccrg::Dccrg& mpiGrid, +// const CellID& cellID,const bool include_first_boundary_layer, +// const int spatial_di,const int spatial_dj,const int spatial_dk); +// void store_trans_block_data(SpatialCell** target_neighbors,const vmesh::GlobalID blockGID, +// Vec* __restrict__ target_values, +// const unsigned char* const cellid_transpose,const uint popID); + +// // indices in padded source block, which is of type Vec with VECL +// // element sin each vector. b_k is the block index in z direction in +// // ordinary space [- VLASOV_STENCIL_WIDTH to VLASOV_STENCIL_WIDTH], +// // i,j,k are the cell ids inside on block (i in vector elements). +// // Vectors with same i,j,k coordinates, but in different spatial cells, are consequtive +// //#define i_trans_ps_blockv(j, k, b_k) ( (b_k + VLASOV_STENCIL_WIDTH ) + ( (((j) * WID + (k) * WID2)/VECL) * ( 1 + 2 * VLASOV_STENCIL_WIDTH) ) ) +// #define i_trans_ps_blockv(planeVectorIndex, planeIndex, blockIndex) ( (blockIndex) + VLASOV_STENCIL_WIDTH + ( (planeVectorIndex) + (planeIndex) * VEC_PER_PLANE ) * ( 1 + 2 * VLASOV_STENCIL_WIDTH) ) + +// // indices in padded target block, which is of type Vec with VECL +// // element sin each vector. b_k is the block index in z direction in +// // ordinary space, i,j,k are the cell ids inside on block (i in vector +// // elements). 
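+// // (The target stencil is three blocks wide, one ghost block on each side,
+// // which is why the macro below shifts with blockIndex + 1.)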
+// //#define i_trans_pt_blockv(j, k, b_k) ( ( (j) * WID + (k) * WID2 + ((b_k) + 1 ) * WID3) / VECL ) +// #define i_trans_pt_blockv(planeVectorIndex, planeIndex, blockIndex) ( planeVectorIndex + planeIndex * VEC_PER_PLANE + (blockIndex + 1) * VEC_PER_BLOCK) + +// //Is cell translated? It is not translated if DO_NO_COMPUTE or if it is sysboundary cell and not in first sysboundarylayer +// bool do_translate_cell(SpatialCell* SC){ +// if(SC->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE || +// (SC->sysBoundaryLayer != 1 && SC->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY)) +// return false; +// else +// return true; +// } + +// /* +// * return INVALID_CELLID if the spatial neighbor does not exist, or if +// * it is a cell that is not computed. If the +// * include_first_boundary_layer flag is set, then also first boundary +// * layer is inlcuded (does not return INVALID_CELLID). +// * This does not use dccrg's get_neighbor_of function as it does not support computing neighbors for remote cells +// */ +// CellID get_spatial_neighbor(const dccrg::Dccrg& mpiGrid, +// const CellID& cellID, +// const bool include_first_boundary_layer, +// const int spatial_di, +// const int spatial_dj, +// const int spatial_dk ) { +// dccrg::Types<3>::indices_t indices_unsigned = mpiGrid.mapping.get_indices(cellID); +// int64_t indices[3]; +// dccrg::Grid_Length::type length = mpiGrid.mapping.length.get(); + +// //compute raw new indices +// indices[0] = spatial_di + indices_unsigned[0]; +// indices[1] = spatial_dj + indices_unsigned[1]; +// indices[2] = spatial_dk + indices_unsigned[2]; + +// //take periodicity into account +// for(uint i = 0; i<3; i++) { +// if(mpiGrid.topology.is_periodic(i)) { +// while(indices[i] < 0 ) +// indices[i] += length[i]; +// while(indices[i] >= length[i] ) +// indices[i] -= length[i]; +// } +// } +// //return INVALID_CELLID for cells outside system (non-periodic) +// for(uint i = 0; i<3; i++) { +// if(indices[i]< 0) +// return INVALID_CELLID; +// if(indices[i]>=length[i]) +// return INVALID_CELLID; +// } +// //store nbr indices into the correct datatype +// for(uint i = 0; i<3; i++) { +// indices_unsigned[i] = indices[i]; +// } +// //get nbrID +// CellID nbrID = mpiGrid.mapping.get_cell_from_indices(indices_unsigned,0); +// if (nbrID == dccrg::error_cell ) { +// std::cerr << __FILE__ << ":" << __LINE__ +// << " No neighbor for cell?" << cellID +// << " at offsets " << spatial_di << ", " << spatial_dj << ", " << spatial_dk +// << std::endl; +// abort(); +// } + +// // not existing cell or do not compute +// if( mpiGrid[nbrID]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) +// return INVALID_CELLID; + +// //cell on boundary, but not first layer and we want to include +// //first layer (e.g. when we compute source cells) +// if( include_first_boundary_layer && +// mpiGrid[nbrID]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && +// mpiGrid[nbrID]->sysBoundaryLayer != 1 ) { +// return INVALID_CELLID; +// } + +// //cell on boundary, and we want none of the layers, +// //invalid.(e.g. when we compute targets) +// if( !include_first_boundary_layer && +// mpiGrid[nbrID]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY){ +// return INVALID_CELLID; +// } + +// return nbrID; //no AMR +// } + + +// /* +// * return NULL if the spatial neighbor does not exist, or if +// * it is a cell that is not computed. If the +// * include_first_boundary_layer flag is set, then also first boundary +// * layer is inlcuded (does not return INVALID_CELLID). 
+// * This does not use dccrg's get_neighbor_of function as it does not support computing neighbors for remote cells + + +// */ + +// SpatialCell* get_spatial_neighbor_pointer(const dccrg::Dccrg& mpiGrid, +// const CellID& cellID, +// const bool include_first_boundary_layer, +// const int spatial_di, +// const int spatial_dj, +// const int spatial_dk ) { +// CellID nbrID=get_spatial_neighbor(mpiGrid, cellID, include_first_boundary_layer, spatial_di, spatial_dj, spatial_dk); + +// if(nbrID!=INVALID_CELLID) +// return mpiGrid[nbrID]; +// else +// return NULL; +// } + +// /*compute spatial neighbors for source stencil with a size of 2* +// * VLASOV_STENCIL_WIDTH + 1, cellID at VLASOV_STENCIL_WIDTH. First +// * bondary layer included. Invalid cells are replaced by closest good +// * cells (i.e. boundary condition uses constant extrapolation for the +// * stencil values at boundaries*/ + +// void compute_spatial_source_neighbors(const dccrg::Dccrg& mpiGrid, +// const CellID& cellID, +// const uint dimension, +// SpatialCell **neighbors){ +// for(int i = -VLASOV_STENCIL_WIDTH; i <= VLASOV_STENCIL_WIDTH; i++){ +// switch (dimension){ +// case 0: +// neighbors[i + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, cellID, true, i, 0, 0); +// break; +// case 1: +// neighbors[i + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, cellID, true, 0, i, 0); +// break; +// case 2: +// neighbors[i + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, cellID, true, 0, 0, i); +// break; +// } +// } + +// SpatialCell* last_good_cell = mpiGrid[cellID]; +// /*loop to neative side and replace all invalid cells with the closest good cell*/ +// for(int i = -1;i>=-VLASOV_STENCIL_WIDTH;i--){ +// if(neighbors[i + VLASOV_STENCIL_WIDTH] == NULL) +// neighbors[i + VLASOV_STENCIL_WIDTH] = last_good_cell; +// else +// last_good_cell = neighbors[i + VLASOV_STENCIL_WIDTH]; +// } + +// last_good_cell = mpiGrid[cellID]; +// /*loop to positive side and replace all invalid cells with the closest good cell*/ +// for(int i = 1; i <= VLASOV_STENCIL_WIDTH; i++){ +// if(neighbors[i + VLASOV_STENCIL_WIDTH] == NULL) +// neighbors[i + VLASOV_STENCIL_WIDTH] = last_good_cell; +// else +// last_good_cell = neighbors[i + VLASOV_STENCIL_WIDTH]; +// } +// } + +// /*compute spatial target neighbors, stencil has a size of 3. No boundary cells are included*/ +// void compute_spatial_target_neighbors(const dccrg::Dccrg& mpiGrid, +// const CellID& cellID, +// const uint dimension, +// SpatialCell **neighbors){ + +// for(int i = -1; i <= 1; i++){ +// switch (dimension){ +// case 0: +// neighbors[i + 1] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, i, 0, 0); +// break; +// case 1: +// neighbors[i + 1] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, i, 0); +// break; +// case 2: +// neighbors[i + 1] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, 0, i); +// break; +// } +// } + +// } + +// /* Copy the data to the temporary values array, so that the +// * dimensions are correctly swapped. Also, copy the same block for +// * then neighboring spatial cells (in the dimension). neighbors +// * generated with compute_spatial_neighbors_wboundcond). +// * +// * This function must be thread-safe. +// * +// * @param source_neighbors Array containing the VLASOV_STENCIL_WIDTH closest +// * spatial neighbors of this cell in the propagated dimension. +// * @param blockGID Global ID of the velocity block. +// * @param values Vector where loaded data is stored. 
+// * @param cellid_transpose +// * @param popID ID of the particle species. +// */ +// inline void copy_trans_block_data( +// SpatialCell** source_neighbors, +// const vmesh::GlobalID blockGID, +// Vec* values, +// const unsigned char* const cellid_transpose, +// const uint popID) { + +// /*load pointers to blocks and prefetch them to L1*/ +// Realf* blockDatas[VLASOV_STENCIL_WIDTH * 2 + 1]; +// for (int b = -VLASOV_STENCIL_WIDTH; b <= VLASOV_STENCIL_WIDTH; ++b) { +// SpatialCell* srcCell = source_neighbors[b + VLASOV_STENCIL_WIDTH]; +// const vmesh::LocalID blockLID = srcCell->get_velocity_block_local_id(blockGID,popID); +// if (blockLID != srcCell->invalid_local_id()) { +// blockDatas[b + VLASOV_STENCIL_WIDTH] = srcCell->get_data(blockLID,popID); +// //prefetch storage pointers to L1 +// _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]), _MM_HINT_T0); +// _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 64, _MM_HINT_T0); +// _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 128, _MM_HINT_T0); +// _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 192, _MM_HINT_T0); +// if(VPREC == 8) { +// //prefetch storage pointers to L1 +// _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 256, _MM_HINT_T0); +// _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 320, _MM_HINT_T0); +// _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 384, _MM_HINT_T0); +// _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 448, _MM_HINT_T0); +// } +// } +// else{ +// blockDatas[b + VLASOV_STENCIL_WIDTH] = NULL; +// } +// } + +// // Copy volume averages of this block from all spatial cells: +// for (int b = -VLASOV_STENCIL_WIDTH; b <= VLASOV_STENCIL_WIDTH; ++b) { +// if(blockDatas[b + VLASOV_STENCIL_WIDTH] != NULL) { +// Realv blockValues[WID3]; +// const Realf* block_data = blockDatas[b + VLASOV_STENCIL_WIDTH]; +// // Copy data to a temporary array and transpose values so that mapping is along k direction. +// // spatial source_neighbors already taken care of when +// // creating source_neighbors table. If a normal spatial cell does not +// // simply have the block, its value will be its null_block which +// // is fine. 
This null_block has a value of zero in data, and that +// // is thus the velocity space boundary +// for (uint i=0; i get_mpi_datatype() - { - return std::make_tuple(this, 0, MPI_BYTE); - } - -}; struct setOfPencils { @@ -25,7 +310,7 @@ struct setOfPencils { sumOfLengths = 0; } - void addPencil(std::vector idsIn, Real xIn, Real yIn, vector zIn) { + void addPencil(std::vector idsIn, Real xIn, Real yIn) { N += 1; sumOfLengths += idsIn.size(); @@ -33,14 +318,15 @@ struct setOfPencils { ids.insert(ids.end(),idsIn.begin(),idsIn.end()); x.push_back(xIn); y.push_back(yIn); - z.insert(z.end(),zIn.begin(),zIn.end()); } std::vector getIds(uint pencilId) { + vector idsOut; + if (pencilId > N) { - return; + return idsOut; } CellID ibeg = 0; @@ -48,8 +334,6 @@ struct setOfPencils { ibeg += lengthOfPencils[i]; } CellID iend = ibeg + lengthOfPencils[pencilId]; - - vector idsOut; for (uint i = ibeg; i <= iend; i++) { idsOut.push_back(ids[i]); @@ -60,7 +344,7 @@ struct setOfPencils { }; -CellID selectNeighbor(dccrg::Dccrg &grid, CellID id, int dimension = 0, uint path = 0) { +CellID selectNeighbor(dccrg::Dccrg &grid, CellID id, int dimension = 0, uint path = 0) { const auto neighbors = grid.get_face_neighbors_of(id); @@ -102,7 +386,7 @@ CellID selectNeighbor(dccrg::Dccrg &grid, CellID id, int dimension = } -setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg &grid, +setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg grid, setOfPencils &pencils, CellID startingId, vector ids, uint dimension, vector path) { @@ -236,136 +520,135 @@ setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg &grid, // Get the x,y - coordinates of the pencil (in the direction perpendicular to the pencil) const auto coordinates = grid.get_center(ids[0]); double x,y; - uint ix,iy,iz - switch(dimension) { - case 0: { - ix = 1; - iy = 2; - iz = 0; - break; - } - case 1: { - ix = 2; - iy = 0; - iz = 1; - break; - } - case 2: { - ix = 0; - iy = 1; - iz = 2; - break; - } - default: { - ix = 0; - iy = 1; - iz = 2; - break; - } - } - + uint ix,iy,iz; + + switch(dimension) { + case 0: { + ix = 1; + iy = 2; + iz = 0; + break; + } + case 1: { + ix = 2; + iy = 0; + iz = 1; + break; + } + case 2: { + ix = 0; + iy = 1; + iz = 2; + break; + } + default: { + ix = 0; + iy = 1; + iz = 2; + break; + } + } + x = coordinates[ix]; y = coordinates[iy]; - + pencils.addPencil(ids,x,y); return pencils; } -void propagatePencil(Vec dr[], Vec values[], Vec z_translation, uint blocks_per_dim ) { +//void propagatePencil(Vec dr[], Vec values, Vec z_translation, uint blocks_per_dim ) { +void propagatePencil(Vec dr[], Vec values[], Vec z_translation, uint lengthOfPencil, uint nSourceNeighborsPerCell) { + // Assuming 1 neighbor in the target array because of the CFL condition + // In fact propagating to > 1 neighbor will give an error + const uint nTargetNeighborsPerCell = 1; + // Determine direction of translation // part of density goes here (cell index change along spatial direcion) - const int target_scell_index = (z_translation > 0) ? 
1: -1; + Vecb positiveTranslationDirection = (z_translation > Vec(0.0)); + //Veci target_scell_index = truncate_to_int(select(z_translation > Vec(0.0), 1, -1)); // Vector buffer where we write data, initialized to 0*/ - Vec targetValues[(blocks_per_dim + 2) * WID]; + Vec targetValues[lengthOfPencil + 2 * nTargetNeighborsPerCell]; - for (uint k_block = 0; k_block < blocks_per_dim; k_block++){ - - for (uint k_cell=0; k_cell < WID; ++k_cell) { - - uint gid = k_block * WID + k_cell + WID; - // init target_values - targetValues[gid] = 0.0; + for (uint i_target = 0; i_target < lengthOfPencil + nTargetNeighborsPerCell; i_target++) { - } - } - for (uint k_block = 0; k_block < blocks_per_dim; k_block++){ - - for (uint k_cell=0; k_cell < WID; ++k_cell){ - - uint gid = k_block * WID + k_cell + WID; - //uint gid = (blocks_per_dim + 2) * WID - (k_block * WID + k_cell + WID); + // init target_values + targetValues[i_target] = 0.0; - // Calculate normalized coordinates in current cell. - // The coordinates (scaled units from 0 to 1) between which we will - // integrate to put mass in the target neighboring cell. - // Normalize the coordinates to the origin cell. Then we scale with the difference - // in volume between target and origin later when adding the integrated value. - Realv z_1,z_2; - if ( z_translation < 0 ) { - z_1 = 0; - z_2 = -z_translation / dr[gid]; - } else { - z_1 = 1.0 - z_translation / dr[gid]; - z_2 = 1.0; - } - - if( abs(z_1) > 1.0 || abs(z_2) > 1.0 ) { - std::cout << "Error, CFL condition violated\n"; - std::cout << "Exiting\n"; - std::exit(1); - } + } + // Go from 0 to length here to propagate all the cells in the pencil + for (uint i = 0; i < lengthOfPencil; i++){ + + // We padded the target array by 1 cell on both sides + // Assume the source array has been padded by nSourceNeighborsPerCell + // To have room for propagation. Refer to dr and values by i_cell + // and targetValues by i_target + uint i_cell = i + nSourceNeighborsPerCell; + uint i_target = i + nTargetNeighborsPerCell; - // Compute polynomial coefficients - Vec a[3]; - //compute_ppm_coeff_nonuniform(dr, values, h4, gid + target_scell_index, a); - compute_ppm_coeff_nonuniform(dr, values, h4, gid, a); - - // Compute integral - const Vec ngbr_target_density = - z_2 * ( a[0] + z_2 * ( a[1] + z_2 * a[2] ) ) - - z_1 * ( a[0] + z_1 * ( a[1] + z_1 * a[2] ) ); - - // Store mapped density in two target cells - // in the neighbor cell we will put this density - targetValues[gid + target_scell_index] += ngbr_target_density * dr[gid] / dr[gid + target_scell_index]; - // in the current original cells we will put the rest of the original density - targetValues[gid] += values[gid] - ngbr_target_density; + // Calculate normalized coordinates in current cell. + // The coordinates (scaled units from 0 to 1) between which we will + // integrate to put mass in the target neighboring cell. + // Normalize the coordinates to the origin cell. Then we scale with the difference + // in volume between target and origin later when adding the integrated value. 
+ Vec z_1,z_2; + z_1 = select(positiveTranslationDirection, 1.0 - z_translation / dr[i_cell], 0.0); + z_2 = select(positiveTranslationDirection, 1.0, - z_translation / dr[i_cell]); + + if( horizontal_or(abs(z_1) > Vec(1.0)) || horizontal_or(abs(z_2) > Vec(1.0)) ) { + std::cout << "Error, CFL condition violated\n"; + std::cout << "Exiting\n"; + std::exit(1); } + + // Compute polynomial coefficients + Vec a[3]; + compute_ppm_coeff_nonuniform(dr, values, h4, i_cell, a); + + // Compute integral + const Vec ngbr_target_density = + z_2 * ( a[0] + z_2 * ( a[1] + z_2 * a[2] ) ) - + z_1 * ( a[0] + z_1 * ( a[1] + z_1 * a[2] ) ); + + // Store mapped density in two target cells + // in the neighbor cell we will put this density + //targetValues[i_cell + target_scell_index] += ngbr_target_density * dr[i_cell] / dr[i_cell + target_scell_index]; + targetValues[i_target + 1] += select( positiveTranslationDirection,ngbr_target_density * dr[i_cell] / dr[i_cell + 1],Vec(0.0)); + targetValues[i_target - 1] += select(!positiveTranslationDirection,ngbr_target_density * dr[i_cell] / dr[i_cell - 1],Vec(0.0)); + // in the current original cells we will put the rest of the original density + targetValues[i_target] += values[i_cell] - ngbr_target_density; } // Store target data into source data - for (uint k_block = 0; k_block& mpiGrid, +bool trans_map_1d_amr(const dccrg::Dccrg& mpiGrid, const vector& localPropagatedCells, const vector& remoteTargetCells, const uint dimension, const Realv dt, const uint popID) { - vector dz; /*< cell size in the dimension of the pencil */ Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ unsigned char cellid_transpose[WID3]; /*< defines the transpose for the solver internal (transposed) id: i + j*WID + k*WID2 to actual one*/ - + const uint blocks_per_dim = 1; + // return if there's no cells to propagate - if(localPropagatedCells.size() == 0) + if(localPropagatedCells.size() == 0) + std::cout << "Returning because of no cells" << std::endl; return true; // Vector with all cell ids @@ -378,11 +661,16 @@ bool trans_map_1d(const dccrg::Dccrg& mpi std::vector allCellsPointer(allCells.size()); std::vector sourceNeighbors(localPropagatedCells.size() * nSourceNeighborsPerCell); std::vector targetNeighbors(3 * localPropagatedCells.size() ); - + + Vec allCellsDz[allCells.size()]; + // Initialize allCellsPointer #pragma omp parallel for for(uint celli = 0; celli < allCells.size(); celli++){ allCellsPointer[celli] = mpiGrid[allCells[celli]]; + + // At the same time, calculate dz's and store them in an array. + allCellsDz[celli] = P::dz_ini / pow(2.0, mpiGrid.get_refinement_level(celli)); } // **************************************************************************** @@ -390,13 +678,14 @@ bool trans_map_1d(const dccrg::Dccrg& mpi // compute pencils => set of pencils (shared datastructure) vector seedIds; -#pragma omp parallel for - for(auto celli: localPropagatedCells){ + //#pragma omp parallel for + for(uint celli = 0; celli < localPropagatedCells.size(); celli++){ + CellID localCelli = localPropagatedCells[celli]; // Collect a list of cell ids that do not have a neighbor in the negative direction // These are the seed ids for the pencils. vector negativeNeighbors; // Returns all neighbors as (id, direction-dimension) pairs. 
- for ( const auto neighbor : grid.get_face_neighbors_of(allCellsPointer[celli]) ) { + for ( const auto neighbor : mpiGrid.get_face_neighbors_of(localCelli ) ) { // select the neighbor in the negative dimension of the propagation if (neighbor.second == - (dimension + 1)) @@ -406,7 +695,7 @@ bool trans_map_1d(const dccrg::Dccrg& mpi } // if no neighbors were found in the negative direction, add this cell id to the seed cells if (negativeNeighbors.size() == 0) - seedIds.push_back(celli); + seedIds.push_back(localCelli); } // Empty vectors for internal use of buildPencilsWithNeighbors. Could be default values but @@ -503,7 +792,7 @@ bool trans_map_1d(const dccrg::Dccrg& mpi int t1 = phiprof::initializeTimer("mappingAndStore"); -#pragma omp parallel +#pragma omp parallel { //std::vector targetsValid(localPropagatedCells.size()); //std::vector allCellsBlockLocalID(allCells.size()); @@ -526,12 +815,12 @@ bool trans_map_1d(const dccrg::Dccrg& mpi // This loop only has one iteration for now for ( auto pencils: pencilSets ) { - // Allocate targetdata sum(lengths of pencils)*WID3) + // Allocate targetdata sum(lengths of pencils)*WID3) Vec targetData[pencils.sumOfLengths * WID3]; // Initialize targetdata to 0 - for( uint i = 0; i& mpi for(uint pencili = 0; pencili < pencils.N; pencili++){ // Allocate source data: sourcedata pencilIds = pencils.getIds(pencili); + for( auto celli: pencilIds) { compute_spatial_source_neighbors(mpiGrid, localPropagatedCells[celli], dimension, sourceNeighbors.data() - + celli * nSourceNeighborsPerCell); - - // load data(=> sourcedata) / (proper xy reconstruction in future) - // copied from regular code, should work? + + celli * nSourceNeighborsPerCell); + } - // TODO: Does the index of sourceData need adjustments for vector length? - copy_trans_block_data(sourceNeighbors.data() + celli * nSourceNeighborsPerCell, - blockGID, sourceData[celli], cellid_transpose, popID); - - // At the same time, calculate dz's and store them in a vector. - dz.push_back(dz_ini / 2.0 ** mpiGrid.get_refinement_level(celli)); + Vec * dzPointer = allCellsDz + pencilIds[0]; + + // load data(=> sourcedata) / (proper xy reconstruction in future) + // copied from regular code, should work? + int offset = 0; // TODO: Figure out what needs to go here. + copy_trans_block_data(sourceNeighbors.data() + offset, + blockGID, sourceData, cellid_transpose, popID); - } - - // Calculate cell centered velocity - const Realv cell_vz = (block_indices[dimension] * WID + k + 0.5) * dvz + vz_min; + + // Calculate cell centered velocity for each v cell in the block + const Vec k = (0,1,2,3); + const Vec cell_vz = (block_indices[dimension] * WID + k + 0.5) * dvz + vz_min; - const Realv z_translation = dt * cell_vz / dz[celli]; + const Vec z_translation = dt * cell_vz; // propagate pencil(blockid = velocities, pencil-ids = dzs ), - propagatePencil(dz, sourceData, z_translation, blocks_per_dim); + propagatePencil(dzPointer, sourceData, z_translation, pencils.lengthOfPencils[pencili], nSourceNeighborsPerCell); // sourcedata => targetdata[this pencil]) for (auto value: sourceData) { targetData[targetDataIndex] = value; - targetDataindex++; + targetDataIndex++; } // dealloc source data -- Should be automatic since it's declared in this iteration? 
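A side note on the velocity computation in the hunk above: in C++, `(0,1,2,3)` is the comma operator, so `const Vec k = (0,1,2,3);` broadcasts the scalar 3 into every lane instead of building the lane offsets 0..VECL-1. A minimal sketch of an explicit fill, assuming the vectorclass-style `load()` wrapped by vec.h and VECL lanes; `kArr` is a name introduced only for this sketch:

    Realv kArr[VECL];
    for (uint i = 0; i < VECL; ++i) {
       kArr[i] = i;                    // lane i carries cell offset i within the block
    }
    Vec k;
    k.load(kArr);                      // k = (0, 1, ..., VECL-1)
    const Vec cell_vz = (block_indices[dimension] * WID + k + 0.5) * dvz + vz_min;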
@@ -618,10 +907,11 @@ bool trans_map_1d(const dccrg::Dccrg& mpi } // Pointer to the data field of the velocity block Realf* blockData = spatial_cell->get_data(blockLID, popID); + // Unpack the vector data to the cell data types for(int i = 0; i < WID3 ; i++) { // Write data into target block - blockData[i] += targetData[(celli * 3 + ti) * WID3 + i]; + blockData[i] += targetData[(celli * 3 + ti)][i]; } } //} From 522e2a9a58b5925de559d4a1fb204639efc6ad0f Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 20 Aug 2018 11:39:38 +0300 Subject: [PATCH 041/602] Added header file for cpu_trans_map_amr --- vlasovsolver/cpu_trans_map_amr.hpp | 40 ++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 vlasovsolver/cpu_trans_map_amr.hpp diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp new file mode 100644 index 000000000..0b5b02b1b --- /dev/null +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -0,0 +1,40 @@ +/* + * This file is part of Vlasiator. + * Copyright 2010-2016 Finnish Meteorological Institute + * + * For details of usage, see the COPYING file and read the "Rules of the Road" + * at http://www.physics.helsinki.fi/vlasiator/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ +#ifndef CPU_TRANS_MAP_AMR_H +#define CPU_TRANS_MAP_AMR_H + +#include + +#include "vec.h" +#include "../common.h" +#include "../spatial_cell.hpp" +//bool do_translate_cell(spatial_cell::SpatialCell* SC); +bool trans_map_1d_amr(const dccrg::Dccrg& mpiGrid, + const std::vector& localPropagatedCells, + const std::vector& remoteTargetCells, + const uint dimension, + const Realv dt, + const uint popID); +//void update_remote_mapping_contribution(dccrg::Dccrg& mpiGrid, +// const uint dimension,int direction,const uint popID); + +#endif From a2c4e3e318b31fb3e2b8d4db44a0565fbb7ab438 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 20 Aug 2018 14:54:38 +0300 Subject: [PATCH 042/602] Fixed include guard --- vlasovsolver/cpu_1d_ppm_nonuniform.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vlasovsolver/cpu_1d_ppm_nonuniform.hpp b/vlasovsolver/cpu_1d_ppm_nonuniform.hpp index 35639a69b..ae613bcfe 100644 --- a/vlasovsolver/cpu_1d_ppm_nonuniform.hpp +++ b/vlasovsolver/cpu_1d_ppm_nonuniform.hpp @@ -20,8 +20,8 @@ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ -#ifndef CPU_1D_PPM_H -#define CPU_1D_PPM_H +#ifndef CPU_1D_PPM_NU_H +#define CPU_1D_PPM_NU_H #include #include "vec.h" From ce31e56f004e605ba0574092c711bf2d8c3b1a5c Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 21 Aug 2018 12:16:02 +0300 Subject: [PATCH 043/602] Moved amr functions to cpu_trans_map.cpp to get the code to execute. Might want to look at this later. 
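One thing to look at later while the code lives here: as written, setOfPencils::getIds reads one id past the end of the requested pencil, because iend is ibeg + lengthOfPencils[pencilId] and the copy loop is inclusive (i <= iend); the early-out should also be pencilId >= N, since pencil indices are zero-based. A corrected sketch using the same member names (not applied in this patch):

    std::vector<CellID> getIds(uint pencilId) {
       std::vector<CellID> idsOut;
       if (pencilId >= N) return idsOut;      // valid pencil indices are 0 .. N-1
       uint ibeg = 0;
       for (uint i = 0; i < pencilId; i++) {
          ibeg += lengthOfPencils[i];         // skip the ids of the preceding pencils
       }
       // half-open range copies exactly lengthOfPencils[pencilId] ids
       for (uint i = ibeg; i < ibeg + lengthOfPencils[pencilId]; i++) {
          idsOut.push_back(ids[i]);
       }
       return idsOut;
    }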
--- Makefile | 2 +- vlasovsolver/cpu_trans_map.cpp | 682 +++++++++++++++++++++++++++++ vlasovsolver/cpu_trans_map.hpp | 8 + vlasovsolver/cpu_trans_map_amr.cpp | 16 +- vlasovsolver/vlasovmover.cpp | 6 +- 5 files changed, 706 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 3bbdc52fa..0a2ce10b8 100644 --- a/Makefile +++ b/Makefile @@ -201,7 +201,7 @@ ifeq ($(MESH),AMR) OBJS += cpu_moments.o else OBJS += cpu_acc_intersections.o cpu_acc_map.o cpu_acc_sort_blocks.o cpu_acc_load_blocks.o cpu_acc_semilag.o cpu_acc_transform.o \ - cpu_moments.o cpu_trans_map.o cpu_trans_map_amr.o + cpu_moments.o cpu_trans_map.o #cpu_trans_map_amr.o endif # Add field solver objects diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index a459f6054..e19415a22 100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -33,6 +33,7 @@ #include "vec.h" #include "cpu_1d_plm.hpp" #include "cpu_1d_ppm.hpp" +#include "cpu_1d_ppm_nonuniform.hpp" #include "cpu_1d_pqm.hpp" #include "cpu_trans_map.hpp" @@ -783,3 +784,684 @@ void update_remote_mapping_contribution( aligned_free(receiveBuffers[c]); } } + + + + + + + + + + + +struct setOfPencils { + + uint N; // Number of pencils in the set + uint sumOfLengths; + std::vector lengthOfPencils; // Lengths of pencils + std::vector ids; // List of cells + std::vector x,y; // x,y - position + + setOfPencils() { + N = 0; + sumOfLengths = 0; + } + + void addPencil(std::vector idsIn, Real xIn, Real yIn) { + + N += 1; + sumOfLengths += idsIn.size(); + lengthOfPencils.push_back(idsIn.size()); + ids.insert(ids.end(),idsIn.begin(),idsIn.end()); + x.push_back(xIn); + y.push_back(yIn); + + } + + std::vector getIds(uint pencilId) { + + vector idsOut; + + if (pencilId > N) { + return idsOut; + } + + CellID ibeg = 0; + for (uint i = 0; i < pencilId; i++) { + ibeg += lengthOfPencils[i]; + } + CellID iend = ibeg + lengthOfPencils[pencilId]; + + for (uint i = ibeg; i <= iend; i++) { + idsOut.push_back(ids[i]); + } + + return idsOut; + } + +}; + +CellID selectNeighbor(dccrg::Dccrg &grid, CellID id, int dimension = 0, uint path = 0) { + + const auto neighbors = grid.get_face_neighbors_of(id); + const int myProcess = grid.get_process(id); + + vector < CellID > myNeighbors; + // Collect neighbor ids in the positive direction of the chosen dimension, + // that are on the same process as the origin. + // Note that dimension indexing starts from 1 (of course it does) + for (const auto cell : neighbors) { + if (cell.second == dimension + 1 && grid.get_process(cell.first) == myProcess) + myNeighbors.push_back(cell.first); + } + + CellID neighbor; + + switch( myNeighbors.size() ) { + // Since refinement can only increase by 1 level the only possibilities + // Should be 0 neighbors, 1 neighbor or 4 neighbors. 
+ case 0 : { + // did not find neighbors + neighbor = INVALID_CELLID; + break; + } + case 1 : { + neighbor = myNeighbors[0]; + break; + } + case 4 : { + neighbor = myNeighbors[path]; + break; + } + default: { + // something is wrong + neighbor = INVALID_CELLID; + throw "Invalid neighbor count!"; + break; + } + } + + return neighbor; + +} + +setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg grid, + setOfPencils &pencils, CellID startingId, + vector ids, uint dimension, + vector path) { + + const bool debug = false; + CellID nextNeighbor; + uint id = startingId; + uint startingRefLvl = grid.get_refinement_level(id); + + if( ids.size() == 0 ) + ids.push_back(startingId); + + // If the cell where we start is refined, we need to figure out which path + // to follow in future refined cells. This is a bit hacky but we have to + // use the order or the children of the parent cell to figure out which + // corner we are in. + // Maybe you could use physical coordinates here? + if( startingRefLvl > path.size() ) { + for ( uint i = path.size(); i < startingRefLvl; i++) { + auto parent = grid.get_parent(id); + auto children = grid.get_all_children(parent); + auto it = std::find(children.begin(),children.end(),id); + auto index = std::distance(children.begin(),it); + auto index2 = index; + + switch( dimension ) { + case 0: { + index2 = index / 2; + break; + } + case 1: { + index2 = index - index / 2; + break; + } + case 2: { + index2 = index % 4; + break; + } + } + path.insert(path.begin(),index2); + id = parent; + } + } + + id = startingId; + + while (id > 0) { + + // Find the refinement level in the neighboring cell. Any neighbor will do + // since refinement level can only increase by 1 between neighbors. + nextNeighbor = selectNeighbor(grid,id,dimension); + + // If there are no neighbors, we can stop. + if (nextNeighbor == 0) + break; + + uint refLvl = grid.get_refinement_level(nextNeighbor); + + if (refLvl > 0) { + + // If we have encountered this refinement level before and stored + // the path this builder follows, we will just take the same path + // again. + if ( path.size() >= refLvl ) { + + if(debug) { + std::cout << "I am cell " << id << ". "; + std::cout << "I have seen refinement level " << refLvl << " before. Path is "; + for (auto k = path.begin(); k != path.end(); ++k) + std::cout << *k << " "; + std::cout << std::endl; + } + + nextNeighbor = selectNeighbor(grid,id,dimension,path[refLvl-1]); + + } else { + + if(debug) { + std::cout << "I am cell " << id << ". "; + std::cout << "I have NOT seen refinement level " << refLvl << " before. Path is "; + for (auto k = path.begin(); k != path.end(); ++k) + std::cout << *k << ' '; + std::cout << std::endl; + } + + // New refinement level, create a path through each neighbor cell + for ( uint i : {0,1,2,3} ) { + + vector < uint > myPath = path; + myPath.push_back(i); + + nextNeighbor = selectNeighbor(grid,id,dimension,myPath.back()); + + if ( i == 3 ) { + + // This builder continues with neighbor 3 + ids.push_back(nextNeighbor); + path = myPath; + + } else { + + // Spawn new builders for neighbors 0,1,2 + buildPencilsWithNeighbors(grid,pencils,id,ids,dimension,myPath); + + } + + } + + } + + } else { + if(debug) { + std::cout << "I am cell " << id << ". "; + std::cout << " I am on refinement level 0." << std::endl; + } + }// Closes if (refLvl == 0) + + // If we found a neighbor, add it to the list of ids for this pencil. + if(nextNeighbor != INVALID_CELLID) { + if (debug) { + std::cout << " Next neighbor is " << nextNeighbor << "." 
<< std::endl; + } + ids.push_back(nextNeighbor); + } + + // Move to the next cell. + id = nextNeighbor; + + } // Closes while loop + + // Get the x,y - coordinates of the pencil (in the direction perpendicular to the pencil) + const auto coordinates = grid.get_center(ids[0]); + double x,y; + uint ix,iy,iz; + + switch(dimension) { + case 0: { + ix = 1; + iy = 2; + iz = 0; + break; + } + case 1: { + ix = 2; + iy = 0; + iz = 1; + break; + } + case 2: { + ix = 0; + iy = 1; + iz = 2; + break; + } + default: { + ix = 0; + iy = 1; + iz = 2; + break; + } + } + + x = coordinates[ix]; + y = coordinates[iy]; + + pencils.addPencil(ids,x,y); + return pencils; + +} + +//void propagatePencil(Vec dr[], Vec values, Vec z_translation, uint blocks_per_dim ) { +void propagatePencil(Vec dr[], Vec values[], Vec z_translation, uint lengthOfPencil, uint nSourceNeighborsPerCell) { + + // Assuming 1 neighbor in the target array because of the CFL condition + // In fact propagating to > 1 neighbor will give an error + const uint nTargetNeighborsPerCell = 1; + + // Determine direction of translation + // part of density goes here (cell index change along spatial direcion) + Vecb positiveTranslationDirection = (z_translation > Vec(0.0)); + //Veci target_scell_index = truncate_to_int(select(z_translation > Vec(0.0), 1, -1)); + + // Vector buffer where we write data, initialized to 0*/ + Vec targetValues[lengthOfPencil + 2 * nTargetNeighborsPerCell]; + + for (uint i_target = 0; i_target < lengthOfPencil + nTargetNeighborsPerCell; i_target++) { + + // init target_values + targetValues[i_target] = 0.0; + + } + // Go from 0 to length here to propagate all the cells in the pencil + for (uint i = 0; i < lengthOfPencil; i++){ + + // We padded the target array by 1 cell on both sides + // Assume the source array has been padded by nSourceNeighborsPerCell + // To have room for propagation. Refer to dr and values by i_cell + // and targetValues by i_target + uint i_cell = i + nSourceNeighborsPerCell; + uint i_target = i + nTargetNeighborsPerCell; + + // Calculate normalized coordinates in current cell. + // The coordinates (scaled units from 0 to 1) between which we will + // integrate to put mass in the target neighboring cell. + // Normalize the coordinates to the origin cell. Then we scale with the difference + // in volume between target and origin later when adding the integrated value. 
+ Vec z_1,z_2; + z_1 = select(positiveTranslationDirection, 1.0 - z_translation / dr[i_cell], 0.0); + z_2 = select(positiveTranslationDirection, 1.0, - z_translation / dr[i_cell]); + + if( horizontal_or(abs(z_1) > Vec(1.0)) || horizontal_or(abs(z_2) > Vec(1.0)) ) { + std::cout << "Error, CFL condition violated\n"; + std::cout << "Exiting\n"; + std::exit(1); + } + + // Compute polynomial coefficients + Vec a[3]; + compute_ppm_coeff_nonuniform(dr, values, h4, i_cell, a); + + // Compute integral + const Vec ngbr_target_density = + z_2 * ( a[0] + z_2 * ( a[1] + z_2 * a[2] ) ) - + z_1 * ( a[0] + z_1 * ( a[1] + z_1 * a[2] ) ); + + // Store mapped density in two target cells + // in the neighbor cell we will put this density + //targetValues[i_cell + target_scell_index] += ngbr_target_density * dr[i_cell] / dr[i_cell + target_scell_index]; + targetValues[i_target + 1] += select( positiveTranslationDirection,ngbr_target_density * dr[i_cell] / dr[i_cell + 1],Vec(0.0)); + targetValues[i_target - 1] += select(!positiveTranslationDirection,ngbr_target_density * dr[i_cell] / dr[i_cell - 1],Vec(0.0)); + // in the current original cells we will put the rest of the original density + targetValues[i_target] += values[i_cell] - ngbr_target_density; + } + + // Store target data into source data + for (uint i=0; i < lengthOfPencil; i++){ + + uint i_cell = i + nSourceNeighborsPerCell; + uint i_target = i + nTargetNeighborsPerCell; + + values[i_cell] = targetValues[i_target]; + + } + +} + + +bool trans_map_1d_amr(const dccrg::Dccrg& mpiGrid, + const vector& localPropagatedCells, + const vector& remoteTargetCells, + const uint dimension, + const Realv dt, + const uint popID) { + + Realv dvz,vz_min; + uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ + unsigned char cellid_transpose[WID3]; /*< defines the transpose for the solver internal (transposed) id: i + j*WID + k*WID2 to actual one*/ + const uint blocks_per_dim = 1; + + // cout << "entering trans_map_1d_amr" << endl; + + // return if there's no cells to propagate + if(localPropagatedCells.size() == 0) { + cout << "Returning because of no cells" << endl; + return false; + } + + // Vector with all cell ids + vector allCells(localPropagatedCells); + allCells.insert(allCells.end(), remoteTargetCells.begin(), remoteTargetCells.end()); + + const uint nSourceNeighborsPerCell = 1 + 2 * VLASOV_STENCIL_WIDTH; + + // Vectors of pointers to the cell structs + std::vector allCellsPointer(allCells.size()); + std::vector sourceNeighbors(localPropagatedCells.size() * nSourceNeighborsPerCell); + std::vector targetNeighbors(3 * localPropagatedCells.size() ); + + Vec allCellsDz[allCells.size()]; + + // Initialize allCellsPointer + //#pragma omp parallel for + //cout << "list of cell ids: "; + for(uint celli = 0; celli < allCells.size(); celli++){ + //cout << allCells[celli] << " "; + allCellsPointer[celli] = mpiGrid[allCells[celli]]; + + // At the same time, calculate dz's and store them in an array. 
+ allCellsDz[celli] = P::dz_ini / pow(2.0, mpiGrid.get_refinement_level(celli)); + } + //cout << endl; + + // **************************************************************************** + + // compute pencils => set of pencils (shared datastructure) + vector seedIds; + + //cout << "localpropagatedcells.size() " << localPropagatedCells.size() << endl; + //cout << "dimension " << dimension << endl; + + //#pragma omp parallel for + for(uint celli = 0; celli < localPropagatedCells.size(); ++celli){ + CellID localCelli = localPropagatedCells[celli]; + int myProcess = mpiGrid.get_process(localCelli); + // Collect a list of cell ids that do not have a neighbor in the negative direction + // These are the seed ids for the pencils. + vector negativeNeighbors; + // Returns all neighbors as (id, direction-dimension) pairs. + //cout << "neighbors of cell " << localCelli << " are "; + for ( const auto neighbor : mpiGrid.get_face_neighbors_of(localCelli ) ) { + + if ( mpiGrid.get_process(neighbor.first) == myProcess ) { + //cout << neighbor.first << "," << neighbor.second << " "; + // select the neighbor in the negative dimension of the propagation + if (neighbor.second == - (dimension + 1)) { + + // add the id of the neighbor to a list if it's on the same process + negativeNeighbors.push_back(neighbor.first); + + } + + } + } + //cout << endl; + // if no neighbors were found in the negative direction, add this cell id to the seed cells + if (negativeNeighbors.size() == 0) + seedIds.push_back(localCelli); + } + + // Empty vectors for internal use of buildPencilsWithNeighbors. Could be default values but + // default vectors are complicated. Should overload buildPencilsWithNeighbors like suggested here + // https://stackoverflow.com/questions/3147274/c-default-argument-for-vectorint + vector ids; + vector path; + + // Output vectors for ready pencils + setOfPencils pencils; + vector pencilSets; + + //cout << "Seed ids are: "; + for (const auto seedId : seedIds) { + //cout << seedId << " "; + // Construct pencils from the seedIds into a set of pencils. + pencils = buildPencilsWithNeighbors(mpiGrid, pencils, seedId, ids, dimension, path); + } + // cout << endl; + //cout << "Number of seed ids is " << seedIds.size() << endl; + + uint ibeg = 0; + uint iend = 0; + std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; + std::cout << "(x, y): indices " << std::endl; + std::cout << "-----------------------------------------------------------------" << std::endl; + for (uint i = 0; i < pencils.N; i++) { + iend += pencils.lengthOfPencils[i]; + std::cout << "(" << pencils.x[i] << ", " << pencils.y[i] << "): "; + for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { + std::cout << *j << " "; + } + ibeg = iend; + std::cout << std::endl; + } + + + // Add the final set of pencils to the pencilSets - vector. + // Only one set is created for now but we retain support for multiple sets + pencilSets.push_back(pencils); + // **************************************************************************** + + // Fiddle indices x,y,z + switch (dimension) { + case 0: + // set values in array that is used to convert block indices + // to global ID using a dot product. 
+ cell_indices_to_id[0]=WID2; + cell_indices_to_id[1]=WID; + cell_indices_to_id[2]=1; + break; + case 1: + // set values in array that is used to convert block indices + // to global ID using a dot product + cell_indices_to_id[0]=1; + cell_indices_to_id[1]=WID2; + cell_indices_to_id[2]=WID; + break; + case 2: + // set values in array that is used to convert block indices + // to global id using a dot product. + cell_indices_to_id[0]=1; + cell_indices_to_id[1]=WID; + cell_indices_to_id[2]=WID2; + break; + default: + cerr << __FILE__ << ":"<< __LINE__ << " Wrong dimension, abort"<& vmesh = allCellsPointer[0]->get_velocity_mesh(popID); + + // set cell size in dimension direction + dvz = vmesh.getCellSize(VMESH_REFLEVEL)[dimension]; + vz_min = vmesh.getMeshMinLimits()[dimension]; + + // Get a unique sorted list of blockids that are in any of the + // propagated cells. First use set for this, then add to vector (may not + // be the most nice way to do this and in any case we could do it along + // dimension for data locality reasons => copy acc map column code, TODO: FIXME + // TODO: Do this separately for each pencil? + std::unordered_set unionOfBlocksSet; + + for(auto cell : allCellsPointer) { + vmesh::VelocityMesh& vmesh = cell->get_velocity_mesh(popID); + for (vmesh::LocalID block_i=0; block_i< vmesh.size(); ++block_i) { + unionOfBlocksSet.insert(vmesh.getGlobalID(block_i)); + } + } + + std::vector unionOfBlocks; + unionOfBlocks.reserve(unionOfBlocksSet.size()); + for(const auto blockGID: unionOfBlocksSet) { + unionOfBlocks.push_back(blockGID); + } + // **************************************************************************** + + int t1 = phiprof::initializeTimer("mappingAndStore"); + +#pragma omp parallel + { + //std::vector targetsValid(localPropagatedCells.size()); + //std::vector allCellsBlockLocalID(allCells.size()); + +#pragma omp for schedule(guided) + // Loop over velocity space blocks. Thread this loop (over vspace blocks) with OpenMP. + for(uint blocki = 0; blocki < unionOfBlocks.size(); blocki++){ + + phiprof::start(t1); + + // Get global id of the velocity block + vmesh::GlobalID blockGID = unionOfBlocks[blocki]; + + velocity_block_indices_t block_indices; + uint8_t vRefLevel; + vmesh.getIndices(blockGID,vRefLevel, block_indices[0], + block_indices[1], block_indices[2]); + + // Loop over sets of pencils + // This loop only has one iteration for now + for ( auto pencils: pencilSets ) { + + // Allocate targetdata sum(lengths of pencils)*WID3) + Vec targetData[pencils.sumOfLengths * WID3]; + + // Initialize targetdata to 0 + for( uint i = 0; i < pencils.sumOfLengths * WID3; i++ ) { + targetData[i] = 0.0; + } + + // TODO: There's probably a smarter way to keep track of where we are writing + // in the target data structure. + uint targetDataIndex = 0; + + // Compute spatial neighbors for target cells. + // For targets we only have actual cells as we do not + // want to propagate boundary cells (array may contain + // INVALID_CELLIDs at boundaries). 
+ for ( auto celli: pencils.ids ) { + compute_spatial_target_neighbors(mpiGrid, localPropagatedCells[celli], dimension, + targetNeighbors.data() + celli * 3); + } + + // Loop over pencils + for(uint pencili = 0; pencili < pencils.N; pencili++){ + + // Allocate source data: sourcedata pencilIds = pencils.getIds(pencili); + for( auto celli: pencilIds) { + compute_spatial_source_neighbors(mpiGrid, localPropagatedCells[celli], + dimension, sourceNeighbors.data() + + celli * nSourceNeighborsPerCell); + } + + Vec * dzPointer = allCellsDz + pencilIds[0]; + + // load data(=> sourcedata) / (proper xy reconstruction in future) + // copied from regular code, should work? + int offset = 0; // TODO: Figure out what needs to go here. + copy_trans_block_data(sourceNeighbors.data() + offset, + blockGID, sourceData, cellid_transpose, popID); + + + // Calculate cell centered velocity for each v cell in the block + const Vec k = (0,1,2,3); + const Vec cell_vz = (block_indices[dimension] * WID + k + 0.5) * dvz + vz_min; + + const Vec z_translation = dt * cell_vz; + // propagate pencil(blockid = velocities, pencil-ids = dzs ), + propagatePencil(dzPointer, sourceData, z_translation, pencils.lengthOfPencils[pencili], nSourceNeighborsPerCell); + + // sourcedata => targetdata[this pencil]) + for (auto value: sourceData) { + targetData[targetDataIndex] = value; + targetDataIndex++; + } + + // dealloc source data -- Should be automatic since it's declared in this iteration? + + } + + // Loop over pencils again + for(uint pencili = 0; pencili < pencils.N; pencili++){ + + // store_data(target_data =>) :Aggregate data for blockid to original location + + //store values from target_values array to the actual blocks + for(auto celli: pencils.ids) { + //TODO: Figure out validity check later + //if(targetsValid[celli]) { + for(uint ti = 0; ti < 3; ti++) { + SpatialCell* spatial_cell = targetNeighbors[celli * 3 + ti]; + if(spatial_cell ==NULL) { + //invalid target spatial cell + continue; + } + + // Get local ID of the velocity block + const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); + + if (blockLID == vmesh::VelocityMesh::invalidLocalID()) { + // block does not exist. If so, we do not create it and add stuff to it here. + // We have already created blocks around blocks with content in + // spatial sense, so we have no need to create even more blocks here + // TODO add loss counter + continue; + } + // Pointer to the data field of the velocity block + Realf* blockData = spatial_cell->get_data(blockLID, popID); + // Unpack the vector data to the cell data types + for(int i = 0; i < WID3 ; i++) { + + // Write data into target block + blockData[i] += targetData[(celli * 3 + ti)][i]; + } + } + //} + + } + + // dealloc target data -- Should be automatic again? 
+ } + } + } + } + + return true; + } diff --git a/vlasovsolver/cpu_trans_map.hpp b/vlasovsolver/cpu_trans_map.hpp index d276fa4f4..6101ad0cf 100644 --- a/vlasovsolver/cpu_trans_map.hpp +++ b/vlasovsolver/cpu_trans_map.hpp @@ -57,5 +57,13 @@ void copy_trans_block_data(SpatialCell** source_neighbors, const unsigned char* const cellid_transpose, const uint popID); +bool trans_map_1d_amr(const dccrg::Dccrg& mpiGrid, + const std::vector& localPropagatedCells, + const std::vector& remoteTargetCells, + const uint dimension, + const Realv dt, + const uint popID); + + #endif diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index abc7ce60a..a2ed692ce 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1,5 +1,5 @@ -//#include "vlasovsolver/cpu_1d_ppm_nonuniform.hpp" -#include "cpu_1d_ppm_nonuniform_conserving.hpp" +#include "vlasovsolver/cpu_1d_ppm_nonuniform.hpp" +//#include "cpu_1d_ppm_nonuniform_conserving.hpp" #include "vec.h" #include "../grid.h" #include "../object_wrapper.h" @@ -645,11 +645,15 @@ bool trans_map_1d_amr(const dccrg::Dccrg& uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ unsigned char cellid_transpose[WID3]; /*< defines the transpose for the solver internal (transposed) id: i + j*WID + k*WID2 to actual one*/ const uint blocks_per_dim = 1; - + + cout << "entering trans_map_1d_amr" << endl; + // return if there's no cells to propagate - if(localPropagatedCells.size() == 0) - std::cout << "Returning because of no cells" << std::endl; - return true; + if(localPropagatedCells.size() == 0) { + //std::cout << "Returning because of no cells" << std::endl; + cerr << "Returning because of no cells" << endl; + return false; + } // Vector with all cell ids vector allCells(localPropagatedCells); diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index eb55a4792..fcc46cb68 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -42,6 +42,7 @@ #include "cpu_moments.h" #include "cpu_acc_semilag.hpp" #include "cpu_trans_map.hpp" +#include "cpu_trans_map_amr.hpp" using namespace std; using namespace spatial_cell; @@ -106,9 +107,12 @@ void calculateSpatialTranslation( phiprof::stop(trans_timer); phiprof::start("compute-mapping-x"); - trans_map_1d(mpiGrid,local_propagated_cells, remoteTargetCellsx, 0,dt,popID); // map along x// + bool foo; + foo = trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsx, 0,dt,popID); // map along x// phiprof::stop("compute-mapping-x"); + cout << "return value of trans_map_1d_amr: " << foo << endl; + trans_timer=phiprof::initializeTimer("update_remote-x","MPI"); phiprof::start("update_remote-x"); update_remote_mapping_contribution(mpiGrid, 0,+1,popID); From 47a0117cf195df799beca96fd88603d5b2acf6d2 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 22 Aug 2018 12:35:35 +0300 Subject: [PATCH 044/602] Fix for pencil builder that works with single process/periodic boundary conditions. 
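The idea of the fix: when every local cell has a face neighbor in the negative direction (fully periodic system, single process along the propagation dimension), no natural seed cells exist, so the pencils are instead seeded from the entire low-coordinate plane perpendicular to the propagation dimension. A sketch of the id arithmetic for an unrefined dccrg grid (1-based ids, x index running fastest), shown here for dimension 2 only:

    // seed every cell of the iz == 0 plane (the xy-plane)
    for (uint iy = 0; iy < P::ycells_ini; iy++) {
       for (uint ix = 0; ix < P::xcells_ini; ix++) {
          seedIds.push_back(P::xcells_ini * iy + ix + 1);
       }
    }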
--- vlasovsolver/cpu_trans_map.cpp | 458 ++++++++++++++++++--------------- 1 file changed, 256 insertions(+), 202 deletions(-) diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index e19415a22..a4331754c 100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -1008,12 +1008,23 @@ setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg& mpiGrid, - const vector& localPropagatedCells, - const vector& remoteTargetCells, - const uint dimension, - const Realv dt, - const uint popID) { + const vector& localPropagatedCells, + const vector& remoteTargetCells, + const uint dimension, + const Realv dt, + const uint popID) { Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ @@ -1178,6 +1189,51 @@ bool trans_map_1d_amr(const dccrg::Dccrg& allCellsDz[celli] = P::dz_ini / pow(2.0, mpiGrid.get_refinement_level(celli)); } //cout << endl; + + // Fiddle indices x,y,z + switch (dimension) { + case 0: + // set values in array that is used to convert block indices + // to global ID using a dot product. + cell_indices_to_id[0]=WID2; + cell_indices_to_id[1]=WID; + cell_indices_to_id[2]=1; + break; + case 1: + // set values in array that is used to convert block indices + // to global ID using a dot product + cell_indices_to_id[0]=1; + cell_indices_to_id[1]=WID2; + cell_indices_to_id[2]=WID; + break; + case 2: + // set values in array that is used to convert block indices + // to global id using a dot product. + cell_indices_to_id[0]=1; + cell_indices_to_id[1]=WID; + cell_indices_to_id[2]=WID2; + break; + default: + cerr << __FILE__ << ":"<< __LINE__ << " Wrong dimension, abort"<& //cout << "dimension " << dimension << endl; //#pragma omp parallel for - for(uint celli = 0; celli < localPropagatedCells.size(); ++celli){ - CellID localCelli = localPropagatedCells[celli]; - int myProcess = mpiGrid.get_process(localCelli); + for(auto celli: localPropagatedCells) { + int myProcess = mpiGrid.get_process(celli); // Collect a list of cell ids that do not have a neighbor in the negative direction // These are the seed ids for the pencils. vector negativeNeighbors; // Returns all neighbors as (id, direction-dimension) pairs. //cout << "neighbors of cell " << localCelli << " are "; - for ( const auto neighbor : mpiGrid.get_face_neighbors_of(localCelli ) ) { + for ( const auto neighbor : mpiGrid.get_face_neighbors_of(celli ) ) { if ( mpiGrid.get_process(neighbor.first) == myProcess ) { //cout << neighbor.first << "," << neighbor.second << " "; // select the neighbor in the negative dimension of the propagation - if (neighbor.second == - (dimension + 1)) { + if (neighbor.second == - (static_cast(dimension) + 1)) { // add the id of the neighbor to a list if it's on the same process negativeNeighbors.push_back(neighbor.first); @@ -1213,7 +1268,36 @@ bool trans_map_1d_amr(const dccrg::Dccrg& //cout << endl; // if no neighbors were found in the negative direction, add this cell id to the seed cells if (negativeNeighbors.size() == 0) - seedIds.push_back(localCelli); + seedIds.push_back(celli); + } + cout << P::xcells_ini << " " << P::ycells_ini << " " << P::zcells_ini << endl; + // If no seed ids were found, let's assume we have a periodic boundary and + // a single process in the dimension of propagation. 
In this case we start from + // the first cells of the plane perpendicular to the propagation dimension + if (seedIds.size() == 0) { + for (uint ix = 0; ix < P::xcells_ini; ix++) { + for (uint iy = 0; iy < P::ycells_ini; iy++) { + for (uint iz = 0; iz < P::zcells_ini; iz++) { + switch (dimension) { + case 0: + // yz - plane + if(ix == 0) + seedIds.push_back(P::xcells_ini * P::ycells_ini * iz + P::xcells_ini * iy +1 ); + break; + case 1: + // xz - plane + if(iy == 0) + seedIds.push_back(P::xcells_ini * P::ycells_ini * iz + ix + 1); + break; + case 2: + // xy - plane + if(iz == 0) + seedIds.push_back(P::xcells_ini * iy + ix + 1); + break; + } + } + } + } } // Empty vectors for internal use of buildPencilsWithNeighbors. Could be default values but @@ -1226,15 +1310,17 @@ bool trans_map_1d_amr(const dccrg::Dccrg& setOfPencils pencils; vector pencilSets; - //cout << "Seed ids are: "; + cout << "Number of seed ids is " << seedIds.size() << endl; + cout << "Seed ids are: "; + for (const auto seedId : seedIds) { + cout << seedId << " "; + } + cout << endl; + for (const auto seedId : seedIds) { - //cout << seedId << " "; // Construct pencils from the seedIds into a set of pencils. pencils = buildPencilsWithNeighbors(mpiGrid, pencils, seedId, ids, dimension, path); } - // cout << endl; - //cout << "Number of seed ids is " << seedIds.size() << endl; - uint ibeg = 0; uint iend = 0; std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; @@ -1255,213 +1341,181 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Only one set is created for now but we retain support for multiple sets pencilSets.push_back(pencils); // **************************************************************************** - - // Fiddle indices x,y,z - switch (dimension) { - case 0: - // set values in array that is used to convert block indices - // to global ID using a dot product. - cell_indices_to_id[0]=WID2; - cell_indices_to_id[1]=WID; - cell_indices_to_id[2]=1; - break; - case 1: - // set values in array that is used to convert block indices - // to global ID using a dot product - cell_indices_to_id[0]=1; - cell_indices_to_id[1]=WID2; - cell_indices_to_id[2]=WID; - break; - case 2: - // set values in array that is used to convert block indices - // to global id using a dot product. - cell_indices_to_id[0]=1; - cell_indices_to_id[1]=WID; - cell_indices_to_id[2]=WID2; - break; - default: - cerr << __FILE__ << ":"<< __LINE__ << " Wrong dimension, abort"<& vmesh = allCellsPointer[0]->get_velocity_mesh(popID); + + // set cell size in dimension direction + dvz = vmesh.getCellSize(VMESH_REFLEVEL)[dimension]; + vz_min = vmesh.getMeshMinLimits()[dimension]; + + // Get a unique sorted list of blockids that are in any of the + // propagated cells. First use set for this, then add to vector (may not + // be the most nice way to do this and in any case we could do it along + // dimension for data locality reasons => copy acc map column code, TODO: FIXME + // TODO: Do this separately for each pencil? 
+ std::unordered_set unionOfBlocksSet; + + for(auto cell : allCellsPointer) { + vmesh::VelocityMesh& vmesh = cell->get_velocity_mesh(popID); + for (vmesh::LocalID block_i=0; block_i< vmesh.size(); ++block_i) { + unionOfBlocksSet.insert(vmesh.getGlobalID(block_i)); } } - // **************************************************************************** - - const uint8_t VMESH_REFLEVEL = 0; + + std::vector unionOfBlocks; + unionOfBlocks.reserve(unionOfBlocksSet.size()); + for(const auto blockGID: unionOfBlocksSet) { + unionOfBlocks.push_back(blockGID); + } + // **************************************************************************** + + //cout << "Beginning of parallel region" << endl; + int t1 = phiprof::initializeTimer("mappingAndStore"); + + //#pragma omp parallel + { + //std::vector targetsValid(localPropagatedCells.size()); + //std::vector allCellsBlockLocalID(allCells.size()); - // Get a pointer to the velocity mesh of the first spatial cell - const vmesh::VelocityMesh& vmesh = allCellsPointer[0]->get_velocity_mesh(popID); + //#pragma omp for schedule(guided) + // Loop over velocity space blocks. Thread this loop (over vspace blocks) with OpenMP. + for(uint blocki = 0; blocki < unionOfBlocks.size(); blocki++){ + + phiprof::start(t1); - // set cell size in dimension direction - dvz = vmesh.getCellSize(VMESH_REFLEVEL)[dimension]; - vz_min = vmesh.getMeshMinLimits()[dimension]; - - // Get a unique sorted list of blockids that are in any of the - // propagated cells. First use set for this, then add to vector (may not - // be the most nice way to do this and in any case we could do it along - // dimension for data locality reasons => copy acc map column code, TODO: FIXME - // TODO: Do this separately for each pencil? - std::unordered_set unionOfBlocksSet; - - for(auto cell : allCellsPointer) { - vmesh::VelocityMesh& vmesh = cell->get_velocity_mesh(popID); - for (vmesh::LocalID block_i=0; block_i< vmesh.size(); ++block_i) { - unionOfBlocksSet.insert(vmesh.getGlobalID(block_i)); - } - } - - std::vector unionOfBlocks; - unionOfBlocks.reserve(unionOfBlocksSet.size()); - for(const auto blockGID: unionOfBlocksSet) { - unionOfBlocks.push_back(blockGID); - } - // **************************************************************************** - - int t1 = phiprof::initializeTimer("mappingAndStore"); + // Get global id of the velocity block + vmesh::GlobalID blockGID = unionOfBlocks[blocki]; + + velocity_block_indices_t block_indices; + uint8_t vRefLevel; + vmesh.getIndices(blockGID,vRefLevel, block_indices[0], + block_indices[1], block_indices[2]); -#pragma omp parallel - { - //std::vector targetsValid(localPropagatedCells.size()); - //std::vector allCellsBlockLocalID(allCells.size()); - -#pragma omp for schedule(guided) - // Loop over velocity space blocks. Thread this loop (over vspace blocks) with OpenMP. 
- for(uint blocki = 0; blocki < unionOfBlocks.size(); blocki++){ - - phiprof::start(t1); + // Loop over sets of pencils + // This loop only has one iteration for now + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + for ( auto pencils: pencilSets ) { - // Get global id of the velocity block - vmesh::GlobalID blockGID = unionOfBlocks[blocki]; + // Allocate targetdata sum(lengths of pencils)*WID3) + Vec targetData[pencils.sumOfLengths * WID3]; - velocity_block_indices_t block_indices; - uint8_t vRefLevel; - vmesh.getIndices(blockGID,vRefLevel, block_indices[0], - block_indices[1], block_indices[2]); - - // Loop over sets of pencils - // This loop only has one iteration for now - for ( auto pencils: pencilSets ) { + // Initialize targetdata to 0 + for( uint i = 0; i < pencils.sumOfLengths * WID3; i++ ) { + targetData[i] = 0.0; + } - // Allocate targetdata sum(lengths of pencils)*WID3) - Vec targetData[pencils.sumOfLengths * WID3]; + // TODO: There's probably a smarter way to keep track of where we are writing + // in the target data structure. + uint targetDataIndex = 0; + + // Compute spatial neighbors for target cells. + // For targets we only have actual cells as we do not + // want to propagate boundary cells (array may contain + // INVALID_CELLIDs at boundaries). + for ( auto celli: pencils.ids ) { + compute_spatial_target_neighbors(mpiGrid, localPropagatedCells[celli], dimension, + targetNeighbors.data() + celli * 3); + } - // Initialize targetdata to 0 - for( uint i = 0; i < pencils.sumOfLengths * WID3; i++ ) { - targetData[i] = 0.0; - } + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + + // Loop over pencils + for(uint pencili = 0; pencili < pencils.N; pencili++){ - // TODO: There's probably a smarter way to keep track of where we are writing - // in the target data structure. - uint targetDataIndex = 0; - - // Compute spatial neighbors for target cells. - // For targets we only have actual cells as we do not - // want to propagate boundary cells (array may contain - // INVALID_CELLIDs at boundaries). - for ( auto celli: pencils.ids ) { - compute_spatial_target_neighbors(mpiGrid, localPropagatedCells[celli], dimension, - targetNeighbors.data() + celli * 3); - } - - // Loop over pencils - for(uint pencili = 0; pencili < pencils.N; pencili++){ + // Allocate source data: sourcedata pencilIds = pencils.getIds(pencili); - for( auto celli: pencilIds) { - compute_spatial_source_neighbors(mpiGrid, localPropagatedCells[celli], - dimension, sourceNeighbors.data() - + celli * nSourceNeighborsPerCell); - } - - Vec * dzPointer = allCellsDz + pencilIds[0]; + + // Compute spatial neighbors for source cells. In + // source cells we have a wider stencil and take into account + // boundaries. + vector pencilIds = pencils.getIds(pencili); + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + + for( auto celli: pencilIds) { + compute_spatial_source_neighbors(mpiGrid, localPropagatedCells[celli], + dimension, sourceNeighbors.data() + + celli * nSourceNeighborsPerCell); + } + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + + Vec * dzPointer = allCellsDz + pencilIds[0]; - // load data(=> sourcedata) / (proper xy reconstruction in future) - // copied from regular code, should work? - int offset = 0; // TODO: Figure out what needs to go here. 
- copy_trans_block_data(sourceNeighbors.data() + offset, - blockGID, sourceData, cellid_transpose, popID); + // load data(=> sourcedata) / (proper xy reconstruction in future) + // copied from regular code, should work? + int offset = 0; // TODO: Figure out what needs to go here. + copy_trans_block_data(sourceNeighbors.data() + offset, + blockGID, sourceData, cellid_transpose, popID); + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - // Calculate cell centered velocity for each v cell in the block - const Vec k = (0,1,2,3); - const Vec cell_vz = (block_indices[dimension] * WID + k + 0.5) * dvz + vz_min; + // Calculate cell centered velocity for each v cell in the block + const Vec k = (0,1,2,3); + const Vec cell_vz = (block_indices[dimension] * WID + k + 0.5) * dvz + vz_min; - const Vec z_translation = dt * cell_vz; - // propagate pencil(blockid = velocities, pencil-ids = dzs ), - propagatePencil(dzPointer, sourceData, z_translation, pencils.lengthOfPencils[pencili], nSourceNeighborsPerCell); - - // sourcedata => targetdata[this pencil]) - for (auto value: sourceData) { - targetData[targetDataIndex] = value; - targetDataIndex++; - } + const Vec z_translation = dt * cell_vz; + // propagate pencil(blockid = velocities, pencil-ids = dzs ), + propagatePencil(dzPointer, sourceData, z_translation, pencils.lengthOfPencils[pencili], nSourceNeighborsPerCell); + + // sourcedata => targetdata[this pencil]) + for (auto value: sourceData) { + targetData[targetDataIndex] = value; + targetDataIndex++; + } - // dealloc source data -- Should be automatic since it's declared in this iteration? + // dealloc source data -- Should be automatic since it's declared in this iteration? - } + } - // Loop over pencils again - for(uint pencili = 0; pencili < pencils.N; pencili++){ + // Loop over pencils again + for(uint pencili = 0; pencili < pencils.N; pencili++){ - // store_data(target_data =>) :Aggregate data for blockid to original location + // store_data(target_data =>) :Aggregate data for blockid to original location - //store values from target_values array to the actual blocks - for(auto celli: pencils.ids) { - //TODO: Figure out validity check later - //if(targetsValid[celli]) { - for(uint ti = 0; ti < 3; ti++) { - SpatialCell* spatial_cell = targetNeighbors[celli * 3 + ti]; - if(spatial_cell ==NULL) { - //invalid target spatial cell - continue; - } + //store values from target_values array to the actual blocks + for(auto celli: pencils.ids) { + //TODO: Figure out validity check later + //if(targetsValid[celli]) { + for(uint ti = 0; ti < 3; ti++) { + SpatialCell* spatial_cell = targetNeighbors[celli * 3 + ti]; + if(spatial_cell ==NULL) { + //invalid target spatial cell + continue; + } - // Get local ID of the velocity block - const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); + // Get local ID of the velocity block + const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); - if (blockLID == vmesh::VelocityMesh::invalidLocalID()) { - // block does not exist. If so, we do not create it and add stuff to it here. 
- // We have already created blocks around blocks with content in - // spatial sense, so we have no need to create even more blocks here - // TODO add loss counter - continue; - } - // Pointer to the data field of the velocity block - Realf* blockData = spatial_cell->get_data(blockLID, popID); - // Unpack the vector data to the cell data types - for(int i = 0; i < WID3 ; i++) { - - // Write data into target block - blockData[i] += targetData[(celli * 3 + ti)][i]; - } - } - //} - + if (blockLID == vmesh::VelocityMesh::invalidLocalID()) { + // block does not exist. If so, we do not create it and add stuff to it here. + // We have already created blocks around blocks with content in + // spatial sense, so we have no need to create even more blocks here + // TODO add loss counter + continue; + } + // Pointer to the data field of the velocity block + Realf* blockData = spatial_cell->get_data(blockLID, popID); + // Unpack the vector data to the cell data types + for(int i = 0; i < WID3 ; i++) { + + // Write data into target block + blockData[i] += targetData[(celli * 3 + ti)][i]; + } } - - // dealloc target data -- Should be automatic again? + //} + } + + // dealloc target data -- Should be automatic again? } } } - - return true; } + + return true; +} From cd0310254ff11f530c9e0379dedb06c0704bbf92 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 22 Aug 2018 14:10:57 +0300 Subject: [PATCH 045/602] Moved pencil seed ids to a function. Fixed the code with multiple MPI ranks. --- vlasovsolver/cpu_trans_map.cpp | 160 +++++++++++++++++++-------------- 1 file changed, 91 insertions(+), 69 deletions(-) diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index a4331754c..8e11e4df4 100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -842,7 +842,8 @@ struct setOfPencils { }; -CellID selectNeighbor(dccrg::Dccrg &grid, CellID id, int dimension = 0, uint path = 0) { +CellID selectNeighbor(const dccrg::Dccrg &grid, + CellID id, int dimension = 0, uint path = 0) { const auto neighbors = grid.get_face_neighbors_of(id); const int myProcess = grid.get_process(id); @@ -886,7 +887,7 @@ CellID selectNeighbor(dccrg::Dccrg &grid, } -setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg grid, +setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg &grid, setOfPencils &pencils, CellID startingId, vector ids, uint dimension, vector path) { @@ -1144,6 +1145,92 @@ void propagatePencil(Vec dr[], Vec values[], Vec z_translation, uint lengthOfPen } +void get_seed_ids(const dccrg::Dccrg& mpiGrid, + const vector &localPropagatedCells, + const uint dimension, + vector &seedIds) { + + //cout << "localpropagatedcells.size() " << localPropagatedCells.size() << endl; + //cout << "dimension " << dimension << endl; + + //#pragma omp parallel for + for(auto celli: localPropagatedCells) { + int myProcess = mpiGrid.get_process(celli); + // Collect a list of cell ids that do not have a neighbor in the negative direction + // These are the seed ids for the pencils. + vector negativeNeighbors; + // Returns all neighbors as (id, direction-dimension) pairs. 
+ //cout << "neighbors of cell " << localCelli << " are "; + for ( const auto neighbor : mpiGrid.get_face_neighbors_of(celli ) ) { + + if ( mpiGrid.get_process(neighbor.first) == myProcess ) { + //cout << neighbor.first << "," << neighbor.second << " "; + // select the neighbor in the negative dimension of the propagation + if (neighbor.second == - (static_cast(dimension) + 1)) { + + // add the id of the neighbor to a list if it's on the same process + negativeNeighbors.push_back(neighbor.first); + + } + + } + } + //cout << endl; + // if no neighbors were found in the negative direction, add this cell id to the seed cells + if (negativeNeighbors.size() == 0) + seedIds.push_back(celli); + } + + // If no seed ids were found, let's assume we have a periodic boundary and + // a single process in the dimension of propagation. In this case we start from + // the first cells of the plane perpendicular to the propagation dimension + if (seedIds.size() == 0) { + for (uint ix = 0; ix < P::xcells_ini; ix++) { + for (uint iy = 0; iy < P::ycells_ini; iy++) { + for (uint iz = 0; iz < P::zcells_ini; iz++) { + CellID seedId; + switch (dimension) { + case 0: + // yz - plane + if(ix == 0) { + seedId = P::xcells_ini * P::ycells_ini * iz + P::xcells_ini * iy + 1; + if(mpiGrid.get_process(seedId) == mpiGrid.get_process(localPropagatedCells[0])) + seedIds.push_back(seedId); + + } + break; + case 1: + // xz - plane + if(iy == 0) { + seedId = P::xcells_ini * P::ycells_ini * iz + ix + 1; + if(mpiGrid.get_process(seedId) == mpiGrid.get_process(localPropagatedCells[0])) + seedIds.push_back(seedId); + + } + break; + case 2: + // xy - plane + if(iz == 0) { + seedId = P::xcells_ini * iy + ix + 1; + if(mpiGrid.get_process(seedId) == mpiGrid.get_process(localPropagatedCells[0])) + seedIds.push_back(seedId); + } + break; + } + } + } + } + } + + cout << "Number of seed ids is " << seedIds.size() << endl; + cout << "Seed ids are: "; + for (const auto seedId : seedIds) { + cout << seedId << " "; + } + cout << endl; + +} + bool trans_map_1d_amr(const dccrg::Dccrg& mpiGrid, const vector& localPropagatedCells, @@ -1238,68 +1325,10 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // **************************************************************************** // compute pencils => set of pencils (shared datastructure) - vector seedIds; - //cout << "localpropagatedcells.size() " << localPropagatedCells.size() << endl; - //cout << "dimension " << dimension << endl; + vector seedIds; + get_seed_ids(mpiGrid, localPropagatedCells, dimension, seedIds); - //#pragma omp parallel for - for(auto celli: localPropagatedCells) { - int myProcess = mpiGrid.get_process(celli); - // Collect a list of cell ids that do not have a neighbor in the negative direction - // These are the seed ids for the pencils. - vector negativeNeighbors; - // Returns all neighbors as (id, direction-dimension) pairs. 
- //cout << "neighbors of cell " << localCelli << " are "; - for ( const auto neighbor : mpiGrid.get_face_neighbors_of(celli ) ) { - - if ( mpiGrid.get_process(neighbor.first) == myProcess ) { - //cout << neighbor.first << "," << neighbor.second << " "; - // select the neighbor in the negative dimension of the propagation - if (neighbor.second == - (static_cast(dimension) + 1)) { - - // add the id of the neighbor to a list if it's on the same process - negativeNeighbors.push_back(neighbor.first); - - } - - } - } - //cout << endl; - // if no neighbors were found in the negative direction, add this cell id to the seed cells - if (negativeNeighbors.size() == 0) - seedIds.push_back(celli); - } - cout << P::xcells_ini << " " << P::ycells_ini << " " << P::zcells_ini << endl; - // If no seed ids were found, let's assume we have a periodic boundary and - // a single process in the dimension of propagation. In this case we start from - // the first cells of the plane perpendicular to the propagation dimension - if (seedIds.size() == 0) { - for (uint ix = 0; ix < P::xcells_ini; ix++) { - for (uint iy = 0; iy < P::ycells_ini; iy++) { - for (uint iz = 0; iz < P::zcells_ini; iz++) { - switch (dimension) { - case 0: - // yz - plane - if(ix == 0) - seedIds.push_back(P::xcells_ini * P::ycells_ini * iz + P::xcells_ini * iy +1 ); - break; - case 1: - // xz - plane - if(iy == 0) - seedIds.push_back(P::xcells_ini * P::ycells_ini * iz + ix + 1); - break; - case 2: - // xy - plane - if(iz == 0) - seedIds.push_back(P::xcells_ini * iy + ix + 1); - break; - } - } - } - } - } - // Empty vectors for internal use of buildPencilsWithNeighbors. Could be default values but // default vectors are complicated. Should overload buildPencilsWithNeighbors like suggested here // https://stackoverflow.com/questions/3147274/c-default-argument-for-vectorint @@ -1310,13 +1339,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& setOfPencils pencils; vector pencilSets; - cout << "Number of seed ids is " << seedIds.size() << endl; - cout << "Seed ids are: "; - for (const auto seedId : seedIds) { - cout << seedId << " "; - } - cout << endl; - for (const auto seedId : seedIds) { // Construct pencils from the seedIds into a set of pencils. pencils = buildPencilsWithNeighbors(mpiGrid, pencils, seedId, ids, dimension, path); From cd6c3dc5c7d663848ce92264f695c043d60bc958 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 27 Aug 2018 16:46:45 +0300 Subject: [PATCH 046/602] Modified copy_trans_block_data to work with pencils. Code runs without segfault but produces rubbish. 
--- vlasovsolver/cpu_trans_map.cpp | 724 +++++++++++++++++++++++---------- 1 file changed, 506 insertions(+), 218 deletions(-) diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index 8e11e4df4..8e34b6f6b 100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -56,6 +56,54 @@ void store_trans_block_data(SpatialCell** target_neighbors,const vmesh::GlobalID Vec* __restrict__ target_values, const unsigned char* const cellid_transpose,const uint popID); +struct setOfPencils { + + uint N; // Number of pencils in the set + uint sumOfLengths; + std::vector lengthOfPencils; // Lengths of pencils + std::vector ids; // List of cells + std::vector x,y; // x,y - position + + setOfPencils() { + N = 0; + sumOfLengths = 0; + } + + void addPencil(std::vector idsIn, Real xIn, Real yIn) { + + N += 1; + sumOfLengths += idsIn.size(); + lengthOfPencils.push_back(idsIn.size()); + ids.insert(ids.end(),idsIn.begin(),idsIn.end()); + x.push_back(xIn); + y.push_back(yIn); + + } + + std::vector getIds(const uint pencilId) { + + vector idsOut; + + if (pencilId > N) { + return idsOut; + } + + CellID ibeg = 0; + for (uint i = 0; i < pencilId; i++) { + ibeg += lengthOfPencils[i]; + } + CellID iend = ibeg + lengthOfPencils[pencilId]; + + for (uint i = ibeg; i <= iend; i++) { + idsOut.push_back(ids[i]); + } + + return idsOut; + } + +}; + + // indices in padded source block, which is of type Vec with VECL // element sin each vector. b_k is the block index in z direction in // ordinary space [- VLASOV_STENCIL_WIDTH to VLASOV_STENCIL_WIDTH], @@ -71,6 +119,8 @@ void store_trans_block_data(SpatialCell** target_neighbors,const vmesh::GlobalID //#define i_trans_pt_blockv(j, k, b_k) ( ( (j) * WID + (k) * WID2 + ((b_k) + 1 ) * WID3) / VECL ) #define i_trans_pt_blockv(planeVectorIndex, planeIndex, blockIndex) ( planeVectorIndex + planeIndex * VEC_PER_PLANE + (blockIndex + 1) * VEC_PER_BLOCK) +#define i_trans_ps_blockv_pencil(planeVectorIndex, planeIndex, blockIndex, lengthOfPencil) ( (blockIndex) + VLASOV_STENCIL_WIDTH + ( (planeVectorIndex) + (planeIndex) * VEC_PER_PLANE ) * ( lengthOfPencil + 2 * VLASOV_STENCIL_WIDTH) ) + //Is cell translated? 
It is not translated if DO_NO_COMPUTE or if it is sysboundary cell and not in first sysboundarylayer bool do_translate_cell(SpatialCell* SC){ if(SC->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE || @@ -222,6 +272,56 @@ void compute_spatial_source_neighbors(const dccrg::Dccrg& mpiGrid, + setOfPencils pencils, + const uint iPencil, + const uint dimension, + SpatialCell **neighbors){ + + // L = length of the pencil iPencil + int L = pencils.lengthOfPencils[iPencil]; + + vector ids = pencils.getIds(iPencil); + for (int iCell = -VLASOV_STENCIL_WIDTH; iCell < L + VLASOV_STENCIL_WIDTH; iCell++) { + CellID cellID = ids[iCell]; + + int i = 0; + if(iCell < 0) i = iCell; + if(iCell > L) i = iCell - L; + + switch (dimension) { + case 0: + neighbors[iCell + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, i, 0, 0); + break; + case 1: + neighbors[iCell + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, i, 0); + break; + case 2: + neighbors[iCell + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, 0, i); + break; + } + } + + SpatialCell* last_good_cell = mpiGrid[ids.front()]; + /*loop to neative side and replace all invalid cells with the closest good cell*/ + for(int i = -1;i>=-VLASOV_STENCIL_WIDTH;i--){ + if(neighbors[i + VLASOV_STENCIL_WIDTH] == NULL) + neighbors[i + VLASOV_STENCIL_WIDTH] = last_good_cell; + else + last_good_cell = neighbors[i + VLASOV_STENCIL_WIDTH]; + } + + last_good_cell = mpiGrid[ids.back()]; + /*loop to positive side and replace all invalid cells with the closest good cell*/ + for(int i = L + 1; i <= L + VLASOV_STENCIL_WIDTH; i++){ + if(neighbors[i + VLASOV_STENCIL_WIDTH] == NULL) + neighbors[i + VLASOV_STENCIL_WIDTH] = last_good_cell; + else + last_good_cell = neighbors[i + VLASOV_STENCIL_WIDTH]; + } +} + + /*compute spatial target neighbors, stencil has a size of 3. No boundary cells are included*/ void compute_spatial_target_neighbors(const dccrg::Dccrg& mpiGrid, const CellID& cellID, @@ -244,6 +344,42 @@ void compute_spatial_target_neighbors(const dccrg::Dccrg& mpiGrid, + setOfPencils& pencils, + const uint dimension, + SpatialCell **neighbors){ + + uint GID = 0; + for(uint iPencil = 0; iPencil < pencils.N; iPencil++){ + // L = length of the pencil iPencil + int L = pencils.lengthOfPencils[iPencil]; + + vector ids = pencils.getIds(iPencil); + for (int iCell = -1; iCell <= L; iCell++) { + CellID cellID = ids[iCell]; + + int i = 0; + if(iCell == -1) i = -1; + if(iCell == L) i = 1; + + switch (dimension) { + case 0: + neighbors[GID + iCell + 1] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, i, 0, 0); + break; + case 1: + neighbors[GID + iCell + 1] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, i, 0); + break; + case 2: + neighbors[GID + iCell + 1] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, 0, i); + break; + } + } + GID += (L + 2); + } +} + + /* Copy the data to the temporary values array, so that the * dimensions are correctly swapped. Also, copy the same block for * then neighboring spatial cells (in the dimension). 
neighbors @@ -375,7 +511,6 @@ bool trans_map_1d(const dccrg::Dccrg& mpi compute_spatial_source_neighbors(mpiGrid, localPropagatedCells[celli], dimension, sourceNeighbors.data() + celli * nSourceNeighborsPerCell); compute_spatial_target_neighbors(mpiGrid, localPropagatedCells[celli], dimension, targetNeighbors.data() + celli * 3); } - //Get a unique sorted list of blockids that are in any of the @@ -473,6 +608,20 @@ bool trans_map_1d(const dccrg::Dccrg& mpi vmesh::GlobalID blockGID = unionOfBlocks[blocki]; phiprof::start(t1); + // bool exitflag = false; + // cout << "sourceData: " << endl; + // const vmesh::LocalID blockLID = allCellsPointer[0]->get_velocity_block_local_id(blockGID, popID); + // Realf* data = allCellsPointer[0]->get_data(blockLID,popID); + // for (uint i = 0; i < WID3; i++) { + // cout << " " << data[i]; + // if (data[i] != 0) exitflag = true; + // } + // cout << endl; + // if(exitflag) { + // cout << blockGID << " " << blockLID << endl; + // throw; + // } + for(uint celli = 0; celli < allCellsPointer.size(); celli++){ allCellsBlockLocalID[celli] = allCellsPointer[celli]->get_velocity_block_local_id(blockGID, popID); } @@ -785,63 +934,6 @@ void update_remote_mapping_contribution( } } - - - - - - - - - - -struct setOfPencils { - - uint N; // Number of pencils in the set - uint sumOfLengths; - std::vector lengthOfPencils; // Lengths of pencils - std::vector ids; // List of cells - std::vector x,y; // x,y - position - - setOfPencils() { - N = 0; - sumOfLengths = 0; - } - - void addPencil(std::vector idsIn, Real xIn, Real yIn) { - - N += 1; - sumOfLengths += idsIn.size(); - lengthOfPencils.push_back(idsIn.size()); - ids.insert(ids.end(),idsIn.begin(),idsIn.end()); - x.push_back(xIn); - y.push_back(yIn); - - } - - std::vector getIds(uint pencilId) { - - vector idsOut; - - if (pencilId > N) { - return idsOut; - } - - CellID ibeg = 0; - for (uint i = 0; i < pencilId; i++) { - ibeg += lengthOfPencils[i]; - } - CellID iend = ibeg + lengthOfPencils[pencilId]; - - for (uint i = ibeg; i <= iend; i++) { - idsOut.push_back(ids[i]); - } - - return idsOut; - } - -}; - CellID selectNeighbor(const dccrg::Dccrg &grid, CellID id, int dimension = 0, uint path = 0) { @@ -1070,77 +1162,101 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg &vmesh, const uint lengthOfPencil) { + + // Get velocity data from vmesh that we need later to calculate the translation + velocity_block_indices_t block_indices; + uint8_t refLevel; + vmesh.getIndices(blockGID,refLevel, block_indices[0], block_indices[1], block_indices[2]); + Realv dvz = vmesh.getCellSize(refLevel)[dimension]; + Realv vz_min = vmesh.getMeshMinLimits()[dimension]; + // Assuming 1 neighbor in the target array because of the CFL condition // In fact propagating to > 1 neighbor will give an error - const uint nTargetNeighborsPerCell = 1; + const uint nTargetNeighborsPerPencil = 1; - // Determine direction of translation - // part of density goes here (cell index change along spatial direcion) - Vecb positiveTranslationDirection = (z_translation > Vec(0.0)); //Veci target_scell_index = truncate_to_int(select(z_translation > Vec(0.0), 1, -1)); // Vector buffer where we write data, initialized to 0*/ - Vec targetValues[lengthOfPencil + 2 * nTargetNeighborsPerCell]; + Vec targetValues[(lengthOfPencil + 2 * nTargetNeighborsPerPencil) * WID3 / VECL]; - for (uint i_target = 0; i_target < lengthOfPencil + nTargetNeighborsPerCell; i_target++) { + for (auto value: targetValues) { // init target_values - targetValues[i_target] = 0.0; + value = 
0.0; } // Go from 0 to length here to propagate all the cells in the pencil for (uint i = 0; i < lengthOfPencil; i++){ - // We padded the target array by 1 cell on both sides - // Assume the source array has been padded by nSourceNeighborsPerCell - // To have room for propagation. Refer to dr and values by i_cell - // and targetValues by i_target - uint i_cell = i + nSourceNeighborsPerCell; - uint i_target = i + nTargetNeighborsPerCell; - - // Calculate normalized coordinates in current cell. - // The coordinates (scaled units from 0 to 1) between which we will - // integrate to put mass in the target neighboring cell. - // Normalize the coordinates to the origin cell. Then we scale with the difference - // in volume between target and origin later when adding the integrated value. - Vec z_1,z_2; - z_1 = select(positiveTranslationDirection, 1.0 - z_translation / dr[i_cell], 0.0); - z_2 = select(positiveTranslationDirection, 1.0, - z_translation / dr[i_cell]); - - if( horizontal_or(abs(z_1) > Vec(1.0)) || horizontal_or(abs(z_2) > Vec(1.0)) ) { - std::cout << "Error, CFL condition violated\n"; - std::cout << "Exiting\n"; - std::exit(1); - } + // The source array is padded by VLASOV_STENCIL_WIDTH on both sides. + uint i_source = i + VLASOV_STENCIL_WIDTH; + uint i_target = i + nTargetNeighborsPerPencil; - // Compute polynomial coefficients - Vec a[3]; - compute_ppm_coeff_nonuniform(dr, values, h4, i_cell, a); - - // Compute integral - const Vec ngbr_target_density = - z_2 * ( a[0] + z_2 * ( a[1] + z_2 * a[2] ) ) - - z_1 * ( a[0] + z_1 * ( a[1] + z_1 * a[2] ) ); + for (uint k = 0; k < WID; ++k) { + + const Realv cell_vz = (block_indices[dimension] * WID + k + 0.5) * dvz + vz_min; //cell centered velocity + const Vec z_translation = cell_vz * dt / dz[i_source]; // how much it moved in time dt (reduced units) + + // Determine direction of translation + // part of density goes here (cell index change along spatial direcion) + Vecb positiveTranslationDirection = (z_translation > Vec(0.0)); + + // Calculate normalized coordinates in current cell. + // The coordinates (scaled units from 0 to 1) between which we will + // integrate to put mass in the target neighboring cell. + // Normalize the coordinates to the origin cell. Then we scale with the difference + // in volume between target and origin later when adding the integrated value. 
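// A worked example of the limits computed next, as a sanity check: if
// dt * cell_vz = +0.25 * dz[i_source], then z_1 = 1 - 0.25 = 0.75 and
// z_2 = 1, i.e. the top quarter of the reconstructed profile is integrated
// and handed to the i + 1 neighbor while the rest stays put. For
// dt * cell_vz = -0.25 * dz[i_source], z_1 = 0 and z_2 = 0.25, feeding the
// i - 1 neighbor instead. |z_1|, |z_2| <= 1 is exactly the CFL condition
// checked (commented out here) below.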
+ Vec z_1,z_2; + z_1 = select(positiveTranslationDirection, 1.0 - z_translation / dz[i_source], 0.0); + z_2 = select(positiveTranslationDirection, 1.0, - z_translation / dz[i_source]); + + // if( horizontal_or(abs(z_1) > Vec(1.0)) || horizontal_or(abs(z_2) > Vec(1.0)) ) { + // std::cout << "Error, CFL condition violated\n"; + // std::cout << "Exiting\n"; + // std::exit(1); + // } + + for (uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { - // Store mapped density in two target cells - // in the neighbor cell we will put this density - //targetValues[i_cell + target_scell_index] += ngbr_target_density * dr[i_cell] / dr[i_cell + target_scell_index]; - targetValues[i_target + 1] += select( positiveTranslationDirection,ngbr_target_density * dr[i_cell] / dr[i_cell + 1],Vec(0.0)); - targetValues[i_target - 1] += select(!positiveTranslationDirection,ngbr_target_density * dr[i_cell] / dr[i_cell - 1],Vec(0.0)); - // in the current original cells we will put the rest of the original density - targetValues[i_target] += values[i_cell] - ngbr_target_density; + // Compute polynomial coefficients + Vec a[3]; + compute_ppm_coeff_nonuniform(dz + i_source, + values + i_trans_ps_blockv_pencil(planeVector, k, i, lengthOfPencil), + h4, i_source, a); + // TODO: may need i - VLASOV_STENCIL_WIDTH instead of i for i_trans_ps_blockv_pencil. + + // Compute integral + const Vec ngbr_target_density = + z_2 * ( a[0] + z_2 * ( a[1] + z_2 * a[2] ) ) - + z_1 * ( a[0] + z_1 * ( a[1] + z_1 * a[2] ) ); + + // Store mapped density in two target cells + // in the neighbor cell we will put this density + //targetValues[i_cell + target_scell_index] += ngbr_target_density * dz[i_cell] / dz[i_cell + target_scell_index]; + targetValues[i_trans_pt_blockv(planeVector, k, i + 1)] += select( positiveTranslationDirection, + ngbr_target_density * dz[i_source] / dz[i_source + 1],Vec(0.0)); + targetValues[i_trans_pt_blockv(planeVector, k, i - 1 )] += select(!positiveTranslationDirection, + ngbr_target_density * dz[i_source] / dz[i_source - 1],Vec(0.0)); + + // in the current original cells we will put the rest of the original density + targetValues[i_trans_pt_blockv(planeVector, k, i)] += values[i_trans_ps_blockv_pencil(planeVector, k, i, lengthOfPencil)] - ngbr_target_density; + } + } } - // Store target data into source data - for (uint i=0; i < lengthOfPencil; i++){ + // Write target data into source data + + for (uint i = 0; i < lengthOfPencil; i++) { + uint i_source = i + VLASOV_STENCIL_WIDTH; + uint i_target = i + nTargetNeighborsPerPencil; - uint i_cell = i + nSourceNeighborsPerCell; - uint i_target = i + nTargetNeighborsPerCell; - - values[i_cell] = targetValues[i_target]; - + for (uint k = 0; k < WID; ++k) { + for (uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { + values[i_trans_ps_blockv_pencil(planeVector, k, i, lengthOfPencil)] = targetValues[i_trans_pt_blockv(planeVector, k, i)]; + } + } } } @@ -1152,6 +1268,8 @@ void get_seed_ids(const dccrg::Dccrg& mpi //cout << "localpropagatedcells.size() " << localPropagatedCells.size() << endl; //cout << "dimension " << dimension << endl; + + const bool debug = false; //#pragma omp parallel for for(auto celli: localPropagatedCells) { @@ -1222,13 +1340,104 @@ void get_seed_ids(const dccrg::Dccrg& mpi } } - cout << "Number of seed ids is " << seedIds.size() << endl; - cout << "Seed ids are: "; - for (const auto seedId : seedIds) { - cout << seedId << " "; + if(debug) { + cout << "Number of seed ids is " << seedIds.size() << endl; + cout << "Seed ids 
are: "; + for (const auto seedId : seedIds) { + cout << seedId << " "; + } + cout << endl; + } +} + + + + +/* Copy the data to the temporary values array, so that the + * dimensions are correctly swapped. Also, copy the same block for + * then neighboring spatial cells (in the dimension). neighbors + * generated with compute_spatial_neighbors_wboundcond). + * + * This function must be thread-safe. + * + * @param source_neighbors Array containing the VLASOV_STENCIL_WIDTH closest + * spatial neighbors of this cell in the propagated dimension. + * @param blockGID Global ID of the velocity block. + * @param int lengthOfPencil Number of spatial cells in pencil + * @param values Vector where loaded data is stored. + * @param cellid_transpose + * @param popID ID of the particle species. + */ +void copy_trans_block_data_amr( + SpatialCell** source_neighbors, + const vmesh::GlobalID blockGID, + int lengthOfPencil, + Vec* values, + const unsigned char* const cellid_transpose, + const uint popID) { + + // Allocate data for all blocks in pencil. Pad on both ends by VLASOV_STENCIL_WIDTH + Realf* blockDatas[lengthOfPencil + VLASOV_STENCIL_WIDTH * 2]; + + for (int b = -VLASOV_STENCIL_WIDTH; b < lengthOfPencil + VLASOV_STENCIL_WIDTH; ++b) { + // Get cell pointer and local block id + SpatialCell* srcCell = source_neighbors[b + VLASOV_STENCIL_WIDTH]; + const vmesh::LocalID blockLID = srcCell->get_velocity_block_local_id(blockGID,popID); + if (blockLID != srcCell->invalid_local_id()) { + // Get data pointer + blockDatas[b + VLASOV_STENCIL_WIDTH] = srcCell->get_data(blockLID,popID); + // //prefetch storage pointers to L1 + // _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]), _MM_HINT_T0); + // _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 64, _MM_HINT_T0); + // _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 128, _MM_HINT_T0); + // _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 192, _MM_HINT_T0); + // if(VPREC == 8) { + // //prefetch storage pointers to L1 + // _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 256, _MM_HINT_T0); + // _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 320, _MM_HINT_T0); + // _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 384, _MM_HINT_T0); + // _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 448, _MM_HINT_T0); + // } + + } else { + blockDatas[b + VLASOV_STENCIL_WIDTH] = NULL; + } } - cout << endl; + // Copy volume averages of this block from all spatial cells: + for (int b = -VLASOV_STENCIL_WIDTH; b < lengthOfPencil + VLASOV_STENCIL_WIDTH; ++b) { + if(blockDatas[b + VLASOV_STENCIL_WIDTH] != NULL) { + Realv blockValues[WID3]; + const Realf* block_data = blockDatas[b + VLASOV_STENCIL_WIDTH]; + // Copy data to a temporary array and transpose values so that mapping is along k direction. + // spatial source_neighbors already taken care of when + // creating source_neighbors table. If a normal spatial cell does not + // simply have the block, its value will be its null_block which + // is fine. 
This null_block has a value of zero in data, and that + // is thus the velocity space boundary + for (uint i=0; i& // Vector with all cell ids vector allCells(localPropagatedCells); - allCells.insert(allCells.end(), remoteTargetCells.begin(), remoteTargetCells.end()); - - const uint nSourceNeighborsPerCell = 1 + 2 * VLASOV_STENCIL_WIDTH; + allCells.insert(allCells.end(), remoteTargetCells.begin(), remoteTargetCells.end()); // Vectors of pointers to the cell structs std::vector allCellsPointer(allCells.size()); - std::vector sourceNeighbors(localPropagatedCells.size() * nSourceNeighborsPerCell); - std::vector targetNeighbors(3 * localPropagatedCells.size() ); - + Vec allCellsDz[allCells.size()]; // Initialize allCellsPointer @@ -1343,20 +1548,21 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Construct pencils from the seedIds into a set of pencils. pencils = buildPencilsWithNeighbors(mpiGrid, pencils, seedId, ids, dimension, path); } - uint ibeg = 0; - uint iend = 0; - std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; - std::cout << "(x, y): indices " << std::endl; - std::cout << "-----------------------------------------------------------------" << std::endl; - for (uint i = 0; i < pencils.N; i++) { - iend += pencils.lengthOfPencils[i]; - std::cout << "(" << pencils.x[i] << ", " << pencils.y[i] << "): "; - for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { - std::cout << *j << " "; - } - ibeg = iend; - std::cout << std::endl; - } + + // uint ibeg = 0; + // uint iend = 0; + // std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; + // std::cout << "(x, y): indices " << std::endl; + // std::cout << "-----------------------------------------------------------------" << std::endl; + // for (uint i = 0; i < pencils.N; i++) { + // iend += pencils.lengthOfPencils[i]; + // std::cout << "(" << pencils.x[i] << ", " << pencils.y[i] << "): "; + // for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { + // std::cout << *j << " "; + // } + // ibeg = iend; + // std::cout << std::endl; + // } // Add the final set of pencils to the pencilSets - vector. 
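// A minimal sketch of walking the flattened pencil set, equivalent to the
// printout commented out above (hypothetical debug helper, for reference):
//
//   uint begin = 0;
//   for (uint p = 0; p < pencils.N; ++p) {
//      std::cout << "(" << pencils.x[p] << ", " << pencils.y[p] << "):";
//      for (uint j = begin; j < begin + pencils.lengthOfPencils[p]; ++j)
//         std::cout << " " << pencils.ids[j];
//      std::cout << std::endl;
//      begin += pencils.lengthOfPencils[p];
//   }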
@@ -1398,7 +1604,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& int t1 = phiprof::initializeTimer("mappingAndStore"); //#pragma omp parallel - { + { //std::vector targetsValid(localPropagatedCells.size()); //std::vector allCellsBlockLocalID(allCells.size()); @@ -1411,6 +1617,22 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Get global id of the velocity block vmesh::GlobalID blockGID = unionOfBlocks[blocki]; + // bool debugflag = false; + // uint debugcell = 0; + // const vmesh::LocalID debugLID = allCellsPointer[debugcell]->get_velocity_block_local_id(blockGID, popID); + // Realf* data = allCellsPointer[debugcell]->get_data(debugLID,popID); + // for (uint i = 0; i < WID3; i++) + // if (data[i] != 0) debugflag = true; + + // if (debugflag) { + // cout << "blockGID, blockLID " << blockGID << ", " << debugLID << endl; + // cout << "sourceData: " << endl; + // for (uint i = 0; i < WID3; i++) + // cout << ", " << data[i]; + // cout << endl; + // } + + velocity_block_indices_t block_indices; uint8_t vRefLevel; vmesh.getIndices(blockGID,vRefLevel, block_indices[0], @@ -1418,126 +1640,192 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Loop over sets of pencils // This loop only has one iteration for now - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; for ( auto pencils: pencilSets ) { - // Allocate targetdata sum(lengths of pencils)*WID3) - Vec targetData[pencils.sumOfLengths * WID3]; + std::vector targetBlockData((pencils.sumOfLengths + 2 * pencils.N) * WID3); + // Allocate vectorized targetvecdata sum(lengths of pencils)*WID3 / VECL) + // Add padding by 2 for each pencil + Vec targetVecData[(pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL]; - // Initialize targetdata to 0 - for( uint i = 0; i < pencils.sumOfLengths * WID3; i++ ) { - targetData[i] = 0.0; + // Initialize targetvecdata to 0 + for( uint i = 0; i < (pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL; i++ ) { + targetVecData[i] = 0.0; } // TODO: There's probably a smarter way to keep track of where we are writing - // in the target data structure. + // in the target data array. uint targetDataIndex = 0; // Compute spatial neighbors for target cells. - // For targets we only have actual cells as we do not - // want to propagate boundary cells (array may contain - // INVALID_CELLIDs at boundaries). - for ( auto celli: pencils.ids ) { - compute_spatial_target_neighbors(mpiGrid, localPropagatedCells[celli], dimension, - targetNeighbors.data() + celli * 3); - } - - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + // For targets we need the local cells, plus a padding of 1 cell at both ends + std::vector targetNeighbors(pencils.sumOfLengths + pencils.N * 2 ); + + compute_spatial_target_neighbors_for_pencils(mpiGrid, pencils, dimension,targetNeighbors.data()); // Loop over pencils for(uint pencili = 0; pencili < pencils.N; pencili++){ - + + int L = pencils.lengthOfPencils[pencili]; + + // Compute spatial neighbors for the source cells of the pencil. In + // source cells we have a wider stencil and take into account boundaries. 
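// Sizing sketch, assuming VLASOV_STENCIL_WIDTH = 2 (the padding the h4 PPM
// reconstruction needs): a pencil of L = 5 cells gets a source array, declared
// next, of 5 + 2 * 2 = 9 pointers covering pencil cells -2 .. 6, while the
// target array above pads by only one cell per end (L + 2 entries per pencil).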
+ std::vector sourceNeighbors(L + 2 * VLASOV_STENCIL_WIDTH); + compute_spatial_source_neighbors_for_pencil(mpiGrid, pencils, pencili, dimension, sourceNeighbors.data()); + // Allocate source data: sourcedata pencilIds = pencils.getIds(pencili); + + // load data(=> sourcedata) / (proper xy reconstruction in future) + copy_trans_block_data_amr(sourceNeighbors.data(), blockGID, L, sourceVecData, + cellid_transpose, popID); + + // for (auto celli: pencilIds) { + // if (celli == debugcell) { + // if (debugflag) { + // cout << "sourceVecData: " << endl; + // for (uint i = 0; i < WID3 / VECL; i++) { + // for (uint j = 0; j < VECL; j++) + // cout << ", " << sourceVecData[i][j]; + // } + // cout << endl; + // } + // } + // } - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - for( auto celli: pencilIds) { - compute_spatial_source_neighbors(mpiGrid, localPropagatedCells[celli], - dimension, sourceNeighbors.data() - + celli * nSourceNeighborsPerCell); - } - - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + // cout << "sourceVecData: " << endl; + // for (uint i = 0; i < WID3; i++) + // cout << " " << sourceVecData[VLASOV_STENCIL_WIDTH + i][0]; + // cout << endl; Vec * dzPointer = allCellsDz + pencilIds[0]; - - // load data(=> sourcedata) / (proper xy reconstruction in future) - // copied from regular code, should work? - int offset = 0; // TODO: Figure out what needs to go here. - copy_trans_block_data(sourceNeighbors.data() + offset, - blockGID, sourceData, cellid_transpose, popID); - - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - - // Calculate cell centered velocity for each v cell in the block - const Vec k = (0,1,2,3); - const Vec cell_vz = (block_indices[dimension] * WID + k + 0.5) * dvz + vz_min; - - const Vec z_translation = dt * cell_vz; - // propagate pencil(blockid = velocities, pencil-ids = dzs ), - propagatePencil(dzPointer, sourceData, z_translation, pencils.lengthOfPencils[pencili], nSourceNeighborsPerCell); + + propagatePencil(dzPointer, sourceVecData, dimension, blockGID, dt, vmesh, L); // sourcedata => targetdata[this pencil]) - for (auto value: sourceData) { - targetData[targetDataIndex] = value; + for (int i = 0; i < L + 2; i++) { + Vec value = sourceVecData[i + VLASOV_STENCIL_WIDTH - 1]; + targetVecData[targetDataIndex] += value; targetDataIndex++; } - + + // for (auto celli: pencilIds) { + // if (celli == debugcell) { + // if (debugflag) { + // cout << "targetVecData: " << endl; + // for (uint i = 0; i < WID3 / VECL; i++) { + // for (uint j = 0; j < VECL; j++) + // cout << ", " << targetVecData[i][j]; + // } + // cout << endl; + // } + // } + // } // dealloc source data -- Should be automatic since it's declared in this iteration? 
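// Answering the question above: yes -- sourceVecData has automatic storage
// (a variable-length array, a compiler extension), so it is released when
// this loop iteration's scope ends. A sketch of an explicit alternative,
// modulo the alignment Vec requires:
//
//   std::vector<Vec> sourceVecData((L + 2 * VLASOV_STENCIL_WIDTH) * WID3 / VECL);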
- + //throw; } - + + //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + + // store_data(target_data => allCellsPointer) :Aggregate data for blockid to original location // Loop over pencils again for(uint pencili = 0; pencili < pencils.N; pencili++){ - // store_data(target_data =>) :Aggregate data for blockid to original location - - //store values from target_values array to the actual blocks - for(auto celli: pencils.ids) { - //TODO: Figure out validity check later - //if(targetsValid[celli]) { - for(uint ti = 0; ti < 3; ti++) { - SpatialCell* spatial_cell = targetNeighbors[celli * 3 + ti]; - if(spatial_cell ==NULL) { - //invalid target spatial cell - continue; - } - - // Get local ID of the velocity block + vector pencilIds = pencils.getIds(pencili); + uint targetLength = pencils.lengthOfPencils[pencili] + 2; + uint totalLength = 0; + + // Unpack the vector data + + // Loop over cells in pencil +- 1 padded cell + for ( uint celli = 0; celli < targetLength; ++celli ) { + Realv vector[VECL]; + // Loop over 1 vspace dimension + for (uint k = 0; k< WID; ++k) { + // Loop over 2nd vspace dimension + for(uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { + targetVecData[i_trans_pt_blockv(planeVector, k, celli)].store(vector); + // Loop over 3rd (vectorized) vspace dimension + for (uint i = 0; i < VECL; i++) { + targetBlockData[(totalLength + celli) * WID3 + cellid_transpose[i + planeVector * VECL + k * WID2]] = vector[i]; + } + } + } + } + + //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + + // reset blocks in all non-sysboundary neighbor spatial cells for this block id and pencil id + for (auto *spatial_cell: targetNeighbors) { + // Check for system boundary + if(spatial_cell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + // Get local velocity block id const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); - - if (blockLID == vmesh::VelocityMesh::invalidLocalID()) { - // block does not exist. If so, we do not create it and add stuff to it here. 
- // We have already created blocks around blocks with content in - // spatial sense, so we have no need to create even more blocks here - // TODO add loss counter - continue; + // Check for invalid id + if (blockLID != vmesh::VelocityMesh::invalidLocalID()) { + // Get a pointer to the block data + Realf* blockData = spatial_cell->get_data(blockLID, popID); + // Loop over velocity block cells + for(int i = 0; i < WID3; i++) { + blockData[i] = 0.0; + } } - // Pointer to the data field of the velocity block - Realf* blockData = spatial_cell->get_data(blockLID, popID); - // Unpack the vector data to the cell data types - for(int i = 0; i < WID3 ; i++) { + } + } + // store values from targetBlockData array to the actual blocks - // Write data into target block - blockData[i] += targetData[(celli * 3 + ti)][i]; - } + //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + + // Loop over cells in the pencil, including the padded cells of the target array + for ( uint icell = -1; icell < pencils.lengthOfPencils[pencili] + 1; icell++ ) { + + uint GID = (icell + 1) + pencili * pencils.lengthOfPencils[pencili]; + SpatialCell* spatial_cell = targetNeighbors[GID]; + if(spatial_cell ==NULL) { + //invalid target spatial cell + continue; + } + + const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); + if (blockLID == vmesh::VelocityMesh::invalidLocalID()) { + continue; + } + + Realf* blockData = spatial_cell->get_data(blockLID, popID); + for(int i = 0; i < WID3 ; i++) { + blockData[i] += targetBlockData[GID * WID3 + i]; } - //} - } - + + totalLength += targetLength; + // dealloc target data -- Should be automatic again? + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; } } + + // data = allCellsPointer[debugcell]->get_data(debugLID,popID); + + // if (debugflag) { + // cout << "blockGID, blockLID " << blockGID << ", " << debugLID << endl; + // cout << "targetData: " << endl; + // for (uint i = 0; i < WID3; i++) + // cout << ", " << data[i]; + // cout << endl; + // throw; + // } + } } + //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + + // cerr << "At the end of trans_map_1d_amr" << endl; + // throw; + return true; } From 72f5c2af043fd7bcf2f41720707db69627c2817c Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 31 Aug 2018 16:58:25 +0300 Subject: [PATCH 047/602] Lots of bug fixes, mostly related to the indexing in the i_trans_pt/s_blockv macros. Propagate does not produce NaN's and the data write writes values correctly. Data read still seems buggy. Amr translation routine is called in the x-direction in vlasovmover.cpp. 
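The indexing fixes below center on the i_trans_pt_blockv and i_trans_ps_blockv_pencil macros. A small worked sketch of the pencil-source layout the second macro implies, assuming WID = 4, VECL = 4 (so VEC_PER_PLANE = WID2 / VECL = 4) and VLASOV_STENCIL_WIDTH = 2:

   // i_trans_ps_blockv_pencil(planeVector, k, cellIndex, L)
   //   = cellIndex + VLASOV_STENCIL_WIDTH
   //     + (planeVector + k * VEC_PER_PLANE) * (L + 2 * VLASOV_STENCIL_WIDTH)
   //
   // For a pencil of L = 3 cells, each (planeVector, k) plane owns one
   // contiguous stride of L + 4 = 7 Vec entries; cellIndex = -2 .. 4 maps to
   // offsets 0 .. 6 inside that stride, so all spatial cells of a pencil are
   // adjacent in memory for a given velocity plane, which is the layout the
   // nonuniform PPM reconstruction walks over.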
--- vlasovsolver/cpu_1d_ppm_nonuniform.hpp | 6 +- vlasovsolver/cpu_trans_map.cpp | 366 +++++++++++++++++-------- vlasovsolver/vlasovmover.cpp | 2 - 3 files changed, 257 insertions(+), 117 deletions(-) diff --git a/vlasovsolver/cpu_1d_ppm_nonuniform.hpp b/vlasovsolver/cpu_1d_ppm_nonuniform.hpp index ae613bcfe..8e4808399 100644 --- a/vlasovsolver/cpu_1d_ppm_nonuniform.hpp +++ b/vlasovsolver/cpu_1d_ppm_nonuniform.hpp @@ -51,10 +51,10 @@ inline void compute_ppm_coeff_nonuniform(const Vec * const dv, const Vec * const (p_face - m_face)*(p_face - m_face) * one_sixth, 3 * values[k] - 2 * p_face, m_face); - p_face = select(-(p_face - m_face) * (p_face - m_face) * one_sixth > + p_face = select(-(p_face - m_face) * (p_face - m_face) * one_sixth > (p_face - m_face) * (values[k] - 0.5 * (m_face + p_face)), - 3 * values[k] - 2 * m_face, - p_face); + 3 * values[k] - 2 * m_face, + p_face); //Fit a second order polynomial for reconstruction see, e.g., White //2008 (PQM article) (note additional integration factors built in, diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index 8e34b6f6b..1d166e3b6 100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -119,7 +119,7 @@ struct setOfPencils { //#define i_trans_pt_blockv(j, k, b_k) ( ( (j) * WID + (k) * WID2 + ((b_k) + 1 ) * WID3) / VECL ) #define i_trans_pt_blockv(planeVectorIndex, planeIndex, blockIndex) ( planeVectorIndex + planeIndex * VEC_PER_PLANE + (blockIndex + 1) * VEC_PER_BLOCK) -#define i_trans_ps_blockv_pencil(planeVectorIndex, planeIndex, blockIndex, lengthOfPencil) ( (blockIndex) + VLASOV_STENCIL_WIDTH + ( (planeVectorIndex) + (planeIndex) * VEC_PER_PLANE ) * ( lengthOfPencil + 2 * VLASOV_STENCIL_WIDTH) ) +#define i_trans_ps_blockv_pencil(planeVectorIndex, planeIndex, blockIndex, lengthOfPencil) ( (blockIndex) + VLASOV_STENCIL_WIDTH + ( (planeVectorIndex) + (planeIndex) * VEC_PER_PLANE ) * ( lengthOfPencil + 2 * VLASOV_STENCIL_WIDTH) ) //Is cell translated? 
It is not translated if DO_NO_COMPUTE or if it is sysboundary cell and not in first sysboundarylayer bool do_translate_cell(SpatialCell* SC){ @@ -283,11 +283,11 @@ void compute_spatial_source_neighbors_for_pencil(const dccrg::Dccrg ids = pencils.getIds(iPencil); for (int iCell = -VLASOV_STENCIL_WIDTH; iCell < L + VLASOV_STENCIL_WIDTH; iCell++) { - CellID cellID = ids[iCell]; + CellID cellID = ids[min(max(iCell, 0), L - 1)]; int i = 0; - if(iCell < 0) i = iCell; - if(iCell > L) i = iCell - L; + if(iCell <= 0) i = iCell; + if(iCell >= L) i = iCell - (L - 1); switch (dimension) { case 0: @@ -357,7 +357,7 @@ void compute_spatial_target_neighbors_for_pencils(const dccrg::Dccrg ids = pencils.getIds(iPencil); for (int iCell = -1; iCell <= L; iCell++) { - CellID cellID = ids[iCell]; + CellID cellID = ids[min(max(iCell,0),L - 1)]; int i = 0; if(iCell == -1) i = -1; @@ -1162,7 +1162,7 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg &vmesh, const uint lengthOfPencil) { // Get velocity data from vmesh that we need later to calculate the translation @@ -1180,7 +1180,13 @@ void propagatePencil(Vec dz[], Vec values[], const uint dimension, const uint bl // Vector buffer where we write data, initialized to 0*/ Vec targetValues[(lengthOfPencil + 2 * nTargetNeighborsPerPencil) * WID3 / VECL]; - + + // cout << "propagatePencil" << endl; + // for (uint i = 0; i < lengthOfPencil + 2 * VLASOV_STENCIL_WIDTH; i++) { + // cout << dz[i][0] << ", "; + // } + // cout << endl; + for (auto value: targetValues) { // init target_values @@ -1192,7 +1198,6 @@ void propagatePencil(Vec dz[], Vec values[], const uint dimension, const uint bl // The source array is padded by VLASOV_STENCIL_WIDTH on both sides. uint i_source = i + VLASOV_STENCIL_WIDTH; - uint i_target = i + nTargetNeighborsPerPencil; for (uint k = 0; k < WID; ++k) { @@ -1218,14 +1223,48 @@ void propagatePencil(Vec dz[], Vec values[], const uint dimension, const uint bl // std::exit(1); // } + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + for (uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { // Compute polynomial coefficients Vec a[3]; - compute_ppm_coeff_nonuniform(dz + i_source, - values + i_trans_ps_blockv_pencil(planeVector, k, i, lengthOfPencil), - h4, i_source, a); + // Dz: is a padded array, pointer can point to the beginning, i + VLASOV_STENCIL_WIDTH will get the right cell. + // values: transpose function adds VLASOV_STENCIL_WIDTH to the block index, therefore we substract it here, then + // i + VLASOV_STENCIL_WIDTH will point to the right cell. Complicated! Why! Sad! MVGA! + compute_ppm_coeff_nonuniform(dz, + values + i_trans_ps_blockv_pencil(planeVector, k, i - VLASOV_STENCIL_WIDTH, lengthOfPencil), + h4, i + VLASOV_STENCIL_WIDTH, a); // TODO: may need i - VLASOV_STENCIL_WIDTH instead of i for i_trans_ps_blockv_pencil. 
+ + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + + if (horizontal_or(is_nan(z_1))) { + cerr << "nan found in z_1" << endl; + throw; + } + + if (horizontal_or(is_nan(z_2))) { + cerr << "nan found in z_2" << endl; + throw; + } + + for (uint ia = 0; ia < 3; ia++) { + if(horizontal_or(is_nan(a[ia]))) { + cerr << "Found NaN in polynomial coefficient " << ia << endl; + for (uint j = 0; j < VECL; j++) { + cout << j << " " << a[0][j] << " " << a[1][j] << " " << a[2][j] << endl; + } + for (int ii = -2; ii <= 1; ii++) { + for (uint j = 0; j < VECL; j++) { + cout << ii << " " << j << " dz, value: " << dz[ii + i_source][0] << ", "; + cout << values[ii + i_trans_ps_blockv_pencil(planeVector, k, i, lengthOfPencil)][0] << endl; + } + } + + throw; + } + } // Compute integral const Vec ngbr_target_density = @@ -1236,9 +1275,9 @@ void propagatePencil(Vec dz[], Vec values[], const uint dimension, const uint bl // in the neighbor cell we will put this density //targetValues[i_cell + target_scell_index] += ngbr_target_density * dz[i_cell] / dz[i_cell + target_scell_index]; targetValues[i_trans_pt_blockv(planeVector, k, i + 1)] += select( positiveTranslationDirection, - ngbr_target_density * dz[i_source] / dz[i_source + 1],Vec(0.0)); + ngbr_target_density * dz[i] / dz[i_source + 1],Vec(0.0)); targetValues[i_trans_pt_blockv(planeVector, k, i - 1 )] += select(!positiveTranslationDirection, - ngbr_target_density * dz[i_source] / dz[i_source - 1],Vec(0.0)); + ngbr_target_density * dz[i] / dz[i_source - 1],Vec(0.0)); // in the current original cells we will put the rest of the original density targetValues[i_trans_pt_blockv(planeVector, k, i)] += values[i_trans_ps_blockv_pencil(planeVector, k, i, lengthOfPencil)] - ngbr_target_density; @@ -1247,14 +1286,20 @@ void propagatePencil(Vec dz[], Vec values[], const uint dimension, const uint bl } // Write target data into source data + // VLASOV_STENCIL_WIDTH >= nTargetNeighborsPerPencil is required (default 2 >= 1) - for (uint i = 0; i < lengthOfPencil; i++) { - uint i_source = i + VLASOV_STENCIL_WIDTH; - uint i_target = i + nTargetNeighborsPerPencil; + for (int i = -nTargetNeighborsPerPencil; i < lengthOfPencil + nTargetNeighborsPerPencil; i++) { for (uint k = 0; k < WID; ++k) { - for (uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { - values[i_trans_ps_blockv_pencil(planeVector, k, i, lengthOfPencil)] = targetValues[i_trans_pt_blockv(planeVector, k, i)]; + for (uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { + + values[i_trans_ps_blockv_pencil(planeVector, k, i, lengthOfPencil)] = + targetValues[i_trans_pt_blockv(planeVector, k, i)]; + + if(horizontal_or(is_nan(values[i_trans_ps_blockv_pencil(planeVector, k, i, lengthOfPencil)]))) { + cerr << "nan detected in output" << endl; + throw; + } } } } @@ -1376,39 +1421,46 @@ void copy_trans_block_data_amr( const unsigned char* const cellid_transpose, const uint popID) { - // Allocate data for all blocks in pencil. Pad on both ends by VLASOV_STENCIL_WIDTH - Realf* blockDatas[lengthOfPencil + VLASOV_STENCIL_WIDTH * 2]; + // Allocate data pointer for all blocks in pencil. 
Pad on both ends by VLASOV_STENCIL_WIDTH + Realf* blockDataPointer[lengthOfPencil + 2 * VLASOV_STENCIL_WIDTH]; - for (int b = -VLASOV_STENCIL_WIDTH; b < lengthOfPencil + VLASOV_STENCIL_WIDTH; ++b) { + for (int b = -VLASOV_STENCIL_WIDTH; b < lengthOfPencil + VLASOV_STENCIL_WIDTH; b++) { // Get cell pointer and local block id SpatialCell* srcCell = source_neighbors[b + VLASOV_STENCIL_WIDTH]; + + // cout << "Spatial cell coordinates : "; + // cout << srcCell->SpatialCell::parameters[CellParams::XCRD] << ", "; + // cout << srcCell->SpatialCell::parameters[CellParams::YCRD] << ", "; + // cout << srcCell->SpatialCell::parameters[CellParams::ZCRD]; + // cout << endl; + const vmesh::LocalID blockLID = srcCell->get_velocity_block_local_id(blockGID,popID); if (blockLID != srcCell->invalid_local_id()) { // Get data pointer - blockDatas[b + VLASOV_STENCIL_WIDTH] = srcCell->get_data(blockLID,popID); + blockDataPointer[b + VLASOV_STENCIL_WIDTH] = srcCell->get_data(blockLID,popID); // //prefetch storage pointers to L1 - // _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]), _MM_HINT_T0); - // _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 64, _MM_HINT_T0); - // _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 128, _MM_HINT_T0); - // _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 192, _MM_HINT_T0); + // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]), _MM_HINT_T0); + // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 64, _MM_HINT_T0); + // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 128, _MM_HINT_T0); + // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 192, _MM_HINT_T0); // if(VPREC == 8) { - // //prefetch storage pointers to L1 - // _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 256, _MM_HINT_T0); - // _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 320, _MM_HINT_T0); - // _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 384, _MM_HINT_T0); - // _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 448, _MM_HINT_T0); + // //prefetch storage pointers to L1 + // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 256, _MM_HINT_T0); + // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 320, _MM_HINT_T0); + // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 384, _MM_HINT_T0); + // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 448, _MM_HINT_T0); // } } else { - blockDatas[b + VLASOV_STENCIL_WIDTH] = NULL; + blockDataPointer[b + VLASOV_STENCIL_WIDTH] = NULL; } } // Copy volume averages of this block from all spatial cells: - for (int b = -VLASOV_STENCIL_WIDTH; b < lengthOfPencil + VLASOV_STENCIL_WIDTH; ++b) { - if(blockDatas[b + VLASOV_STENCIL_WIDTH] != NULL) { + for (int b = -VLASOV_STENCIL_WIDTH; b < lengthOfPencil + VLASOV_STENCIL_WIDTH; b++) { + if(blockDataPointer[b + VLASOV_STENCIL_WIDTH] != NULL) { Realv blockValues[WID3]; - const Realf* block_data = blockDatas[b + VLASOV_STENCIL_WIDTH]; + const Realf* block_data = blockDataPointer[b + VLASOV_STENCIL_WIDTH]; // Copy data to a temporary array and transpose values so that mapping is along k direction. // spatial source_neighbors already taken care of when // creating source_neighbors table. If a normal spatial cell does not @@ -1421,19 +1473,18 @@ void copy_trans_block_data_amr( // now load values into the actual values table.. 
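// A small illustration of the transpose step above, assuming WID = 4 and the
// usual convention that dimension 2 (z) leaves the ordering unchanged:
//
//   for (uint i = 0; i < WID3; ++i)
//      blockValues[i] = block_data[cellid_transpose[i]];
//
// For dimension 0 or 1, cellid_transpose permutes the indices so that the
// propagated direction becomes the k index, after which the consecutive
// VECL-wide loads below fill one Vec per (planeVector, k) pair of the
// pencil layout.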
uint offset =0; - for (uint k=0; k& allCells.insert(allCells.end(), remoteTargetCells.begin(), remoteTargetCells.end()); // Vectors of pointers to the cell structs - std::vector allCellsPointer(allCells.size()); - - Vec allCellsDz[allCells.size()]; + std::vector allCellsPointer(allCells.size()); // Initialize allCellsPointer //#pragma omp parallel for @@ -1476,13 +1525,10 @@ bool trans_map_1d_amr(const dccrg::Dccrg& for(uint celli = 0; celli < allCells.size(); celli++){ //cout << allCells[celli] << " "; allCellsPointer[celli] = mpiGrid[allCells[celli]]; - - // At the same time, calculate dz's and store them in an array. - allCellsDz[celli] = P::dz_ini / pow(2.0, mpiGrid.get_refinement_level(celli)); } //cout << endl; - // Fiddle indices x,y,z + // Fiddle indices x,y,z in VELOCITY SPACE switch (dimension) { case 0: // set values in array that is used to convert block indices @@ -1617,20 +1663,41 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Get global id of the velocity block vmesh::GlobalID blockGID = unionOfBlocks[blocki]; - // bool debugflag = false; - // uint debugcell = 0; - // const vmesh::LocalID debugLID = allCellsPointer[debugcell]->get_velocity_block_local_id(blockGID, popID); - // Realf* data = allCellsPointer[debugcell]->get_data(debugLID,popID); + bool debugflag = false; + CellID debugcell; + uint allCellsPointerIndex = 0; + + const vmesh::LocalID debugLID = allCellsPointer[allCellsPointerIndex]->get_velocity_block_local_id(blockGID, popID); + Realf* data = allCellsPointer[allCellsPointerIndex]->get_data(debugLID,popID); // for (uint i = 0; i < WID3; i++) // if (data[i] != 0) debugflag = true; - // if (debugflag) { - // cout << "blockGID, blockLID " << blockGID << ", " << debugLID << endl; - // cout << "sourceData: " << endl; - // for (uint i = 0; i < WID3; i++) - // cout << ", " << data[i]; - // cout << endl; - // } + if (debugflag) { + cout << "blockGID, blockLID " << blockGID << ", " << debugLID << endl; + + cout << "cell " << allCellsPointerIndex << " coordinates: "; + cout << allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::XCRD] << ", "; + cout << allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::YCRD] << ", "; + cout << allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::ZCRD]; + cout << endl; + + const creal x = allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::XCRD]; + const creal y = allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::YCRD]; + const creal z = allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::ZCRD]; + const creal dx = allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::DX]; + const creal dy = allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::DY]; + const creal dz = allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::DZ]; + debugcell = 1 + (int) ((x - Parameters::xmin) / dx) + + (int) ((y - Parameters::ymin) / dy) * Parameters::xcells_ini + + (int) ((z - Parameters::zmin) / dz) * Parameters::xcells_ini * Parameters::ycells_ini; + + cout << "Debug cell id is " << debugcell << endl; + + cout << "sourceData: " << endl; + for (uint i = 0; i < WID3; i++) + cout << ", " << data[i]; + cout << endl; + } velocity_block_indices_t block_indices; @@ -1650,7 +1717,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Initialize targetvecdata to 0 for( uint i = 0; i < (pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL; i++ ) { - targetVecData[i] = 0.0; 
+ targetVecData[i] = Vec(0.0); } // TODO: There's probably a smarter way to keep track of where we are writing @@ -1661,50 +1728,98 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // For targets we need the local cells, plus a padding of 1 cell at both ends std::vector targetNeighbors(pencils.sumOfLengths + pencils.N * 2 ); - compute_spatial_target_neighbors_for_pencils(mpiGrid, pencils, dimension,targetNeighbors.data()); + compute_spatial_target_neighbors_for_pencils(mpiGrid, pencils, dimension, targetNeighbors.data()); + + // if(debugflag) { + // uint i = 0; + // for (auto neighbor: targetNeighbors) { + // cout << "Target Neighbor " << i << " coordinates : "; + // cout << neighbor->SpatialCell::parameters[CellParams::XCRD] << ", "; + // cout << neighbor->SpatialCell::parameters[CellParams::YCRD] << ", "; + // cout << neighbor->SpatialCell::parameters[CellParams::ZCRD]; + // cout << endl; + // i++; + // } + // } // Loop over pencils for(uint pencili = 0; pencili < pencils.N; pencili++){ - + + vector pencilIds = pencils.getIds(pencili); int L = pencils.lengthOfPencils[pencili]; // Compute spatial neighbors for the source cells of the pencil. In // source cells we have a wider stencil and take into account boundaries. std::vector sourceNeighbors(L + 2 * VLASOV_STENCIL_WIDTH); compute_spatial_source_neighbors_for_pencil(mpiGrid, pencils, pencili, dimension, sourceNeighbors.data()); + + + Vec dz[sourceNeighbors.size()]; + uint i = 0; + for(neighbor: sourceNeighbors) { + switch (dimension) { + case(0): + dz[i] = neighbor->SpatialCell::parameters[CellParams::DX]; + break; + case(1): + dz[i] = neighbor->SpatialCell::parameters[CellParams::DY]; + break; + case(2): + dz[i] = neighbor->SpatialCell::parameters[CellParams::DZ]; + break; + } + i++; + + //cout << i << " " << dz[i][0] << ", " << endl; + } + // if(debugflag) { + // uint i = 0; + // for (auto neighbor: sourceNeighbors) { + // cout << "Source Neighbor " << i << " of pencil " << pencili << " coordinates : "; + // cout << neighbor->SpatialCell::parameters[CellParams::XCRD] << ", "; + // cout << neighbor->SpatialCell::parameters[CellParams::YCRD] << ", "; + // cout << neighbor->SpatialCell::parameters[CellParams::ZCRD]; + // cout << endl; + // i++; + // } + // } + // Allocate source data: sourcedata pencilIds = pencils.getIds(pencili); - // load data(=> sourcedata) / (proper xy reconstruction in future) copy_trans_block_data_amr(sourceNeighbors.data(), blockGID, L, sourceVecData, cellid_transpose, popID); - - // for (auto celli: pencilIds) { - // if (celli == debugcell) { - // if (debugflag) { - // cout << "sourceVecData: " << endl; - // for (uint i = 0; i < WID3 / VECL; i++) { - // for (uint j = 0; j < VECL; j++) - // cout << ", " << sourceVecData[i][j]; - // } - // cout << endl; - // } - // } - // } + + int n = 0; + for (auto celli: pencilIds) { + n++; + if (celli == debugcell && debugflag) { + cout << "Cell coordinates (x): " << + pencils.x[pencili] - 0.5 * allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::DX]; + cout << " (y): " << + (n - 1) * allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::DY] << endl; + cout << "sourceVecData: " << sizeof(sourceVecData) / sizeof(sourceVecData[0]) << " " << WID3 << " " << VECL << endl; + + for (uint i = 0; i < WID3 / VECL; i++) { + for (uint j = 0; j < VECL; j++) + cout << ", " << sourceVecData[i][j]; + } + cout << endl; + } + } // cout << "sourceVecData: " << endl; // for (uint i = 0; i < WID3; i++) // cout << " " << sourceVecData[VLASOV_STENCIL_WIDTH + 
i][0]; - // cout << endl; - - Vec * dzPointer = allCellsDz + pencilIds[0]; - - propagatePencil(dzPointer, sourceVecData, dimension, blockGID, dt, vmesh, L); + // cout << endl; + + // Dz and sourceVecData are both padded by VLASOV_STENCIL_WIDTH + // Dz has 1 value/cell, sourceVecData has WID3 values/cell + propagatePencil(dz, sourceVecData, dimension, blockGID, dt, vmesh, L); // sourcedata => targetdata[this pencil]) for (int i = 0; i < L + 2; i++) { @@ -1713,18 +1828,16 @@ bool trans_map_1d_amr(const dccrg::Dccrg& targetDataIndex++; } - // for (auto celli: pencilIds) { - // if (celli == debugcell) { - // if (debugflag) { - // cout << "targetVecData: " << endl; - // for (uint i = 0; i < WID3 / VECL; i++) { - // for (uint j = 0; j < VECL; j++) - // cout << ", " << targetVecData[i][j]; - // } - // cout << endl; - // } - // } - // } + for (auto celli: pencilIds) { + if (celli == debugcell && debugflag) { + cout << "targetVecData: " << sizeof(targetVecData) / sizeof(targetVecData[0]) << endl; + for (uint i = 0; i < WID3 / VECL; i++) { + for (uint j = 0; j < VECL; j++) + cout << ", " << targetVecData[i][j]; + } + cout << endl; + } + } // dealloc source data -- Should be automatic since it's declared in this iteration? //throw; } @@ -1733,29 +1846,37 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // store_data(target_data => allCellsPointer) :Aggregate data for blockid to original location // Loop over pencils again + uint totalLength = 0; for(uint pencili = 0; pencili < pencils.N; pencili++){ vector pencilIds = pencils.getIds(pencili); uint targetLength = pencils.lengthOfPencils[pencili] + 2; - uint totalLength = 0; // Unpack the vector data // Loop over cells in pencil +- 1 padded cell for ( uint celli = 0; celli < targetLength; ++celli ) { Realv vector[VECL]; - // Loop over 1 vspace dimension + // Loop over 1st vspace dimension for (uint k = 0; k< WID; ++k) { // Loop over 2nd vspace dimension for(uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { - targetVecData[i_trans_pt_blockv(planeVector, k, celli)].store(vector); + targetVecData[i_trans_pt_blockv(planeVector, k, celli - 1)].store(vector); // Loop over 3rd (vectorized) vspace dimension for (uint i = 0; i < VECL; i++) { - targetBlockData[(totalLength + celli) * WID3 + cellid_transpose[i + planeVector * VECL + k * WID2]] = vector[i]; + targetBlockData[(totalLength + celli) * WID3 + cellid_transpose[i + planeVector * VECL + k * WID2]] + = vector[i]; } } } - } + + // if (celli > 0 && pencilIds[celli - 1] == debugcell && debugflag) { + // cout << "targetBlockData: "; + // for (uint i = (totalLength + celli) * WID3; i < (totalLength + celli + 1) * WID3; i++) + // cout << targetBlockData[i] << ", "; + // cout << endl; + // } + } //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; @@ -1778,27 +1899,46 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } // store values from targetBlockData array to the actual blocks - //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Loop over cells in the pencil, including the padded cells of the target array - for ( uint icell = -1; icell < pencils.lengthOfPencils[pencili] + 1; icell++ ) { - - uint GID = (icell + 1) + pencili * pencils.lengthOfPencils[pencili]; + for ( uint celli = 0; celli < targetLength; celli++ ) { + + uint GID = celli + totalLength; SpatialCell* spatial_cell = targetNeighbors[GID]; - if(spatial_cell ==NULL) { + if(spatial_cell == NULL) { //invalid target spatial cell + 
//cerr << "invalid target spatial cell" << endl; continue; } const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); if (blockLID == vmesh::VelocityMesh::invalidLocalID()) { + //cerr << "invalid local id" << endl; continue; } Realf* blockData = spatial_cell->get_data(blockLID, popID); for(int i = 0; i < WID3 ; i++) { + // if (celli > 0 && pencilIds[celli - 1] == debugcell && debugflag) { + // cout << targetBlockData[GID * WID3 + i] << ", "; + // } blockData[i] += targetBlockData[GID * WID3 + i]; } + // if (celli > 0 && pencilIds[celli - 1] == debugcell && debugflag) { + // cout << endl; + // cout << "GID: " << GID << endl; + // cout << "totalLength: " << totalLength << endl; + // cout << "celli: " << celli << endl; + // cout << "pencili: " << pencili << endl; + // cout << "Cell Id: " << pencilIds[celli - 1] << endl; + // cout << "Spatial cell coordinates : "; + // cout << spatial_cell->SpatialCell::parameters[CellParams::XCRD] << ", "; + // cout << spatial_cell->SpatialCell::parameters[CellParams::YCRD] << ", "; + // cout << spatial_cell->SpatialCell::parameters[CellParams::ZCRD]; + // cout << endl; + + // } } totalLength += targetLength; @@ -1808,16 +1948,18 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } } - // data = allCellsPointer[debugcell]->get_data(debugLID,popID); + data = allCellsPointer[allCellsPointerIndex]->get_data(debugLID,popID); - // if (debugflag) { - // cout << "blockGID, blockLID " << blockGID << ", " << debugLID << endl; - // cout << "targetData: " << endl; - // for (uint i = 0; i < WID3; i++) - // cout << ", " << data[i]; - // cout << endl; - // throw; - // } + if (debugflag) { + cout << "blockGID, blockLID " << blockGID << ", " << debugLID << endl; + cout << "targetData: " << endl; + for (uint i = 0; i < WID3; i++) { + if (i != 0) cout << ", "; + cout << data[i]; + } + cout << endl; + throw; + } } } diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index fcc46cb68..8587e8b56 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -110,8 +110,6 @@ void calculateSpatialTranslation( bool foo; foo = trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsx, 0,dt,popID); // map along x// phiprof::stop("compute-mapping-x"); - - cout << "return value of trans_map_1d_amr: " << foo << endl; trans_timer=phiprof::initializeTimer("update_remote-x","MPI"); phiprof::start("update_remote-x"); From edaac43b648f31869c19aa5a281cf4f50bc64946 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 3 Sep 2018 15:20:58 +0300 Subject: [PATCH 048/602] Fixed bugs --- .../build_pencils/grid_test_neighbors.cpp | 62 +++++++++---------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/mini-apps/build_pencils/grid_test_neighbors.cpp b/mini-apps/build_pencils/grid_test_neighbors.cpp index a415f41ea..382ff335e 100644 --- a/mini-apps/build_pencils/grid_test_neighbors.cpp +++ b/mini-apps/build_pencils/grid_test_neighbors.cpp @@ -34,7 +34,7 @@ struct setOfPencils { sumOfLengths = 0; } - void addPencil(std::vector idsIn, Real xIn, Real yIn, vector zIn) { + void addPencil(std::vector idsIn, Real xIn, Real yIn) { N += 1; sumOfLengths += idsIn.size(); @@ -42,14 +42,14 @@ struct setOfPencils { ids.insert(ids.end(),idsIn.begin(),idsIn.end()); x.push_back(xIn); y.push_back(yIn); - z.insert(z.end(),zIn.begin(),zIn.end()); } std::vector getIds(uint pencilId) { if (pencilId > N) { - return; + vector foo; + return foo; } CellID ibeg = 0; @@ -246,34 +246,34 @@ setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg 
&grid, // Get the x,y - coordinates of the pencil (in the direction perpendicular to the pencil) const auto coordinates = grid.get_center(ids[0]); double x,y; - uint ix,iy,iz - switch(dimension) { - case 0: { - ix = 1; - iy = 2; - iz = 0; - break; - } - case 1: { - ix = 2; - iy = 0; - iz = 1; - break; - } - case 2: { - ix = 0; - iy = 1; - iz = 2; - break; - } - default: { - ix = 0; - iy = 1; - iz = 2; - break; - } - } - + uint ix,iy,iz; + switch(dimension) { + case 0: + ix = 1; + iy = 2; + iz = 0; + break; + + case 1: + ix = 2; + iy = 0; + iz = 1; + break; + + case 2: + ix = 0; + iy = 1; + iz = 2; + break; + + default: + ix = 0; + iy = 1; + iz = 2; + break; + + } + x = coordinates[ix]; y = coordinates[iy]; // z = vector; From 8660eba90e49b47d5eb1844463bde449af6a8a13 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 13 Sep 2018 16:34:46 +0300 Subject: [PATCH 049/602] Working version of amr - routines in cpu_trans_map.cpp. Needs cleaning up and comments. Contains a lot of debugging print statements (off by default). Reproduces correct behaviour in transtest_2_maxw_500k_100k_20kms_20x20. Not tested with refined grids yet. --- MAKE/Makefile.appa | 4 +- Makefile | 2 +- mini-apps/translation_pencils/Makefile | 16 + mini-apps/translation_pencils/trans_test.cpp | 94 ++++ vlasovsolver/cpu_trans_map.cpp | 483 ++++++++++--------- vlasovsolver/cpu_trans_map.hpp | 57 +++ vlasovsolver/vlasovmover.cpp | 2 +- 7 files changed, 429 insertions(+), 229 deletions(-) create mode 100644 mini-apps/translation_pencils/Makefile create mode 100644 mini-apps/translation_pencils/trans_test.cpp diff --git a/MAKE/Makefile.appa b/MAKE/Makefile.appa index 10d7665bf..33e90ff57 100644 --- a/MAKE/Makefile.appa +++ b/MAKE/Makefile.appa @@ -48,8 +48,8 @@ FLAGS = #GNU flags: CC_BRAND = gcc CC_BRAND_VERSION = 5.4.0 -#CXXFLAGS += -O3 -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2 -CXXFLAGS += -g -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2 +CXXFLAGS += -O3 -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2 +#CXXFLAGS += -g -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2 testpackage: CXXFLAGS = -O2 -fopenmp -funroll-loops -std=c++0x -fabi-version=0 -mavx MATHFLAGS = -ffast-math diff --git a/Makefile b/Makefile index 0a2ce10b8..101010f98 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ ARCH = ${VLASIATOR_ARCH} #set FP precision to SP (single) or DP (double) FP_PRECISION = DP #Set floating point precision for distribution function to SPF (single) or DPF (double) -DISTRIBUTION_FP_PRECISION = SPF +DISTRIBUTION_FP_PRECISION = DPF #override flags if we are building testpackage: ifneq (,$(findstring testpackage,$(MAKECMDGOALS))) diff --git a/mini-apps/translation_pencils/Makefile b/mini-apps/translation_pencils/Makefile new file mode 100644 index 000000000..5666aada6 --- /dev/null +++ b/mini-apps/translation_pencils/Makefile @@ -0,0 +1,16 @@ +ARCH=$(VLASIATOR_ARCH) +include ../../MAKE/Makefile.${ARCH} + +FLAGS = -W -Wall -Wextra -pedantic -std=c++11 -O3 -D${VECTORCLASS} +INCLUDES = ${INC_DCCRG} ${INC_VECTORCLASS} ${INC_PROFILE} ${INC_FSGRID} -L$/usr/lib/x86_64-linux-gnu -lboost_program_options -I$/usr/include/boost -L/home/tkoskela/lib/zoltan/Zoltan_v3.83/build/lib -lzoltan -I/home/tkoskela/lib/zoltan/Zoltan_v3.83/build/include + +default: trans_test + +clean: + rm -rf *.o trans_test + +trans_test.o: trans_test.cpp + ${CMP} ${FLAGS} ${INCLUDES} -c $^ + +grid_test: trans_test.o + $(CMP) ${FLAGS} $^ ${INCLUDES} 
-o $@
diff --git a/mini-apps/translation_pencils/trans_test.cpp b/mini-apps/translation_pencils/trans_test.cpp
new file mode 100644
index 000000000..8f566cbf6
--- /dev/null
+++ b/mini-apps/translation_pencils/trans_test.cpp
@@ -0,0 +1,91 @@
+#include <iostream>
+#include <vector>
+#include <array>
+//#include "dccrg.hpp"
+#include "../../grid.h"
+#include "mpi.h"
+#include "../../definitions.h"
+#include "../../parameters.h"
+#include "../../vlasovsolver/cpu_trans_map.hpp"
+
+using namespace std;
+
+// struct grid_data {
+
+//    int value = 0;
+
+//    std::tuple<void*, int, MPI_Datatype> get_mpi_datatype()
+//    {
+//       return std::make_tuple(this, 0, MPI_BYTE);
+//    }
+
+// };
+
+int main(int argc, char* argv[]) {
+
+   if (MPI_Init(&argc, &argv) != MPI_SUCCESS) {
+      // cerr << "Couldn't initialize MPI." << endl;
+      abort();
+   }
+
+   MPI_Comm comm = MPI_COMM_WORLD;
+
+   int rank = 0, comm_size = 0;
+   MPI_Comm_rank(comm, &rank);
+   MPI_Comm_size(comm, &comm_size);
+
+   dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry> grid;
+
+   const uint dimension = 0;
+   const uint xDim = 9;
+   const uint yDim = 9;
+   const uint zDim = 1;
+   const std::array<uint64_t,3> grid_size = {{xDim,yDim,zDim}};
+
+   initializeGrid(argc,argv,grid,sysBoundaries,project);
+   //grid.initialize(grid_size, comm, "RANDOM", 1);
+
+   grid.balance_load();
+
+   bool doRefine = false;
+   const std::array<CellID,4> refinementIds = {{1,2,3,4}};
+   if(doRefine) {
+      for(uint i = 0; i < refinementIds.size(); i++) {
+         if(refinementIds[i] > 0) {
+            grid.refine_completely(refinementIds[i]);
+            grid.stop_refining();
+         }
+      }
+   }
+
+   grid.balance_load();
+
+   setOfPencils pencils;
+   vector<CellID> seedIds;
+   vector<CellID> localPropagatedCells;
+   vector<CellID> ids;
+   vector<uint> path;
+
+   for (CellID i = 0; i < xDim * yDim * zDim; i++) localPropagatedCells.push_back( i + 1 );
+   get_seed_ids(grid, localPropagatedCells, dimension, seedIds);
+   for (const auto seedId : seedIds) {
+      // Construct pencils from the seedIds into a set of pencils.
+ pencils = buildPencilsWithNeighbors(grid, pencils, seedId, ids, dimension, path); + } + + uint ibeg = 0; + uint iend = 0; + std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; + std::cout << "(x, y): indices " << std::endl; + std::cout << "-----------------------------------------------------------------" << std::endl; + for (uint i = 0; i < pencils.N; i++) { + iend += pencils.lengthOfPencils[i]; + std::cout << "(" << pencils.x[i] << ", " << pencils.y[i] << "): "; + for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { + std::cout << *j << " "; + } + ibeg = iend; + std::cout << std::endl; + } + +} diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index 1d166e3b6..3e45302b8 100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -56,53 +56,6 @@ void store_trans_block_data(SpatialCell** target_neighbors,const vmesh::GlobalID Vec* __restrict__ target_values, const unsigned char* const cellid_transpose,const uint popID); -struct setOfPencils { - - uint N; // Number of pencils in the set - uint sumOfLengths; - std::vector lengthOfPencils; // Lengths of pencils - std::vector ids; // List of cells - std::vector x,y; // x,y - position - - setOfPencils() { - N = 0; - sumOfLengths = 0; - } - - void addPencil(std::vector idsIn, Real xIn, Real yIn) { - - N += 1; - sumOfLengths += idsIn.size(); - lengthOfPencils.push_back(idsIn.size()); - ids.insert(ids.end(),idsIn.begin(),idsIn.end()); - x.push_back(xIn); - y.push_back(yIn); - - } - - std::vector getIds(const uint pencilId) { - - vector idsOut; - - if (pencilId > N) { - return idsOut; - } - - CellID ibeg = 0; - for (uint i = 0; i < pencilId; i++) { - ibeg += lengthOfPencils[i]; - } - CellID iend = ibeg + lengthOfPencils[pencilId]; - - for (uint i = ibeg; i <= iend; i++) { - idsOut.push_back(ids[i]); - } - - return idsOut; - } - -}; - // indices in padded source block, which is of type Vec with VECL // element sin each vector. b_k is the block index in z direction in @@ -272,16 +225,48 @@ void compute_spatial_source_neighbors(const dccrg::Dccrg& mpiGrid, - setOfPencils pencils, - const uint iPencil, - const uint dimension, - SpatialCell **neighbors){ +void compute_spatial_source_cells_for_pencil(const dccrg::Dccrg& mpiGrid, + setOfPencils pencils, + const uint iPencil, + const uint dimension, + SpatialCell **sourceCells){ // L = length of the pencil iPencil int L = pencils.lengthOfPencils[iPencil]; - vector ids = pencils.getIds(iPencil); + + // // First loop over ids, get pointers to cells in the pencil. 
Then loop from 0 to VLASOV_STENCIL_WIDTH on + // // Both ends, compare results to cells already in pencil and if they are not equal, add to both sides of array + // // If equal pointers are found (can compare with ==), add null pointer to array + // vector ids = pencils.getIds(iPencil); + // for (int iCell = 0; iCell < L; iCell++) { + // sourceCells[iCell + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, ids[iCell], false, 0, 0, 0); + // } + + // for (int iCell = -VLASOV_STENCIL_WIDTH; iCell < 0; iCell++) { + // switch (dimension) { + // case 0: + // auto cellPointer = get_spatial_neighbor_pointer(mpiGrid, ids[iCell], false, iCell, 0, 0); + // break; + // case 1: + // auto cellPointer = get_spatial_neighbor_pointer(mpiGrid, ids[iCell], false, 0, iCell, 0); + // break; + // case 2: + // auto cellPointer = get_spatial_neighbor_pointer(mpiGrid, ids[iCell], false, 0, 0, iCell); + // break; + // } + + // bool cellAlreadyInPencil = false; + // for (auto cellInPencil: sourceCells) { + // if(cellPointer == cellInPencil) cellAlreadyInPencil = true; + // } + // if(cellAlreadyInPencil) { + // sourceCells[iCell] = NULL; + // } else { + // sourceCells[iCell] = cellPointer; + // } + // } + for (int iCell = -VLASOV_STENCIL_WIDTH; iCell < L + VLASOV_STENCIL_WIDTH; iCell++) { CellID cellID = ids[min(max(iCell, 0), L - 1)]; @@ -291,13 +276,13 @@ void compute_spatial_source_neighbors_for_pencil(const dccrg::Dccrg=-VLASOV_STENCIL_WIDTH;i--){ - if(neighbors[i + VLASOV_STENCIL_WIDTH] == NULL) - neighbors[i + VLASOV_STENCIL_WIDTH] = last_good_cell; + if(sourceCells[i + VLASOV_STENCIL_WIDTH] == NULL) + sourceCells[i + VLASOV_STENCIL_WIDTH] = last_good_cell; else - last_good_cell = neighbors[i + VLASOV_STENCIL_WIDTH]; + last_good_cell = sourceCells[i + VLASOV_STENCIL_WIDTH]; } last_good_cell = mpiGrid[ids.back()]; /*loop to positive side and replace all invalid cells with the closest good cell*/ for(int i = L + 1; i <= L + VLASOV_STENCIL_WIDTH; i++){ - if(neighbors[i + VLASOV_STENCIL_WIDTH] == NULL) - neighbors[i + VLASOV_STENCIL_WIDTH] = last_good_cell; + if(sourceCells[i + VLASOV_STENCIL_WIDTH] == NULL) + sourceCells[i + VLASOV_STENCIL_WIDTH] = last_good_cell; else - last_good_cell = neighbors[i + VLASOV_STENCIL_WIDTH]; + last_good_cell = sourceCells[i + VLASOV_STENCIL_WIDTH]; } } @@ -345,10 +330,10 @@ void compute_spatial_target_neighbors(const dccrg::Dccrg& mpiGrid, - setOfPencils& pencils, - const uint dimension, - SpatialCell **neighbors){ +void compute_spatial_target_cells_for_pencils(const dccrg::Dccrg& mpiGrid, + setOfPencils& pencils, + const uint dimension, + SpatialCell **targetCells){ uint GID = 0; for(uint iPencil = 0; iPencil < pencils.N; iPencil++){ @@ -365,13 +350,13 @@ void compute_spatial_target_neighbors_for_pencils(const dccrg::Dccrg& mpi z_1 = 1.0 - z_translation; z_2 = 1.0; } + for (uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { //compute reconstruction #ifdef TRANS_SEMILAG_PLM @@ -1025,9 +1011,13 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg 0) { + periodic = false; + // Find the refinement level in the neighboring cell. Any neighbor will do // since refinement level can only increase by 1 between neighbors. 
nextNeighbor = selectNeighbor(grid,id,dimension); @@ -1102,13 +1092,13 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg &vmesh, const uint lengthOfPencil) { + const vmesh::VelocityMesh &vmesh, const uint lengthOfPencil, bool debugflag) { // Get velocity data from vmesh that we need later to calculate the translation velocity_block_indices_t block_indices; @@ -1187,10 +1177,10 @@ void propagatePencil(Vec* dz, Vec* values, const uint dimension, const uint bloc // } // cout << endl; - for (auto value: targetValues) { + for (uint i = 0; i < (lengthOfPencil + 2 * nTargetNeighborsPerPencil) * WID3 / VECL; i++) { // init target_values - value = 0.0; + targetValues[i] = Vec(0.0); } // Go from 0 to length here to propagate all the cells in the pencil @@ -1214,14 +1204,14 @@ void propagatePencil(Vec* dz, Vec* values, const uint dimension, const uint bloc // Normalize the coordinates to the origin cell. Then we scale with the difference // in volume between target and origin later when adding the integrated value. Vec z_1,z_2; - z_1 = select(positiveTranslationDirection, 1.0 - z_translation / dz[i_source], 0.0); - z_2 = select(positiveTranslationDirection, 1.0, - z_translation / dz[i_source]); + z_1 = select(positiveTranslationDirection, 1.0 - z_translation, 0.0); + z_2 = select(positiveTranslationDirection, 1.0, - z_translation); - // if( horizontal_or(abs(z_1) > Vec(1.0)) || horizontal_or(abs(z_2) > Vec(1.0)) ) { - // std::cout << "Error, CFL condition violated\n"; - // std::cout << "Exiting\n"; - // std::exit(1); - // } + if( horizontal_or(abs(z_1) > Vec(1.0)) || horizontal_or(abs(z_2) > Vec(1.0)) ) { + std::cout << "Error, CFL condition violated\n"; + std::cout << "Exiting\n"; + std::exit(1); + } // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; @@ -1232,71 +1222,68 @@ void propagatePencil(Vec* dz, Vec* values, const uint dimension, const uint bloc // Dz: is a padded array, pointer can point to the beginning, i + VLASOV_STENCIL_WIDTH will get the right cell. // values: transpose function adds VLASOV_STENCIL_WIDTH to the block index, therefore we substract it here, then // i + VLASOV_STENCIL_WIDTH will point to the right cell. Complicated! Why! Sad! MVGA! - compute_ppm_coeff_nonuniform(dz, - values + i_trans_ps_blockv_pencil(planeVector, k, i - VLASOV_STENCIL_WIDTH, lengthOfPencil), - h4, i + VLASOV_STENCIL_WIDTH, a); - // TODO: may need i - VLASOV_STENCIL_WIDTH instead of i for i_trans_ps_blockv_pencil. 
+ if(debugflag) cout << endl; + if(debugflag) cout << planeVector << " " << k << " " << i << ", " << i_trans_ps_blockv_pencil(planeVector, k, i - VLASOV_STENCIL_WIDTH, lengthOfPencil) << ", " << z_1[0] << " " << z_2[0] << ", " << a[0][0] << " " << a[1][0] << " " << a[2][0] << endl; + compute_ppm_coeff_nonuniform(dz, + values + i_trans_ps_blockv_pencil(planeVector, k, i-VLASOV_STENCIL_WIDTH, lengthOfPencil), + h4, VLASOV_STENCIL_WIDTH, a); // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - - if (horizontal_or(is_nan(z_1))) { - cerr << "nan found in z_1" << endl; - throw; - } - - if (horizontal_or(is_nan(z_2))) { - cerr << "nan found in z_2" << endl; - throw; - } - - for (uint ia = 0; ia < 3; ia++) { - if(horizontal_or(is_nan(a[ia]))) { - cerr << "Found NaN in polynomial coefficient " << ia << endl; - for (uint j = 0; j < VECL; j++) { - cout << j << " " << a[0][j] << " " << a[1][j] << " " << a[2][j] << endl; - } - for (int ii = -2; ii <= 1; ii++) { - for (uint j = 0; j < VECL; j++) { - cout << ii << " " << j << " dz, value: " << dz[ii + i_source][0] << ", "; - cout << values[ii + i_trans_ps_blockv_pencil(planeVector, k, i, lengthOfPencil)][0] << endl; - } - } - - throw; - } - } // Compute integral const Vec ngbr_target_density = z_2 * ( a[0] + z_2 * ( a[1] + z_2 * a[2] ) ) - z_1 * ( a[0] + z_1 * ( a[1] + z_1 * a[2] ) ); + + + //if (debugflag) cout << planeVector << " " << k << " " << i << " " << ngbr_target_density[0] << endl; + if (debugflag) cout << "ngbr_target_density: " << ngbr_target_density[0] << endl; + + if(debugflag) cout << "targetValues before: "; + if(debugflag) cout << targetValues[i_trans_pt_blockv(planeVector, k, i - 1)][0] << " "; + if(debugflag) cout << targetValues[i_trans_pt_blockv(planeVector, k, i)][0] << " "; + if(debugflag) cout << targetValues[i_trans_pt_blockv(planeVector, k, i + 1)][0] << endl; // Store mapped density in two target cells // in the neighbor cell we will put this density //targetValues[i_cell + target_scell_index] += ngbr_target_density * dz[i_cell] / dz[i_cell + target_scell_index]; targetValues[i_trans_pt_blockv(planeVector, k, i + 1)] += select( positiveTranslationDirection, - ngbr_target_density * dz[i] / dz[i_source + 1],Vec(0.0)); + ngbr_target_density * dz[i_source] / dz[i_source + 1],Vec(0.0)); targetValues[i_trans_pt_blockv(planeVector, k, i - 1 )] += select(!positiveTranslationDirection, - ngbr_target_density * dz[i] / dz[i_source - 1],Vec(0.0)); + ngbr_target_density * dz[i_source] / dz[i_source - 1],Vec(0.0)); // in the current original cells we will put the rest of the original density - targetValues[i_trans_pt_blockv(planeVector, k, i)] += values[i_trans_ps_blockv_pencil(planeVector, k, i, lengthOfPencil)] - ngbr_target_density; + targetValues[i_trans_pt_blockv(planeVector, k, i)] += + values[i_trans_ps_blockv_pencil(planeVector, k, i, lengthOfPencil)] - ngbr_target_density; + + if(debugflag) cout << "targetValues after: "; + if(debugflag) cout << targetValues[i_trans_pt_blockv(planeVector, k, i - 1)][0] << " "; + if(debugflag) cout << targetValues[i_trans_pt_blockv(planeVector, k, i)][0] << " "; + if(debugflag) cout << targetValues[i_trans_pt_blockv(planeVector, k, i + 1)][0] << endl;; } } } // Write target data into source data // VLASOV_STENCIL_WIDTH >= nTargetNeighborsPerPencil is required (default 2 >= 1) + + //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - for (int i = -nTargetNeighborsPerPencil; i < lengthOfPencil + nTargetNeighborsPerPencil; i++) { + for (int i = 0; i < lengthOfPencil + 
2 * nTargetNeighborsPerPencil; i++) { + //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + for (uint k = 0; k < WID; ++k) { + //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + for (uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { + + //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + + values[i_trans_ps_blockv_pencil(planeVector, k, i - 1, lengthOfPencil)] = + targetValues[i_trans_pt_blockv(planeVector, k, i - 1)]; - values[i_trans_ps_blockv_pencil(planeVector, k, i, lengthOfPencil)] = - targetValues[i_trans_pt_blockv(planeVector, k, i)]; - - if(horizontal_or(is_nan(values[i_trans_ps_blockv_pencil(planeVector, k, i, lengthOfPencil)]))) { + if(horizontal_or(is_nan(values[i_trans_ps_blockv_pencil(planeVector, k, i - 1, lengthOfPencil)]))) { cerr << "nan detected in output" << endl; throw; } @@ -1653,6 +1640,12 @@ bool trans_map_1d_amr(const dccrg::Dccrg& { //std::vector targetsValid(localPropagatedCells.size()); //std::vector allCellsBlockLocalID(allCells.size()); + + // cout << unionOfBlocks.size() << " Block global ids are: "; + // for(uint blocki = 0; blocki < unionOfBlocks.size(); blocki++){ + // cout << unionOfBlocks[blocki] << " "; + // } + // cout << endl; //#pragma omp for schedule(guided) // Loop over velocity space blocks. Thread this loop (over vspace blocks) with OpenMP. @@ -1665,12 +1658,11 @@ bool trans_map_1d_amr(const dccrg::Dccrg& bool debugflag = false; CellID debugcell; - uint allCellsPointerIndex = 0; + uint allCellsPointerIndex = 16; const vmesh::LocalID debugLID = allCellsPointer[allCellsPointerIndex]->get_velocity_block_local_id(blockGID, popID); Realf* data = allCellsPointer[allCellsPointerIndex]->get_data(debugLID,popID); - // for (uint i = 0; i < WID3; i++) - // if (data[i] != 0) debugflag = true; + //for (uint i = 0; i < WID3; i++) if (data[i] != 0) debugflag = true; if (debugflag) { cout << "blockGID, blockLID " << blockGID << ", " << debugLID << endl; @@ -1726,13 +1718,13 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Compute spatial neighbors for target cells. // For targets we need the local cells, plus a padding of 1 cell at both ends - std::vector targetNeighbors(pencils.sumOfLengths + pencils.N * 2 ); + std::vector targetCells(pencils.sumOfLengths + pencils.N * 2 ); - compute_spatial_target_neighbors_for_pencils(mpiGrid, pencils, dimension, targetNeighbors.data()); + compute_spatial_target_cells_for_pencils(mpiGrid, pencils, dimension, targetCells.data()); // if(debugflag) { // uint i = 0; - // for (auto neighbor: targetNeighbors) { + // for (auto neighbor: targetCells) { // cout << "Target Neighbor " << i << " coordinates : "; // cout << neighbor->SpatialCell::parameters[CellParams::XCRD] << ", "; // cout << neighbor->SpatialCell::parameters[CellParams::YCRD] << ", "; @@ -1742,21 +1734,24 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // } // } - // Loop over pencils + // Loop over pencils + uint totalTargetLength = 0; for(uint pencili = 0; pencili < pencils.N; pencili++){ vector pencilIds = pencils.getIds(pencili); int L = pencils.lengthOfPencils[pencili]; + uint targetLength = L + 2; + uint sourceLength = L + 2 * VLASOV_STENCIL_WIDTH; // Compute spatial neighbors for the source cells of the pencil. In // source cells we have a wider stencil and take into account boundaries. 
- std::vector sourceNeighbors(L + 2 * VLASOV_STENCIL_WIDTH); - compute_spatial_source_neighbors_for_pencil(mpiGrid, pencils, pencili, dimension, sourceNeighbors.data()); + std::vector sourceCells(sourceLength); + compute_spatial_source_cells_for_pencil(mpiGrid, pencils, pencili, dimension, sourceCells.data()); - Vec dz[sourceNeighbors.size()]; + Vec dz[sourceCells.size()]; uint i = 0; - for(neighbor: sourceNeighbors) { + for(auto neighbor: sourceCells) { switch (dimension) { case(0): dz[i] = neighbor->SpatialCell::parameters[CellParams::DX]; @@ -1775,7 +1770,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // if(debugflag) { // uint i = 0; - // for (auto neighbor: sourceNeighbors) { + // for (auto neighbor: sourceCells) { // cout << "Source Neighbor " << i << " of pencil " << pencili << " coordinates : "; // cout << neighbor->SpatialCell::parameters[CellParams::XCRD] << ", "; // cout << neighbor->SpatialCell::parameters[CellParams::YCRD] << ", "; @@ -1787,161 +1782,192 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Allocate source data: sourcedata sourcedata) / (proper xy reconstruction in future) - copy_trans_block_data_amr(sourceNeighbors.data(), blockGID, L, sourceVecData, + copy_trans_block_data_amr(sourceCells.data(), blockGID, L, sourceVecData, cellid_transpose, popID); - int n = 0; - for (auto celli: pencilIds) { - n++; - if (celli == debugcell && debugflag) { - cout << "Cell coordinates (x): " << - pencils.x[pencili] - 0.5 * allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::DX]; + for (int celli = 0; celli < L; celli++) { + if (pencilIds[celli] == debugcell && debugflag) { + cout << "Cell " << pencilIds[celli] << " coordinates (x): " << + celli * allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::DY]; cout << " (y): " << - (n - 1) * allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::DY] << endl; + pencils.x[pencili] - 0.5 * allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::DX] << endl; cout << "sourceVecData: " << sizeof(sourceVecData) / sizeof(sourceVecData[0]) << " " << WID3 << " " << VECL << endl; - for (uint i = 0; i < WID3 / VECL; i++) { - for (uint j = 0; j < VECL; j++) - cout << ", " << sourceVecData[i][j]; + //for (uint i = 0; i < WID3 / VECL; i++) { + for (uint k=0; k targetdata[this pencil]) - for (int i = 0; i < L + 2; i++) { - Vec value = sourceVecData[i + VLASOV_STENCIL_WIDTH - 1]; - targetVecData[targetDataIndex] += value; - targetDataIndex++; + //cout << "writing into index "; + for (int i = 0; i < targetLength; i++) { + for (uint k=0; ksysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + // Get local velocity block id + const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); + // Check for invalid id + if (blockLID != vmesh::VelocityMesh::invalidLocalID()) { + // Get a pointer to the block data + Realf* blockData = spatial_cell->get_data(blockLID, popID); + // Loop over velocity block cells + for(int i = 0; i < WID3; i++) { + blockData[i] = 0.0; + } + } + } + } - // store_data(target_data => allCellsPointer) :Aggregate data for blockid to original location + // store_data(target_data => targetCells) :Aggregate data for blockid to original location // Loop over pencils again - uint totalLength = 0; + totalTargetLength = 0; for(uint pencili = 0; pencili < pencils.N; pencili++){ + if(debugflag) { + cout << "pencil.periodic: " << pencils.periodic[pencili] << endl; + } + + int L = pencils.lengthOfPencils[pencili]; + int targetLength = L 
+ 2; vector pencilIds = pencils.getIds(pencili); - uint targetLength = pencils.lengthOfPencils[pencili] + 2; + + bool debugPencilFlag = false; // Unpack the vector data // Loop over cells in pencil +- 1 padded cell for ( uint celli = 0; celli < targetLength; ++celli ) { + + // // If the pencil is periodic, we do not write the ghost cells because + // // They are copies of cells that are already in the pencil + // // - It seems that doing this was wrong. Investigate! + // if(pencils.periodic[pencili] && (celli == 0 || celli == targetLength - 1)) + // continue; + + if(celli > 0 && pencilIds[celli - 1] == debugcell) debugPencilFlag = true; + Realv vector[VECL]; // Loop over 1st vspace dimension - for (uint k = 0; k< WID; ++k) { + for (uint k = 0; k < WID; ++k) { // Loop over 2nd vspace dimension for(uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { - targetVecData[i_trans_pt_blockv(planeVector, k, celli - 1)].store(vector); + targetVecData[i_trans_pt_blockv(planeVector, k, totalTargetLength + celli - 1)].store(vector); // Loop over 3rd (vectorized) vspace dimension for (uint i = 0; i < VECL; i++) { - targetBlockData[(totalLength + celli) * WID3 + cellid_transpose[i + planeVector * VECL + k * WID2]] + // if (celli > 0 && pencilIds[celli - 1] == debugcell && debugflag) { + // cout << static_cast(cellid_transpose[i + planeVector * VECL + k * WID2]) << " " << + // (totalTargetLength + celli) * WID3 + cellid_transpose[i + planeVector * VECL + k * WID2] << endl; + // } + targetBlockData[(totalTargetLength + celli) * WID3 + + cellid_transpose[i + planeVector * VECL + k * WID2]] = vector[i]; - } - } + //if(vector[i] == 0) cerr << "targetblockdata is 0!" << endl; + } + } } // if (celli > 0 && pencilIds[celli - 1] == debugcell && debugflag) { - // cout << "targetBlockData: "; - // for (uint i = (totalLength + celli) * WID3; i < (totalLength + celli + 1) * WID3; i++) + // cout << "targetBlockData: " << targetBlockData.size() << endl; + // for (uint i = (totalTargetLength + celli) * WID3; i < (totalTargetLength + celli + 1) * WID3; i++) // cout << targetBlockData[i] << ", "; // cout << endl; // } - } - - //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - - // reset blocks in all non-sysboundary neighbor spatial cells for this block id and pencil id - for (auto *spatial_cell: targetNeighbors) { - // Check for system boundary - if(spatial_cell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { - // Get local velocity block id - const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); - // Check for invalid id - if (blockLID != vmesh::VelocityMesh::invalidLocalID()) { - // Get a pointer to the block data - Realf* blockData = spatial_cell->get_data(blockLID, popID); - // Loop over velocity block cells - for(int i = 0; i < WID3; i++) { - blockData[i] = 0.0; - } - } - } } + + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + // store values from targetBlockData array to the actual blocks // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Loop over cells in the pencil, including the padded cells of the target array for ( uint celli = 0; celli < targetLength; celli++ ) { + + // if(pencils.periodic[pencili] && (celli == 0 || celli == targetLength - 1)) + // continue; + + uint GID = celli + totalTargetLength; + SpatialCell* spatial_cell = targetCells[GID]; - uint GID = celli + totalLength; - SpatialCell* spatial_cell = targetNeighbors[GID]; if(spatial_cell == NULL) { //invalid target spatial 
cell - //cerr << "invalid target spatial cell" << endl; + cerr << "invalid target spatial cell" << endl; continue; } + if (celli > 0 && pencilIds[celli - 1] == debugcell && debugflag) { + cout << "Debug cell " << pencilIds[celli - 1] << " coordinates (in write): "; + cout << spatial_cell->SpatialCell::parameters[CellParams::XCRD] << ", "; + cout << spatial_cell->SpatialCell::parameters[CellParams::YCRD] << ", "; + cout << spatial_cell->SpatialCell::parameters[CellParams::ZCRD]; + cout << endl; + } + + const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); if (blockLID == vmesh::VelocityMesh::invalidLocalID()) { - //cerr << "invalid local id" << endl; + //cout << "invalid local id " << blockLID << " with Global ID " << blockGID << " and cell id " << pencilIds[celli] << endl; continue; } Realf* blockData = spatial_cell->get_data(blockLID, popID); for(int i = 0; i < WID3 ; i++) { - // if (celli > 0 && pencilIds[celli - 1] == debugcell && debugflag) { - // cout << targetBlockData[GID * WID3 + i] << ", "; - // } blockData[i] += targetBlockData[GID * WID3 + i]; } - // if (celli > 0 && pencilIds[celli - 1] == debugcell && debugflag) { - // cout << endl; - // cout << "GID: " << GID << endl; - // cout << "totalLength: " << totalLength << endl; - // cout << "celli: " << celli << endl; - // cout << "pencili: " << pencili << endl; - // cout << "Cell Id: " << pencilIds[celli - 1] << endl; - // cout << "Spatial cell coordinates : "; - // cout << spatial_cell->SpatialCell::parameters[CellParams::XCRD] << ", "; - // cout << spatial_cell->SpatialCell::parameters[CellParams::YCRD] << ", "; - // cout << spatial_cell->SpatialCell::parameters[CellParams::ZCRD]; - // cout << endl; - - // } } - totalLength += targetLength; + if(debugflag && debugPencilFlag) cout << "TotalTargetLength = " << totalTargetLength << endl; + totalTargetLength += targetLength; // dealloc target data -- Should be automatic again? 
// cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; @@ -1951,7 +1977,14 @@ bool trans_map_1d_amr(const dccrg::Dccrg& data = allCellsPointer[allCellsPointerIndex]->get_data(debugLID,popID); if (debugflag) { + cout << endl; cout << "blockGID, blockLID " << blockGID << ", " << debugLID << endl; + cout << "Debug cell coordinates (at end): "; + cout << allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::XCRD] << ", "; + cout << allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::YCRD] << ", "; + cout << allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::ZCRD]; + cout << endl; + cout << "targetData: " << endl; for (uint i = 0; i < WID3; i++) { if (i != 0) cout << ", "; diff --git a/vlasovsolver/cpu_trans_map.hpp b/vlasovsolver/cpu_trans_map.hpp index 6101ad0cf..287ba48ec 100644 --- a/vlasovsolver/cpu_trans_map.hpp +++ b/vlasovsolver/cpu_trans_map.hpp @@ -27,6 +27,55 @@ #include "vec.h" #include "../common.h" #include "../spatial_cell.hpp" + +struct setOfPencils { + + uint N; // Number of pencils in the set + uint sumOfLengths; + std::vector lengthOfPencils; // Lengths of pencils + std::vector ids; // List of cells + std::vector x,y; // x,y - position + std::vector periodic; + + setOfPencils() { + N = 0; + sumOfLengths = 0; + } + + void addPencil(std::vector idsIn, Real xIn, Real yIn, bool periodicIn) { + + N += 1; + sumOfLengths += idsIn.size(); + lengthOfPencils.push_back(idsIn.size()); + ids.insert(ids.end(),idsIn.begin(),idsIn.end()); + x.push_back(xIn); + y.push_back(yIn); + periodic.push_back(periodicIn); + } + + std::vector getIds(const uint pencilId) { + + std::vector idsOut; + + if (pencilId > N) { + return idsOut; + } + + CellID ibeg = 0; + for (uint i = 0; i < pencilId; i++) { + ibeg += lengthOfPencils[i]; + } + CellID iend = ibeg + lengthOfPencils[pencilId]; + + for (uint i = ibeg; i <= iend; i++) { + idsOut.push_back(ids[i]); + } + + return idsOut; + } + +}; + bool do_translate_cell(spatial_cell::SpatialCell* SC); bool trans_map_1d(const dccrg::Dccrg& mpiGrid, const std::vector& localPropagatedCells, @@ -64,6 +113,14 @@ bool trans_map_1d_amr(const dccrg::Dccrg &grid, + setOfPencils &pencils, CellID startingId, + std::vector ids, uint dimension, + std::vector path); +void get_seed_ids(const dccrg::Dccrg& mpiGrid, + const std::vector &localPropagatedCells, + const uint dimension, + std::vector &seedIds); #endif diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index 8587e8b56..09269886a 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -127,7 +127,7 @@ void calculateSpatialTranslation( phiprof::stop(trans_timer); phiprof::start("compute-mapping-y"); - trans_map_1d(mpiGrid,local_propagated_cells, remoteTargetCellsy, 1,dt,popID); // map along y// + trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsy, 1,dt,popID); // map along y// phiprof::stop("compute-mapping-y"); trans_timer=phiprof::initializeTimer("update_remote-y","MPI"); From b2c38b872b3b93b394b9dfe615871c1449bb3268 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 13 Sep 2018 16:47:21 +0300 Subject: [PATCH 050/602] Cleaned up version with debug prints removed. 
--- vlasovsolver/cpu_trans_map.cpp | 302 +++------------------------------ 1 file changed, 28 insertions(+), 274 deletions(-) diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index 3e45302b8..571b770ce 100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -234,39 +234,7 @@ void compute_spatial_source_cells_for_pencil(const dccrg::Dccrg ids = pencils.getIds(iPencil); - - // // First loop over ids, get pointers to cells in the pencil. Then loop from 0 to VLASOV_STENCIL_WIDTH on - // // Both ends, compare results to cells already in pencil and if they are not equal, add to both sides of array - // // If equal pointers are found (can compare with ==), add null pointer to array - // vector ids = pencils.getIds(iPencil); - // for (int iCell = 0; iCell < L; iCell++) { - // sourceCells[iCell + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, ids[iCell], false, 0, 0, 0); - // } - - // for (int iCell = -VLASOV_STENCIL_WIDTH; iCell < 0; iCell++) { - // switch (dimension) { - // case 0: - // auto cellPointer = get_spatial_neighbor_pointer(mpiGrid, ids[iCell], false, iCell, 0, 0); - // break; - // case 1: - // auto cellPointer = get_spatial_neighbor_pointer(mpiGrid, ids[iCell], false, 0, iCell, 0); - // break; - // case 2: - // auto cellPointer = get_spatial_neighbor_pointer(mpiGrid, ids[iCell], false, 0, 0, iCell); - // break; - // } - - // bool cellAlreadyInPencil = false; - // for (auto cellInPencil: sourceCells) { - // if(cellPointer == cellInPencil) cellAlreadyInPencil = true; - // } - // if(cellAlreadyInPencil) { - // sourceCells[iCell] = NULL; - // } else { - // sourceCells[iCell] = cellPointer; - // } - // } - + for (int iCell = -VLASOV_STENCIL_WIDTH; iCell < L + VLASOV_STENCIL_WIDTH; iCell++) { CellID cellID = ids[min(max(iCell, 0), L - 1)]; @@ -1165,17 +1133,9 @@ void propagatePencil(Vec* dz, Vec* values, const uint dimension, const uint bloc // Assuming 1 neighbor in the target array because of the CFL condition // In fact propagating to > 1 neighbor will give an error const uint nTargetNeighborsPerPencil = 1; - - //Veci target_scell_index = truncate_to_int(select(z_translation > Vec(0.0), 1, -1)); // Vector buffer where we write data, initialized to 0*/ Vec targetValues[(lengthOfPencil + 2 * nTargetNeighborsPerPencil) * WID3 / VECL]; - - // cout << "propagatePencil" << endl; - // for (uint i = 0; i < lengthOfPencil + 2 * VLASOV_STENCIL_WIDTH; i++) { - // cout << dz[i][0] << ", "; - // } - // cout << endl; for (uint i = 0; i < (lengthOfPencil + 2 * nTargetNeighborsPerPencil) * WID3 / VECL; i++) { @@ -1222,31 +1182,17 @@ void propagatePencil(Vec* dz, Vec* values, const uint dimension, const uint bloc // Dz: is a padded array, pointer can point to the beginning, i + VLASOV_STENCIL_WIDTH will get the right cell. // values: transpose function adds VLASOV_STENCIL_WIDTH to the block index, therefore we substract it here, then // i + VLASOV_STENCIL_WIDTH will point to the right cell. Complicated! Why! Sad! MVGA! 
- if(debugflag) cout << endl; - if(debugflag) cout << planeVector << " " << k << " " << i << ", " << i_trans_ps_blockv_pencil(planeVector, k, i - VLASOV_STENCIL_WIDTH, lengthOfPencil) << ", " << z_1[0] << " " << z_2[0] << ", " << a[0][0] << " " << a[1][0] << " " << a[2][0] << endl; - compute_ppm_coeff_nonuniform(dz, values + i_trans_ps_blockv_pencil(planeVector, k, i-VLASOV_STENCIL_WIDTH, lengthOfPencil), h4, VLASOV_STENCIL_WIDTH, a); - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Compute integral const Vec ngbr_target_density = z_2 * ( a[0] + z_2 * ( a[1] + z_2 * a[2] ) ) - z_1 * ( a[0] + z_1 * ( a[1] + z_1 * a[2] ) ); - - - //if (debugflag) cout << planeVector << " " << k << " " << i << " " << ngbr_target_density[0] << endl; - if (debugflag) cout << "ngbr_target_density: " << ngbr_target_density[0] << endl; - - if(debugflag) cout << "targetValues before: "; - if(debugflag) cout << targetValues[i_trans_pt_blockv(planeVector, k, i - 1)][0] << " "; - if(debugflag) cout << targetValues[i_trans_pt_blockv(planeVector, k, i)][0] << " "; - if(debugflag) cout << targetValues[i_trans_pt_blockv(planeVector, k, i + 1)][0] << endl; - + // Store mapped density in two target cells // in the neighbor cell we will put this density - //targetValues[i_cell + target_scell_index] += ngbr_target_density * dz[i_cell] / dz[i_cell + target_scell_index]; targetValues[i_trans_pt_blockv(planeVector, k, i + 1)] += select( positiveTranslationDirection, ngbr_target_density * dz[i_source] / dz[i_source + 1],Vec(0.0)); targetValues[i_trans_pt_blockv(planeVector, k, i - 1 )] += select(!positiveTranslationDirection, @@ -1255,11 +1201,6 @@ void propagatePencil(Vec* dz, Vec* values, const uint dimension, const uint bloc // in the current original cells we will put the rest of the original density targetValues[i_trans_pt_blockv(planeVector, k, i)] += values[i_trans_ps_blockv_pencil(planeVector, k, i, lengthOfPencil)] - ngbr_target_density; - - if(debugflag) cout << "targetValues after: "; - if(debugflag) cout << targetValues[i_trans_pt_blockv(planeVector, k, i - 1)][0] << " "; - if(debugflag) cout << targetValues[i_trans_pt_blockv(planeVector, k, i)][0] << " "; - if(debugflag) cout << targetValues[i_trans_pt_blockv(planeVector, k, i + 1)][0] << endl;; } } } @@ -1267,19 +1208,12 @@ void propagatePencil(Vec* dz, Vec* values, const uint dimension, const uint bloc // Write target data into source data // VLASOV_STENCIL_WIDTH >= nTargetNeighborsPerPencil is required (default 2 >= 1) - //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - for (int i = 0; i < lengthOfPencil + 2 * nTargetNeighborsPerPencil; i++) { - //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - for (uint k = 0; k < WID; ++k) { - //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; for (uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { - //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - values[i_trans_ps_blockv_pencil(planeVector, k, i - 1, lengthOfPencil)] = targetValues[i_trans_pt_blockv(planeVector, k, i - 1)]; @@ -1298,9 +1232,6 @@ void get_seed_ids(const dccrg::Dccrg& mpi const uint dimension, vector &seedIds) { - //cout << "localpropagatedcells.size() " << localPropagatedCells.size() << endl; - //cout << "dimension " << dimension << endl; - const bool debug = false; //#pragma omp parallel for @@ -1414,12 +1345,6 @@ void copy_trans_block_data_amr( for (int b = -VLASOV_STENCIL_WIDTH; b < lengthOfPencil + VLASOV_STENCIL_WIDTH; b++) { // Get 
cell pointer and local block id SpatialCell* srcCell = source_neighbors[b + VLASOV_STENCIL_WIDTH]; - - // cout << "Spatial cell coordinates : "; - // cout << srcCell->SpatialCell::parameters[CellParams::XCRD] << ", "; - // cout << srcCell->SpatialCell::parameters[CellParams::YCRD] << ", "; - // cout << srcCell->SpatialCell::parameters[CellParams::ZCRD]; - // cout << endl; const vmesh::LocalID blockLID = srcCell->get_velocity_block_local_id(blockGID,popID); if (blockLID != srcCell->invalid_local_id()) { @@ -1490,8 +1415,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ unsigned char cellid_transpose[WID3]; /*< defines the transpose for the solver internal (transposed) id: i + j*WID + k*WID2 to actual one*/ const uint blocks_per_dim = 1; - - // cout << "entering trans_map_1d_amr" << endl; // return if there's no cells to propagate if(localPropagatedCells.size() == 0) { @@ -1508,12 +1431,9 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Initialize allCellsPointer //#pragma omp parallel for - //cout << "list of cell ids: "; for(uint celli = 0; celli < allCells.size(); celli++){ - //cout << allCells[celli] << " "; allCellsPointer[celli] = mpiGrid[allCells[celli]]; } - //cout << endl; // Fiddle indices x,y,z in VELOCITY SPACE switch (dimension) { @@ -1556,11 +1476,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } } } - // **************************************************************************** - - //cout << "end of index fiddle" << endl; - - // **************************************************************************** + // **************************************************************************** // compute pencils => set of pencils (shared datastructure) @@ -1581,21 +1497,24 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Construct pencils from the seedIds into a set of pencils. pencils = buildPencilsWithNeighbors(mpiGrid, pencils, seedId, ids, dimension, path); } - - // uint ibeg = 0; - // uint iend = 0; - // std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; - // std::cout << "(x, y): indices " << std::endl; - // std::cout << "-----------------------------------------------------------------" << std::endl; - // for (uint i = 0; i < pencils.N; i++) { - // iend += pencils.lengthOfPencils[i]; - // std::cout << "(" << pencils.x[i] << ", " << pencils.y[i] << "): "; - // for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { - // std::cout << *j << " "; - // } - // ibeg = iend; - // std::cout << std::endl; - // } + + // Print out ids of pencils (if needed for debugging) + if (false) { + uint ibeg = 0; + uint iend = 0; + std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; + std::cout << "(x, y): indices " << std::endl; + std::cout << "-----------------------------------------------------------------" << std::endl; + for (uint i = 0; i < pencils.N; i++) { + iend += pencils.lengthOfPencils[i]; + std::cout << "(" << pencils.x[i] << ", " << pencils.y[i] << "): "; + for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { + std::cout << *j << " "; + } + ibeg = iend; + std::cout << std::endl; + } + } // Add the final set of pencils to the pencilSets - vector. 
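A note on the mapping step this patch cleans up: propagatePencil evaluates
ngbr_target_density = z_2*(a[0] + z_2*(a[1] + z_2*a[2])) - z_1*(a[0] + z_1*(a[1] + z_1*a[2])),
i.e. the difference of the cubic A(z) = a[0]z + a[1]z^2 + a[2]z^3 between the
normalized limits z_1 and z_2, which gives the fraction of the cell's content that
crosses into the neighbor during the step. A scalar sketch follows, assuming the
a[0..2] produced by compute_ppm_coeff_nonuniform are the coefficients of that cubic
(names here are illustrative, not the solver's API):

// Scalar version of the integral evaluated per plane vector in propagatePencil.
#include <cassert>
#include <cmath>
#include <cstdio>

// Content mapped over [z1, z2]: A(z2) - A(z1) with A(z) = a0*z + a1*z^2 + a2*z^3.
double mappedDensity(double a0, double a1, double a2, double z1, double z2) {
   assert(std::fabs(z1) <= 1.0 && std::fabs(z2) <= 1.0);  // CFL: stay within one cell
   const double A2 = z2 * (a0 + z2 * (a1 + z2 * a2));
   const double A1 = z1 * (a0 + z1 * (a1 + z1 * a2));
   return A2 - A1;
}

int main() {
   // Flat reconstruction (a0 = 1, a1 = a2 = 0) translated by a quarter cell:
   // positive direction uses z_1 = 1 - z_translation, z_2 = 1, as in the solver.
   const double zt = 0.25;
   const double moved = mappedDensity(1.0, 0.0, 0.0, 1.0 - zt, 1.0);
   std::printf("fraction moved: %g\n", moved);  // prints 0.25
   return 0;
}

For a flat profile a quarter-cell translation moves a quarter of the content, which
is what the two-target update (neighbor gets ngbr_target_density scaled by the dz
ratio, the origin keeps the remainder) then distributes.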
@@ -1633,20 +1552,10 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } // **************************************************************************** - //cout << "Beginning of parallel region" << endl; int t1 = phiprof::initializeTimer("mappingAndStore"); //#pragma omp parallel - { - //std::vector targetsValid(localPropagatedCells.size()); - //std::vector allCellsBlockLocalID(allCells.size()); - - // cout << unionOfBlocks.size() << " Block global ids are: "; - // for(uint blocki = 0; blocki < unionOfBlocks.size(); blocki++){ - // cout << unionOfBlocks[blocki] << " "; - // } - // cout << endl; - + { //#pragma omp for schedule(guided) // Loop over velocity space blocks. Thread this loop (over vspace blocks) with OpenMP. for(uint blocki = 0; blocki < unionOfBlocks.size(); blocki++){ @@ -1663,35 +1572,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const vmesh::LocalID debugLID = allCellsPointer[allCellsPointerIndex]->get_velocity_block_local_id(blockGID, popID); Realf* data = allCellsPointer[allCellsPointerIndex]->get_data(debugLID,popID); //for (uint i = 0; i < WID3; i++) if (data[i] != 0) debugflag = true; - - if (debugflag) { - cout << "blockGID, blockLID " << blockGID << ", " << debugLID << endl; - - cout << "cell " << allCellsPointerIndex << " coordinates: "; - cout << allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::XCRD] << ", "; - cout << allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::YCRD] << ", "; - cout << allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::ZCRD]; - cout << endl; - - const creal x = allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::XCRD]; - const creal y = allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::YCRD]; - const creal z = allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::ZCRD]; - const creal dx = allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::DX]; - const creal dy = allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::DY]; - const creal dz = allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::DZ]; - debugcell = 1 + (int) ((x - Parameters::xmin) / dx) + - (int) ((y - Parameters::ymin) / dy) * Parameters::xcells_ini + - (int) ((z - Parameters::zmin) / dz) * Parameters::xcells_ini * Parameters::ycells_ini; - - cout << "Debug cell id is " << debugcell << endl; - - cout << "sourceData: " << endl; - for (uint i = 0; i < WID3; i++) - cout << ", " << data[i]; - cout << endl; - } - - + velocity_block_indices_t block_indices; uint8_t vRefLevel; vmesh.getIndices(blockGID,vRefLevel, block_indices[0], @@ -1699,7 +1580,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Loop over sets of pencils // This loop only has one iteration for now - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; for ( auto pencils: pencilSets ) { std::vector targetBlockData((pencils.sumOfLengths + 2 * pencils.N) * WID3); @@ -1721,18 +1601,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& std::vector targetCells(pencils.sumOfLengths + pencils.N * 2 ); compute_spatial_target_cells_for_pencils(mpiGrid, pencils, dimension, targetCells.data()); - - // if(debugflag) { - // uint i = 0; - // for (auto neighbor: targetCells) { - // cout << "Target Neighbor " << i << " coordinates : "; - // cout << neighbor->SpatialCell::parameters[CellParams::XCRD] << ", "; - // cout << neighbor->SpatialCell::parameters[CellParams::YCRD] << ", "; - // cout << 
neighbor->SpatialCell::parameters[CellParams::ZCRD]; - // cout << endl; - // i++; - // } - // } // Loop over pencils uint totalTargetLength = 0; @@ -1764,56 +1632,21 @@ bool trans_map_1d_amr(const dccrg::Dccrg& break; } i++; - - //cout << i << " " << dz[i][0] << ", " << endl; } - // if(debugflag) { - // uint i = 0; - // for (auto neighbor: sourceCells) { - // cout << "Source Neighbor " << i << " of pencil " << pencili << " coordinates : "; - // cout << neighbor->SpatialCell::parameters[CellParams::XCRD] << ", "; - // cout << neighbor->SpatialCell::parameters[CellParams::YCRD] << ", "; - // cout << neighbor->SpatialCell::parameters[CellParams::ZCRD]; - // cout << endl; - // i++; - // } - // } - // Allocate source data: sourcedata sourcedata) / (proper xy reconstruction in future) copy_trans_block_data_amr(sourceCells.data(), blockGID, L, sourceVecData, - cellid_transpose, popID); - - for (int celli = 0; celli < L; celli++) { - if (pencilIds[celli] == debugcell && debugflag) { - cout << "Cell " << pencilIds[celli] << " coordinates (x): " << - celli * allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::DY]; - cout << " (y): " << - pencils.x[pencili] - 0.5 * allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::DX] << endl; - cout << "sourceVecData: " << sizeof(sourceVecData) / sizeof(sourceVecData[0]) << " " << WID3 << " " << VECL << endl; - - //for (uint i = 0; i < WID3 / VECL; i++) { - for (uint k=0; k targetdata[this pencil]) - //cout << "writing into index "; for (int i = 0; i < targetLength; i++) { for (uint k=0; k& } } } - //cout << endl; - - - for (int celli = 0; celli < L; celli++) { - if (pencilIds[celli] == debugcell && debugflag) { - cout << "debug cell id is " << pencilIds[celli] << endl; - cout << "targetVecData: " << sizeof(targetVecData) / sizeof(targetVecData[0]) << endl; - for (uint k=0; k& targetVecData[i_trans_pt_blockv(planeVector, k, totalTargetLength + celli - 1)].store(vector); // Loop over 3rd (vectorized) vspace dimension for (uint i = 0; i < VECL; i++) { - // if (celli > 0 && pencilIds[celli - 1] == debugcell && debugflag) { - // cout << static_cast(cellid_transpose[i + planeVector * VECL + k * WID2]) << " " << - // (totalTargetLength + celli) * WID3 + cellid_transpose[i + planeVector * VECL + k * WID2] << endl; - // } targetBlockData[(totalTargetLength + celli) * WID3 + cellid_transpose[i + planeVector * VECL + k * WID2]] = vector[i]; - //if(vector[i] == 0) cerr << "targetblockdata is 0!" 
<< endl; } } } - - // if (celli > 0 && pencilIds[celli - 1] == debugcell && debugflag) { - // cout << "targetBlockData: " << targetBlockData.size() << endl; - // for (uint i = (totalTargetLength + celli) * WID3; i < (totalTargetLength + celli + 1) * WID3; i++) - // cout << targetBlockData[i] << ", "; - // cout << endl; - // } } - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - // store values from targetBlockData array to the actual blocks - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - // Loop over cells in the pencil, including the padded cells of the target array for ( uint celli = 0; celli < targetLength; celli++ ) { - - // if(pencils.periodic[pencili] && (celli == 0 || celli == targetLength - 1)) - // continue; uint GID = celli + totalTargetLength; SpatialCell* spatial_cell = targetCells[GID]; if(spatial_cell == NULL) { - //invalid target spatial cell - cerr << "invalid target spatial cell" << endl; + // Invalid target spatial cell continue; } - - if (celli > 0 && pencilIds[celli - 1] == debugcell && debugflag) { - cout << "Debug cell " << pencilIds[celli - 1] << " coordinates (in write): "; - cout << spatial_cell->SpatialCell::parameters[CellParams::XCRD] << ", "; - cout << spatial_cell->SpatialCell::parameters[CellParams::YCRD] << ", "; - cout << spatial_cell->SpatialCell::parameters[CellParams::ZCRD]; - cout << endl; - } - const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); if (blockLID == vmesh::VelocityMesh::invalidLocalID()) { - //cout << "invalid local id " << blockLID << " with Global ID " << blockGID << " and cell id " << pencilIds[celli] << endl; + // Invalid local id. continue; } @@ -1970,37 +1750,11 @@ bool trans_map_1d_amr(const dccrg::Dccrg& totalTargetLength += targetLength; // dealloc target data -- Should be automatic again? 
-            // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl;
         }
      }
-      data = allCellsPointer[allCellsPointerIndex]->get_data(debugLID,popID);
-
-      if (debugflag) {
-         cout << endl;
-         cout << "blockGID, blockLID " << blockGID << ", " << debugLID << endl;
-         cout << "Debug cell coordinates (at end): ";
-         cout << allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::XCRD] << ", ";
-         cout << allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::YCRD] << ", ";
-         cout << allCellsPointer[allCellsPointerIndex]->SpatialCell::parameters[CellParams::ZCRD];
-         cout << endl;
-
-         cout << "targetData: " << endl;
-         for (uint i = 0; i < WID3; i++) {
-            if (i != 0) cout << ", ";
-            cout << data[i];
-         }
-         cout << endl;
-         throw;
-      }
-
+      data = allCellsPointer[allCellsPointerIndex]->get_data(debugLID,popID);
   }
-   }
-
-   //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl;
-
-   // cerr << "At the end of trans_map_1d_amr" << endl;
-   // throw;
-
+   }
   return true;
}

From 7d3525f23c9a34b6581ad942d2eaab69b22c72dd Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Fri, 14 Sep 2018 12:11:58 +0300
Subject: [PATCH 051/602] Added missing }

---
 vlasovsolver/cpu_trans_map.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp
index 571b770ce..48c2fef0a 100644
--- a/vlasovsolver/cpu_trans_map.cpp
+++ b/vlasovsolver/cpu_trans_map.cpp
@@ -1657,6 +1657,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
          }
       }
    }
+   }

   // reset blocks in all non-sysboundary neighbor spatial cells for this block id
   // At this point the data is saved in targetVecData so we can reset the spatial cells

From 8ca60a653334af0320d96ef4b8e009f5da490526 Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Fri, 14 Sep 2018 12:58:58 +0300
Subject: [PATCH 052/602] Bug fix to cleaned-up version.
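The functional fix below restores the running offset into the shared target
array: each pencil of length L owns L + 2 slots (its own cells plus one ghost
cell at either end), and every loop over the pencils has to advance the offset
by that same amount. A standalone sketch with toy lengths (the names are
illustrative, not Vlasiator code):

   #include <cstddef>
   #include <cstdio>
   #include <vector>

   int main() {
      std::vector<int> lengthOfPencils = {4, 6, 5}; // toy pencil lengths
      unsigned int totalTargetLength = 0;

      for (std::size_t pencili = 0; pencili < lengthOfPencils.size(); ++pencili) {
         const int L = lengthOfPencils[pencili];
         const int targetLength = L + 2; // pencil cells + 2 ghost cells

         std::printf("pencil %zu writes slots [%u, %u)\n", pencili,
                     totalTargetLength, totalTargetLength + targetLength);

         // The fixed line: without it, every pencil after the first reads
         // and writes the slice belonging to pencil 0.
         totalTargetLength += targetLength;
      }
      return 0;
   }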
--- ...transtest_2_maxw_500k_100k_20kms_20x20.cfg | 10 ++-- vlasovsolver/cpu_trans_map.cpp | 47 ++++--------------- 2 files changed, 15 insertions(+), 42 deletions(-) diff --git a/testpackage/tests/transtest_2_maxw_500k_100k_20kms_20x20/transtest_2_maxw_500k_100k_20kms_20x20.cfg b/testpackage/tests/transtest_2_maxw_500k_100k_20kms_20x20/transtest_2_maxw_500k_100k_20kms_20x20.cfg index 914069d41..8bd8cb398 100644 --- a/testpackage/tests/transtest_2_maxw_500k_100k_20kms_20x20/transtest_2_maxw_500k_100k_20kms_20x20.cfg +++ b/testpackage/tests/transtest_2_maxw_500k_100k_20kms_20x20/transtest_2_maxw_500k_100k_20kms_20x20.cfg @@ -12,9 +12,9 @@ charge = 1 [io] diagnostic_write_interval = 1 -write_initial_state = 0 +write_initial_state = 1 -system_write_t_interval = 9.4 +system_write_t_interval = 0.25 system_write_file_name = fullf system_write_distribution_stride = 1 system_write_distribution_xline_stride = 0 @@ -32,7 +32,7 @@ y_min = 0.0 y_max = 1.0e6 z_min = 0 z_max = 50000.0 -timestep_max = 200 +timestep_max = 100 [proton_vspace] vx_min = -2.0e6 @@ -88,6 +88,6 @@ Vz = 0.0 Tx = 500000.0 Ty = 500000.0 Tz = 500000.0 -rho = 1000000.0 -rhoPertAbsAmp = 10000 +rho = 0.0 +rhoPertAbsAmp = 1.0e6 diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index 48c2fef0a..c4b83e5c7 100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -560,20 +560,6 @@ bool trans_map_1d(const dccrg::Dccrg& mpi for(uint blocki = 0; blocki < unionOfBlocks.size(); blocki++){ vmesh::GlobalID blockGID = unionOfBlocks[blocki]; phiprof::start(t1); - - // bool exitflag = false; - // cout << "sourceData: " << endl; - // const vmesh::LocalID blockLID = allCellsPointer[0]->get_velocity_block_local_id(blockGID, popID); - // Realf* data = allCellsPointer[0]->get_data(blockLID,popID); - // for (uint i = 0; i < WID3; i++) { - // cout << " " << data[i]; - // if (data[i] != 0) exitflag = true; - // } - // cout << endl; - // if(exitflag) { - // cout << blockGID << " " << blockLID << endl; - // throw; - // } for(uint celli = 0; celli < allCellsPointer.size(); celli++){ allCellsBlockLocalID[celli] = allCellsPointer[celli]->get_velocity_block_local_id(blockGID, popID); @@ -1070,7 +1056,6 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg& mpiGrid, @@ -1241,11 +1218,9 @@ void get_seed_ids(const dccrg::Dccrg& mpi // These are the seed ids for the pencils. vector negativeNeighbors; // Returns all neighbors as (id, direction-dimension) pairs. 
-         //cout << "neighbors of cell " << localCelli << " are ";
      for ( const auto neighbor : mpiGrid.get_face_neighbors_of(celli) ) {

         if ( mpiGrid.get_process(neighbor.first) == myProcess ) {
-            //cout << neighbor.first << "," << neighbor.second << " ";
            // select the neighbor in the negative dimension of the propagation
            if (neighbor.second == - (static_cast<int>(dimension) + 1)) {

@@ -1415,7 +1390,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
   uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/
   unsigned char cellid_transpose[WID3]; /*< defines the transpose for the solver internal (transposed) id: i + j*WID + k*WID2 to actual one*/
   const uint blocks_per_dim = 1;
-
   // return if there's no cells to propagate
   if(localPropagatedCells.size() == 0) {
      cout << "Returning because of no cells" << endl;
      return false;
   }
@@ -1476,8 +1450,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
         }
      }
   }
-   // ****************************************************************************
-
+   // ****************************************************************************
+
   // compute pencils => set of pencils (shared datastructure)

   vector<CellID> seedIds;
@@ -1555,7 +1529,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
   int t1 = phiprof::initializeTimer("mappingAndStore");

   //#pragma omp parallel
-   {
+   {
      //#pragma omp for schedule(guided)
      // Loop over velocity space blocks. Thread this loop (over vspace blocks) with OpenMP.
      for(uint blocki = 0; blocki < unionOfBlocks.size(); blocki++){
@@ -1572,7 +1546,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
        const vmesh::LocalID debugLID = allCellsPointer[allCellsPointerIndex]->get_velocity_block_local_id(blockGID, popID);
        Realf* data = allCellsPointer[allCellsPointerIndex]->get_data(debugLID,popID);
        //for (uint i = 0; i < WID3; i++) if (data[i] != 0) debugflag = true;
-
        velocity_block_indices_t block_indices;
        uint8_t vRefLevel;
        vmesh.getIndices(blockGID,vRefLevel, block_indices[0],
@@ -1641,7 +1614,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
           copy_trans_block_data_amr(sourceCells.data(), blockGID, L, sourceVecData,
                                     cellid_transpose, popID);
-
+
           // Dz and sourceVecData are both padded by VLASOV_STENCIL_WIDTH
           // Dz has 1 value/cell, sourceVecData has WID3 values/cell
           propagatePencil(dz, sourceVecData, dimension, blockGID, dt, vmesh, L, debugflag);
@@ -1657,6 +1630,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
             }
          }
        }
+        totalTargetLength += targetLength;
+
        // dealloc source data -- Should be automatic since it's declared in this iteration?
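The sourceVecData buffer filled and propagated above is addressed through the
i_trans_ps_blockv_pencil macro quoted earlier in the series. A standalone
sketch of that layout, with small constants assumed purely for illustration:

   #include <cstdio>

   // Illustrative constants; in Vlasiator VLASOV_STENCIL_WIDTH is 2 and
   // VEC_PER_PLANE equals WID2 / VECL.
   constexpr int VLASOV_STENCIL_WIDTH = 2;
   constexpr int VEC_PER_PLANE = 2;

   // Mirrors i_trans_ps_blockv_pencil: the cell index varies fastest, so the
   // whole stencil of one (planeVector, k) pair is contiguous in memory.
   int sourceIndex(int planeVector, int k, int cell, int lengthOfPencil) {
      return (cell + VLASOV_STENCIL_WIDTH) +
             (planeVector + k * VEC_PER_PLANE) *
             (lengthOfPencil + 2 * VLASOV_STENCIL_WIDTH);
   }

   int main() {
      const int L = 5; // pencil length; the buffer holds L + 2 * VLASOV_STENCIL_WIDTH cells
      std::printf("first ghost cell of (0,0): %d\n", sourceIndex(0, 0, -VLASOV_STENCIL_WIDTH, L));
      std::printf("first real cell of (0,0):  %d\n", sourceIndex(0, 0, 0, L));
      std::printf("first real cell of (1,0):  %d\n", sourceIndex(1, 0, 0, L));
      return 0;
   }

This is also why dz and sourceVecData can share one cell numbering: both are
padded by VLASOV_STENCIL_WIDTH, dz with one value per cell and sourceVecData
with WID3 values per cell.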
      }

      // reset blocks in all non-sysboundary neighbor spatial cells for this block id
@@ -1682,10 +1657,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
      // Loop over pencils again
      totalTargetLength = 0;
      for(uint pencili = 0; pencili < pencils.N; pencili++){
-
-         if(debugflag) {
-            cout << "pencil.periodic: " << pencils.periodic[pencili] << endl;
-         }

         int L = pencils.lengthOfPencils[pencili];
         int targetLength = L + 2;
@@ -1723,7 +1694,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
         }

         // store values from targetBlockData array to the actual blocks
-
         // Loop over cells in the pencil, including the padded cells of the target array
         for ( uint celli = 0; celli < targetLength; celli++ ) {
@@ -1757,5 +1727,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
         data = allCellsPointer[allCellsPointerIndex]->get_data(debugLID,popID);
      }
   }
+
+   //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl;
+
   return true;
}

From 1eee75c14fc8741f5584d588541febd20aad9f08 Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Fri, 14 Sep 2018 14:36:13 +0300
Subject: [PATCH 053/602] Enabled OpenMP pragmas.

---
 vlasovsolver/cpu_trans_map.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp
index c4b83e5c7..84c905102 100644
--- a/vlasovsolver/cpu_trans_map.cpp
+++ b/vlasovsolver/cpu_trans_map.cpp
@@ -1404,7 +1404,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
   std::vector<SpatialCell*> allCellsPointer(allCells.size());

   // Initialize allCellsPointer
-   //#pragma omp parallel for
+   #pragma omp parallel for
   for(uint celli = 0; celli < allCells.size(); celli++){
      allCellsPointer[celli] = mpiGrid[allCells[celli]];
   }
@@ -1528,10 +1528,10 @@ bool trans_map_1d_amr(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,

   int t1 = phiprof::initializeTimer("mappingAndStore");

-   //#pragma omp parallel
+#pragma omp parallel
   {
-      //#pragma omp for schedule(guided)
      // Loop over velocity space blocks. Thread this loop (over vspace blocks) with OpenMP.
+#pragma omp for schedule(guided)
      for(uint blocki = 0; blocki < unionOfBlocks.size(); blocki++){

         phiprof::start(t1);

From 99999c8f495446065a0b94daf7b886692cd17298 Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Fri, 14 Sep 2018 17:05:43 +0300
Subject: [PATCH 054/602] Silenced compiler warnings about uint/int
 comparisons. Added some const qualifiers.
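The warnings came from comparing signed loop indices against unsigned bounds
and from forming i - 1 with an unsigned index. A minimal standalone
illustration of the pitfall (not Vlasiator code):

   #include <cstdio>

   int main() {
      const unsigned int lengthOfPencil = 3;
      for (unsigned int i = 0; i < lengthOfPencil; ++i) {
         unsigned int wrapped = i - 1;      // wraps to 4294967295 when i == 0
         int im1 = static_cast<int>(i) - 1; // stays -1, usable as a negative offset
         std::printf("i=%u  unsigned i-1=%u  signed im1=%d\n", i, wrapped, im1);
      }
      return 0;
   }

The patch's "int im1 = i - 1;" line reaches the same end by converting the
wrapped unsigned value back to signed, which yields -1 on the two's-complement
targets the code is built for.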
--- vlasovsolver/cpu_trans_map.cpp | 14 +++++++------- vlasovsolver/cpu_trans_map.hpp | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index 84c905102..c0b849ea6 100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -1190,14 +1190,14 @@ void propagatePencil(Vec* dz, Vec* values, const uint dimension, const uint bloc // Write target data into source data // VLASOV_STENCIL_WIDTH >= nTargetNeighborsPerPencil is required (default 2 >= 1) - for (int i = 0; i < lengthOfPencil + 2 * nTargetNeighborsPerPencil; i++) { + for (uint i = 0; i < lengthOfPencil + 2 * nTargetNeighborsPerPencil; i++) { for (uint k = 0; k < WID; ++k) { - for (uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { - - values[i_trans_ps_blockv_pencil(planeVector, k, i - 1, lengthOfPencil)] = - targetValues[i_trans_pt_blockv(planeVector, k, i - 1)]; + for (uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { + int im1 = i - 1; // doing this to shut up compiler warnings + values[i_trans_ps_blockv_pencil(planeVector, k, im1, lengthOfPencil)] = + targetValues[i_trans_pt_blockv(planeVector, k, im1)]; } } @@ -1620,7 +1620,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& propagatePencil(dz, sourceVecData, dimension, blockGID, dt, vmesh, L, debugflag); // sourcedata => targetdata[this pencil]) - for (int i = 0; i < targetLength; i++) { + for (uint i = 0; i < targetLength; i++) { for (uint k=0; k& for(uint pencili = 0; pencili < pencils.N; pencili++){ int L = pencils.lengthOfPencils[pencili]; - int targetLength = L + 2; + uint targetLength = L + 2; vector pencilIds = pencils.getIds(pencili); bool debugPencilFlag = false; diff --git a/vlasovsolver/cpu_trans_map.hpp b/vlasovsolver/cpu_trans_map.hpp index 287ba48ec..0a97c8fbd 100644 --- a/vlasovsolver/cpu_trans_map.hpp +++ b/vlasovsolver/cpu_trans_map.hpp @@ -53,7 +53,7 @@ struct setOfPencils { periodic.push_back(periodicIn); } - std::vector getIds(const uint pencilId) { + std::vector getIds(const uint pencilId) const { std::vector idsOut; From 670d1c39024d3f3c89b4e524abec81ddcd537023 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 17 Sep 2018 12:37:27 +0300 Subject: [PATCH 055/602] Reverted config file back to its original state --- .../transtest_2_maxw_500k_100k_20kms_20x20.cfg | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/testpackage/tests/transtest_2_maxw_500k_100k_20kms_20x20/transtest_2_maxw_500k_100k_20kms_20x20.cfg b/testpackage/tests/transtest_2_maxw_500k_100k_20kms_20x20/transtest_2_maxw_500k_100k_20kms_20x20.cfg index 8bd8cb398..914069d41 100644 --- a/testpackage/tests/transtest_2_maxw_500k_100k_20kms_20x20/transtest_2_maxw_500k_100k_20kms_20x20.cfg +++ b/testpackage/tests/transtest_2_maxw_500k_100k_20kms_20x20/transtest_2_maxw_500k_100k_20kms_20x20.cfg @@ -12,9 +12,9 @@ charge = 1 [io] diagnostic_write_interval = 1 -write_initial_state = 1 +write_initial_state = 0 -system_write_t_interval = 0.25 +system_write_t_interval = 9.4 system_write_file_name = fullf system_write_distribution_stride = 1 system_write_distribution_xline_stride = 0 @@ -32,7 +32,7 @@ y_min = 0.0 y_max = 1.0e6 z_min = 0 z_max = 50000.0 -timestep_max = 100 +timestep_max = 200 [proton_vspace] vx_min = -2.0e6 @@ -88,6 +88,6 @@ Vz = 0.0 Tx = 500000.0 Ty = 500000.0 Tz = 500000.0 -rho = 0.0 -rhoPertAbsAmp = 1.0e6 +rho = 1000000.0 +rhoPertAbsAmp = 10000 From 279320e2c6963f1478f5037ce393c59b2cbe90e2 Mon Sep 17 00:00:00 2001 From: tkoskela Date: 
Mon, 17 Sep 2018 14:04:19 +0300
Subject: [PATCH 056/602] Split AMR code into separate files,
 vlasovsolver/cpu_trans_map_amr.[c,h]pp.

---
 Makefile                           |    2 +-
 vlasiator.cpp                      |   10 +-
 vlasovsolver/cpu_trans_map.cpp     |  963 ---------------------
 vlasovsolver/cpu_trans_map.hpp     |   79 +-
 vlasovsolver/cpu_trans_map_amr.cpp | 1267 ++++++++++++++--------------
 vlasovsolver/cpu_trans_map_amr.hpp |   92 +-
 6 files changed, 767 insertions(+), 1646 deletions(-)

diff --git a/Makefile b/Makefile
index 101010f98..4ddeb32b5 100644
--- a/Makefile
+++ b/Makefile
@@ -201,7 +201,7 @@ ifeq ($(MESH),AMR)
 OBJS += cpu_moments.o
 else
 OBJS += cpu_acc_intersections.o cpu_acc_map.o cpu_acc_sort_blocks.o cpu_acc_load_blocks.o cpu_acc_semilag.o cpu_acc_transform.o \
-	cpu_moments.o cpu_trans_map.o #cpu_trans_map_amr.o
+	cpu_moments.o cpu_trans_map.o cpu_trans_map_amr.o
 endif

 # Add field solver objects

diff --git a/vlasiator.cpp b/vlasiator.cpp
index 4c5113e4d..1235af47f 100644
--- a/vlasiator.cpp
+++ b/vlasiator.cpp
@@ -361,7 +361,7 @@ int main(int argn,char* args[]) {
   phiprof::start("Init grid");
   //dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry> mpiGrid;
   initializeGrid(argn,args,mpiGrid,sysBoundaries,*project);
-   isSysBoundaryCondDynamic = sysBoundaries.isDynamic();
+   isSysBoundaryCondDynamic = sysBoundaries.isDynamic();
   phiprof::stop("Init grid");

   // Initialize data reduction operators. This should be done elsewhere in order to initialize
@@ -601,6 +601,14 @@ int main(int argn,char* args[]) {
         phiprof::stop("propagate-velocity-space-dt/2");
      }

+
+   // std::array<double, 3> coords;
+   // coords[0] = (P::xmax - P::xmin) / 2.0;
+   // coords[1] = (P::ymax - P::ymin) / 2.0;
+   // coords[2] = (P::zmax - P::zmin) / 2.0;
+   // mpiGrid.refine_completely_at(coords);
+   // mpiGrid.stop_refining();
+
   phiprof::stop("Initialization");

   // ***********************************

diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp
index c0b849ea6..22134d78a 100644
--- a/vlasovsolver/cpu_trans_map.cpp
+++ b/vlasovsolver/cpu_trans_map.cpp
@@ -40,23 +40,6 @@
 using namespace std;
 using namespace spatial_cell;

-void compute_spatial_source_neighbors(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
-                                      const CellID& cellID,const uint dimension,SpatialCell **neighbors);
-void compute_spatial_target_neighbors(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
-                                      const CellID& cellID,const uint dimension,SpatialCell **neighbors);
-void copy_trans_block_data(SpatialCell** source_neighbors,const vmesh::GlobalID blockGID,
-                           Vec* values,const unsigned char* const cellid_transpose,const uint popID);
-CellID get_spatial_neighbor(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
-                            const CellID& cellID,const bool include_first_boundary_layer,
-                            const int spatial_di,const int spatial_dj,const int spatial_dk);
-SpatialCell* get_spatial_neighbor_pointer(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,
-                                          const CellID& cellID,const bool include_first_boundary_layer,
-                                          const int spatial_di,const int spatial_dj,const int spatial_dk);
-void store_trans_block_data(SpatialCell** target_neighbors,const vmesh::GlobalID blockGID,
-                            Vec* __restrict__ target_values,
-                            const unsigned char* const cellid_transpose,const uint popID);
-
-
 // indices in padded source block, which is of type Vec with VECL
 // elements in each vector.
b_k is the block index in z direction in // ordinary space [- VLASOV_STENCIL_WIDTH to VLASOV_STENCIL_WIDTH], @@ -72,8 +55,6 @@ void store_trans_block_data(SpatialCell** target_neighbors,const vmesh::GlobalID //#define i_trans_pt_blockv(j, k, b_k) ( ( (j) * WID + (k) * WID2 + ((b_k) + 1 ) * WID3) / VECL ) #define i_trans_pt_blockv(planeVectorIndex, planeIndex, blockIndex) ( planeVectorIndex + planeIndex * VEC_PER_PLANE + (blockIndex + 1) * VEC_PER_BLOCK) -#define i_trans_ps_blockv_pencil(planeVectorIndex, planeIndex, blockIndex, lengthOfPencil) ( (blockIndex) + VLASOV_STENCIL_WIDTH + ( (planeVectorIndex) + (planeIndex) * VEC_PER_PLANE ) * ( lengthOfPencil + 2 * VLASOV_STENCIL_WIDTH) ) - //Is cell translated? It is not translated if DO_NO_COMPUTE or if it is sysboundary cell and not in first sysboundarylayer bool do_translate_cell(SpatialCell* SC){ if(SC->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE || @@ -225,56 +206,6 @@ void compute_spatial_source_neighbors(const dccrg::Dccrg& mpiGrid, - setOfPencils pencils, - const uint iPencil, - const uint dimension, - SpatialCell **sourceCells){ - - // L = length of the pencil iPencil - int L = pencils.lengthOfPencils[iPencil]; - vector ids = pencils.getIds(iPencil); - - for (int iCell = -VLASOV_STENCIL_WIDTH; iCell < L + VLASOV_STENCIL_WIDTH; iCell++) { - CellID cellID = ids[min(max(iCell, 0), L - 1)]; - - int i = 0; - if(iCell <= 0) i = iCell; - if(iCell >= L) i = iCell - (L - 1); - - switch (dimension) { - case 0: - sourceCells[iCell + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, i, 0, 0); - break; - case 1: - sourceCells[iCell + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, i, 0); - break; - case 2: - sourceCells[iCell + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, 0, i); - break; - } - } - - SpatialCell* last_good_cell = mpiGrid[ids.front()]; - /*loop to neative side and replace all invalid cells with the closest good cell*/ - for(int i = -1;i>=-VLASOV_STENCIL_WIDTH;i--){ - if(sourceCells[i + VLASOV_STENCIL_WIDTH] == NULL) - sourceCells[i + VLASOV_STENCIL_WIDTH] = last_good_cell; - else - last_good_cell = sourceCells[i + VLASOV_STENCIL_WIDTH]; - } - - last_good_cell = mpiGrid[ids.back()]; - /*loop to positive side and replace all invalid cells with the closest good cell*/ - for(int i = L + 1; i <= L + VLASOV_STENCIL_WIDTH; i++){ - if(sourceCells[i + VLASOV_STENCIL_WIDTH] == NULL) - sourceCells[i + VLASOV_STENCIL_WIDTH] = last_good_cell; - else - last_good_cell = sourceCells[i + VLASOV_STENCIL_WIDTH]; - } -} - - /*compute spatial target neighbors, stencil has a size of 3. 
No boundary cells are included*/ void compute_spatial_target_neighbors(const dccrg::Dccrg& mpiGrid, const CellID& cellID, @@ -297,42 +228,6 @@ void compute_spatial_target_neighbors(const dccrg::Dccrg& mpiGrid, - setOfPencils& pencils, - const uint dimension, - SpatialCell **targetCells){ - - uint GID = 0; - for(uint iPencil = 0; iPencil < pencils.N; iPencil++){ - // L = length of the pencil iPencil - int L = pencils.lengthOfPencils[iPencil]; - - vector ids = pencils.getIds(iPencil); - for (int iCell = -1; iCell <= L; iCell++) { - CellID cellID = ids[min(max(iCell,0),L - 1)]; - - int i = 0; - if(iCell == -1) i = -1; - if(iCell == L) i = 1; - - switch (dimension) { - case 0: - targetCells[GID + iCell + 1] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, i, 0, 0); - break; - case 1: - targetCells[GID + iCell + 1] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, i, 0); - break; - case 2: - targetCells[GID + iCell + 1] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, 0, i); - break; - } - } - GID += (L + 2); - } -} - - /* Copy the data to the temporary values array, so that the * dimensions are correctly swapped. Also, copy the same block for * then neighboring spatial cells (in the dimension). neighbors @@ -874,861 +769,3 @@ void update_remote_mapping_contribution( } } -CellID selectNeighbor(const dccrg::Dccrg &grid, - CellID id, int dimension = 0, uint path = 0) { - - const auto neighbors = grid.get_face_neighbors_of(id); - const int myProcess = grid.get_process(id); - - vector < CellID > myNeighbors; - // Collect neighbor ids in the positive direction of the chosen dimension, - // that are on the same process as the origin. - // Note that dimension indexing starts from 1 (of course it does) - for (const auto cell : neighbors) { - if (cell.second == dimension + 1 && grid.get_process(cell.first) == myProcess) - myNeighbors.push_back(cell.first); - } - - CellID neighbor; - - switch( myNeighbors.size() ) { - // Since refinement can only increase by 1 level the only possibilities - // Should be 0 neighbors, 1 neighbor or 4 neighbors. - case 0 : { - // did not find neighbors - neighbor = INVALID_CELLID; - break; - } - case 1 : { - neighbor = myNeighbors[0]; - break; - } - case 4 : { - neighbor = myNeighbors[path]; - break; - } - default: { - // something is wrong - neighbor = INVALID_CELLID; - throw "Invalid neighbor count!"; - break; - } - } - - return neighbor; - -} - -setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg &grid, - setOfPencils &pencils, CellID startingId, - vector ids, uint dimension, - vector path) { - - const bool debug = false; - CellID nextNeighbor; - uint id = startingId; - uint startingRefLvl = grid.get_refinement_level(id); - - if( ids.size() == 0 ) - ids.push_back(startingId); - - // If the cell where we start is refined, we need to figure out which path - // to follow in future refined cells. This is a bit hacky but we have to - // use the order or the children of the parent cell to figure out which - // corner we are in. - // Maybe you could use physical coordinates here? 
- if( startingRefLvl > path.size() ) { - for ( uint i = path.size(); i < startingRefLvl; i++) { - auto parent = grid.get_parent(id); - auto children = grid.get_all_children(parent); - auto it = std::find(children.begin(),children.end(),id); - auto index = std::distance(children.begin(),it); - auto index2 = index; - - switch( dimension ) { - case 0: { - index2 = index / 2; - break; - } - case 1: { - index2 = index - index / 2; - break; - } - case 2: { - index2 = index % 4; - break; - } - } - path.insert(path.begin(),index2); - id = parent; - } - } - - id = startingId; - - bool periodic; - - while (id > 0) { - - periodic = false; - - // Find the refinement level in the neighboring cell. Any neighbor will do - // since refinement level can only increase by 1 between neighbors. - nextNeighbor = selectNeighbor(grid,id,dimension); - - // If there are no neighbors, we can stop. - if (nextNeighbor == 0) - break; - - uint refLvl = grid.get_refinement_level(nextNeighbor); - - if (refLvl > 0) { - - // If we have encountered this refinement level before and stored - // the path this builder follows, we will just take the same path - // again. - if ( path.size() >= refLvl ) { - - if(debug) { - std::cout << "I am cell " << id << ". "; - std::cout << "I have seen refinement level " << refLvl << " before. Path is "; - for (auto k = path.begin(); k != path.end(); ++k) - std::cout << *k << " "; - std::cout << std::endl; - } - - nextNeighbor = selectNeighbor(grid,id,dimension,path[refLvl-1]); - - } else { - - if(debug) { - std::cout << "I am cell " << id << ". "; - std::cout << "I have NOT seen refinement level " << refLvl << " before. Path is "; - for (auto k = path.begin(); k != path.end(); ++k) - std::cout << *k << ' '; - std::cout << std::endl; - } - - // New refinement level, create a path through each neighbor cell - for ( uint i : {0,1,2,3} ) { - - vector < uint > myPath = path; - myPath.push_back(i); - - nextNeighbor = selectNeighbor(grid,id,dimension,myPath.back()); - - if ( i == 3 ) { - - // This builder continues with neighbor 3 - ids.push_back(nextNeighbor); - path = myPath; - - } else { - - // Spawn new builders for neighbors 0,1,2 - buildPencilsWithNeighbors(grid,pencils,id,ids,dimension,myPath); - - } - - } - - } - - } else { - if(debug) { - std::cout << "I am cell " << id << ". "; - std::cout << " I am on refinement level 0." << std::endl; - } - }// Closes if (refLvl == 0) - - // If we found a neighbor, add it to the list of ids for this pencil. - if(nextNeighbor != INVALID_CELLID) { - if (debug) { - std::cout << " Next neighbor is " << nextNeighbor << "." << std::endl; - } - - for (auto id : ids) { - if (nextNeighbor == id) { - periodic = true; - } - } - if (periodic) { - // Exit the while loop - id = -1; - } else { - ids.push_back(nextNeighbor); - // Move to the next cell. 
- id = nextNeighbor; - } - } - - } // Closes while loop - - // Get the x,y - coordinates of the pencil (in the direction perpendicular to the pencil) - const auto coordinates = grid.get_center(ids[0]); - double x,y; - uint ix,iy,iz; - - switch(dimension) { - case 0: - ix = 1; - iy = 2; - iz = 0; - break; - - case 1: - ix = 2; - iy = 0; - iz = 1; - break; - - case 2: - ix = 0; - iy = 1; - iz = 2; - break; - - default: - ix = 0; - iy = 1; - iz = 2; - break; - } - - x = coordinates[ix]; - y = coordinates[iy]; - - pencils.addPencil(ids,x,y,periodic); - - return pencils; - -} - -//void propagatePencil(Vec dr[], Vec values, Vec z_translation, uint blocks_per_dim ) { -void propagatePencil(Vec* dz, Vec* values, const uint dimension, const uint blockGID, const Realv dt, - const vmesh::VelocityMesh &vmesh, const uint lengthOfPencil, bool debugflag) { - - // Get velocity data from vmesh that we need later to calculate the translation - velocity_block_indices_t block_indices; - uint8_t refLevel; - vmesh.getIndices(blockGID,refLevel, block_indices[0], block_indices[1], block_indices[2]); - Realv dvz = vmesh.getCellSize(refLevel)[dimension]; - Realv vz_min = vmesh.getMeshMinLimits()[dimension]; - - // Assuming 1 neighbor in the target array because of the CFL condition - // In fact propagating to > 1 neighbor will give an error - const uint nTargetNeighborsPerPencil = 1; - - // Vector buffer where we write data, initialized to 0*/ - Vec targetValues[(lengthOfPencil + 2 * nTargetNeighborsPerPencil) * WID3 / VECL]; - - for (uint i = 0; i < (lengthOfPencil + 2 * nTargetNeighborsPerPencil) * WID3 / VECL; i++) { - - // init target_values - targetValues[i] = Vec(0.0); - - } - // Go from 0 to length here to propagate all the cells in the pencil - for (uint i = 0; i < lengthOfPencil; i++){ - - // The source array is padded by VLASOV_STENCIL_WIDTH on both sides. - uint i_source = i + VLASOV_STENCIL_WIDTH; - - for (uint k = 0; k < WID; ++k) { - - const Realv cell_vz = (block_indices[dimension] * WID + k + 0.5) * dvz + vz_min; //cell centered velocity - const Vec z_translation = cell_vz * dt / dz[i_source]; // how much it moved in time dt (reduced units) - - // Determine direction of translation - // part of density goes here (cell index change along spatial direcion) - Vecb positiveTranslationDirection = (z_translation > Vec(0.0)); - - // Calculate normalized coordinates in current cell. - // The coordinates (scaled units from 0 to 1) between which we will - // integrate to put mass in the target neighboring cell. - // Normalize the coordinates to the origin cell. Then we scale with the difference - // in volume between target and origin later when adding the integrated value. - Vec z_1,z_2; - z_1 = select(positiveTranslationDirection, 1.0 - z_translation, 0.0); - z_2 = select(positiveTranslationDirection, 1.0, - z_translation); - - if( horizontal_or(abs(z_1) > Vec(1.0)) || horizontal_or(abs(z_2) > Vec(1.0)) ) { - std::cout << "Error, CFL condition violated\n"; - std::cout << "Exiting\n"; - std::exit(1); - } - - for (uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { - - // Compute polynomial coefficients - Vec a[3]; - // Dz: is a padded array, pointer can point to the beginning, i + VLASOV_STENCIL_WIDTH will get the right cell. - // values: transpose function adds VLASOV_STENCIL_WIDTH to the block index, therefore we substract it here, then - // i + VLASOV_STENCIL_WIDTH will point to the right cell. Complicated! Why! Sad! MVGA! 
- compute_ppm_coeff_nonuniform(dz, - values + i_trans_ps_blockv_pencil(planeVector, k, i-VLASOV_STENCIL_WIDTH, lengthOfPencil), - h4, VLASOV_STENCIL_WIDTH, a); - - // Compute integral - const Vec ngbr_target_density = - z_2 * ( a[0] + z_2 * ( a[1] + z_2 * a[2] ) ) - - z_1 * ( a[0] + z_1 * ( a[1] + z_1 * a[2] ) ); - // Store mapped density in two target cells - // in the neighbor cell we will put this density - targetValues[i_trans_pt_blockv(planeVector, k, i + 1)] += select( positiveTranslationDirection, - ngbr_target_density * dz[i_source] / dz[i_source + 1],Vec(0.0)); - targetValues[i_trans_pt_blockv(planeVector, k, i - 1 )] += select(!positiveTranslationDirection, - ngbr_target_density * dz[i_source] / dz[i_source - 1],Vec(0.0)); - - // in the current original cells we will put the rest of the original density - targetValues[i_trans_pt_blockv(planeVector, k, i)] += - values[i_trans_ps_blockv_pencil(planeVector, k, i, lengthOfPencil)] - ngbr_target_density; - } - } - } - - // Write target data into source data - // VLASOV_STENCIL_WIDTH >= nTargetNeighborsPerPencil is required (default 2 >= 1) - - for (uint i = 0; i < lengthOfPencil + 2 * nTargetNeighborsPerPencil; i++) { - - for (uint k = 0; k < WID; ++k) { - - for (uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { - int im1 = i - 1; // doing this to shut up compiler warnings - values[i_trans_ps_blockv_pencil(planeVector, k, im1, lengthOfPencil)] = - targetValues[i_trans_pt_blockv(planeVector, k, im1)]; - - } - } - } -} - -void get_seed_ids(const dccrg::Dccrg& mpiGrid, - const vector &localPropagatedCells, - const uint dimension, - vector &seedIds) { - - const bool debug = false; - - //#pragma omp parallel for - for(auto celli: localPropagatedCells) { - int myProcess = mpiGrid.get_process(celli); - // Collect a list of cell ids that do not have a neighbor in the negative direction - // These are the seed ids for the pencils. - vector negativeNeighbors; - // Returns all neighbors as (id, direction-dimension) pairs. - for ( const auto neighbor : mpiGrid.get_face_neighbors_of(celli ) ) { - - if ( mpiGrid.get_process(neighbor.first) == myProcess ) { - // select the neighbor in the negative dimension of the propagation - if (neighbor.second == - (static_cast(dimension) + 1)) { - - // add the id of the neighbor to a list if it's on the same process - negativeNeighbors.push_back(neighbor.first); - - } - - } - } - //cout << endl; - // if no neighbors were found in the negative direction, add this cell id to the seed cells - if (negativeNeighbors.size() == 0) - seedIds.push_back(celli); - } - - // If no seed ids were found, let's assume we have a periodic boundary and - // a single process in the dimension of propagation. 
In this case we start from - // the first cells of the plane perpendicular to the propagation dimension - if (seedIds.size() == 0) { - for (uint ix = 0; ix < P::xcells_ini; ix++) { - for (uint iy = 0; iy < P::ycells_ini; iy++) { - for (uint iz = 0; iz < P::zcells_ini; iz++) { - CellID seedId; - switch (dimension) { - case 0: - // yz - plane - if(ix == 0) { - seedId = P::xcells_ini * P::ycells_ini * iz + P::xcells_ini * iy + 1; - if(mpiGrid.get_process(seedId) == mpiGrid.get_process(localPropagatedCells[0])) - seedIds.push_back(seedId); - - } - break; - case 1: - // xz - plane - if(iy == 0) { - seedId = P::xcells_ini * P::ycells_ini * iz + ix + 1; - if(mpiGrid.get_process(seedId) == mpiGrid.get_process(localPropagatedCells[0])) - seedIds.push_back(seedId); - - } - break; - case 2: - // xy - plane - if(iz == 0) { - seedId = P::xcells_ini * iy + ix + 1; - if(mpiGrid.get_process(seedId) == mpiGrid.get_process(localPropagatedCells[0])) - seedIds.push_back(seedId); - } - break; - } - } - } - } - } - - if(debug) { - cout << "Number of seed ids is " << seedIds.size() << endl; - cout << "Seed ids are: "; - for (const auto seedId : seedIds) { - cout << seedId << " "; - } - cout << endl; - } -} - - - - -/* Copy the data to the temporary values array, so that the - * dimensions are correctly swapped. Also, copy the same block for - * then neighboring spatial cells (in the dimension). neighbors - * generated with compute_spatial_neighbors_wboundcond). - * - * This function must be thread-safe. - * - * @param source_neighbors Array containing the VLASOV_STENCIL_WIDTH closest - * spatial neighbors of this cell in the propagated dimension. - * @param blockGID Global ID of the velocity block. - * @param int lengthOfPencil Number of spatial cells in pencil - * @param values Vector where loaded data is stored. - * @param cellid_transpose - * @param popID ID of the particle species. - */ -void copy_trans_block_data_amr( - SpatialCell** source_neighbors, - const vmesh::GlobalID blockGID, - int lengthOfPencil, - Vec* values, - const unsigned char* const cellid_transpose, - const uint popID) { - - // Allocate data pointer for all blocks in pencil. 
Pad on both ends by VLASOV_STENCIL_WIDTH - Realf* blockDataPointer[lengthOfPencil + 2 * VLASOV_STENCIL_WIDTH]; - - for (int b = -VLASOV_STENCIL_WIDTH; b < lengthOfPencil + VLASOV_STENCIL_WIDTH; b++) { - // Get cell pointer and local block id - SpatialCell* srcCell = source_neighbors[b + VLASOV_STENCIL_WIDTH]; - - const vmesh::LocalID blockLID = srcCell->get_velocity_block_local_id(blockGID,popID); - if (blockLID != srcCell->invalid_local_id()) { - // Get data pointer - blockDataPointer[b + VLASOV_STENCIL_WIDTH] = srcCell->get_data(blockLID,popID); - // //prefetch storage pointers to L1 - // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]), _MM_HINT_T0); - // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 64, _MM_HINT_T0); - // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 128, _MM_HINT_T0); - // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 192, _MM_HINT_T0); - // if(VPREC == 8) { - // //prefetch storage pointers to L1 - // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 256, _MM_HINT_T0); - // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 320, _MM_HINT_T0); - // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 384, _MM_HINT_T0); - // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 448, _MM_HINT_T0); - // } - - } else { - blockDataPointer[b + VLASOV_STENCIL_WIDTH] = NULL; - } - } - - // Copy volume averages of this block from all spatial cells: - for (int b = -VLASOV_STENCIL_WIDTH; b < lengthOfPencil + VLASOV_STENCIL_WIDTH; b++) { - if(blockDataPointer[b + VLASOV_STENCIL_WIDTH] != NULL) { - Realv blockValues[WID3]; - const Realf* block_data = blockDataPointer[b + VLASOV_STENCIL_WIDTH]; - // Copy data to a temporary array and transpose values so that mapping is along k direction. - // spatial source_neighbors already taken care of when - // creating source_neighbors table. If a normal spatial cell does not - // simply have the block, its value will be its null_block which - // is fine. This null_block has a value of zero in data, and that - // is thus the velocity space boundary - for (uint i=0; i& mpiGrid, - const vector& localPropagatedCells, - const vector& remoteTargetCells, - const uint dimension, - const Realv dt, - const uint popID) { - - Realv dvz,vz_min; - uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ - unsigned char cellid_transpose[WID3]; /*< defines the transpose for the solver internal (transposed) id: i + j*WID + k*WID2 to actual one*/ - const uint blocks_per_dim = 1; - // return if there's no cells to propagate - if(localPropagatedCells.size() == 0) { - cout << "Returning because of no cells" << endl; - return false; - } - - // Vector with all cell ids - vector allCells(localPropagatedCells); - allCells.insert(allCells.end(), remoteTargetCells.begin(), remoteTargetCells.end()); - - // Vectors of pointers to the cell structs - std::vector allCellsPointer(allCells.size()); - - // Initialize allCellsPointer - #pragma omp parallel for - for(uint celli = 0; celli < allCells.size(); celli++){ - allCellsPointer[celli] = mpiGrid[allCells[celli]]; - } - - // Fiddle indices x,y,z in VELOCITY SPACE - switch (dimension) { - case 0: - // set values in array that is used to convert block indices - // to global ID using a dot product. 
- cell_indices_to_id[0]=WID2; - cell_indices_to_id[1]=WID; - cell_indices_to_id[2]=1; - break; - case 1: - // set values in array that is used to convert block indices - // to global ID using a dot product - cell_indices_to_id[0]=1; - cell_indices_to_id[1]=WID2; - cell_indices_to_id[2]=WID; - break; - case 2: - // set values in array that is used to convert block indices - // to global id using a dot product. - cell_indices_to_id[0]=1; - cell_indices_to_id[1]=WID; - cell_indices_to_id[2]=WID2; - break; - default: - cerr << __FILE__ << ":"<< __LINE__ << " Wrong dimension, abort"< set of pencils (shared datastructure) - - vector seedIds; - get_seed_ids(mpiGrid, localPropagatedCells, dimension, seedIds); - - // Empty vectors for internal use of buildPencilsWithNeighbors. Could be default values but - // default vectors are complicated. Should overload buildPencilsWithNeighbors like suggested here - // https://stackoverflow.com/questions/3147274/c-default-argument-for-vectorint - vector ids; - vector path; - - // Output vectors for ready pencils - setOfPencils pencils; - vector pencilSets; - - for (const auto seedId : seedIds) { - // Construct pencils from the seedIds into a set of pencils. - pencils = buildPencilsWithNeighbors(mpiGrid, pencils, seedId, ids, dimension, path); - } - - // Print out ids of pencils (if needed for debugging) - if (false) { - uint ibeg = 0; - uint iend = 0; - std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; - std::cout << "(x, y): indices " << std::endl; - std::cout << "-----------------------------------------------------------------" << std::endl; - for (uint i = 0; i < pencils.N; i++) { - iend += pencils.lengthOfPencils[i]; - std::cout << "(" << pencils.x[i] << ", " << pencils.y[i] << "): "; - for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { - std::cout << *j << " "; - } - ibeg = iend; - std::cout << std::endl; - } - } - - - // Add the final set of pencils to the pencilSets - vector. - // Only one set is created for now but we retain support for multiple sets - pencilSets.push_back(pencils); - // **************************************************************************** - - const uint8_t VMESH_REFLEVEL = 0; - - // Get a pointer to the velocity mesh of the first spatial cell - const vmesh::VelocityMesh& vmesh = allCellsPointer[0]->get_velocity_mesh(popID); - - // set cell size in dimension direction - dvz = vmesh.getCellSize(VMESH_REFLEVEL)[dimension]; - vz_min = vmesh.getMeshMinLimits()[dimension]; - - // Get a unique sorted list of blockids that are in any of the - // propagated cells. First use set for this, then add to vector (may not - // be the most nice way to do this and in any case we could do it along - // dimension for data locality reasons => copy acc map column code, TODO: FIXME - // TODO: Do this separately for each pencil? - std::unordered_set unionOfBlocksSet; - - for(auto cell : allCellsPointer) { - vmesh::VelocityMesh& vmesh = cell->get_velocity_mesh(popID); - for (vmesh::LocalID block_i=0; block_i< vmesh.size(); ++block_i) { - unionOfBlocksSet.insert(vmesh.getGlobalID(block_i)); - } - } - - std::vector unionOfBlocks; - unionOfBlocks.reserve(unionOfBlocksSet.size()); - for(const auto blockGID: unionOfBlocksSet) { - unionOfBlocks.push_back(blockGID); - } - // **************************************************************************** - - int t1 = phiprof::initializeTimer("mappingAndStore"); - -#pragma omp parallel - { - // Loop over velocity space blocks. 
Thread this loop (over vspace blocks) with OpenMP. -#pragma omp for schedule(guided) - for(uint blocki = 0; blocki < unionOfBlocks.size(); blocki++){ - - phiprof::start(t1); - - // Get global id of the velocity block - vmesh::GlobalID blockGID = unionOfBlocks[blocki]; - - bool debugflag = false; - CellID debugcell; - uint allCellsPointerIndex = 16; - - const vmesh::LocalID debugLID = allCellsPointer[allCellsPointerIndex]->get_velocity_block_local_id(blockGID, popID); - Realf* data = allCellsPointer[allCellsPointerIndex]->get_data(debugLID,popID); - //for (uint i = 0; i < WID3; i++) if (data[i] != 0) debugflag = true; - velocity_block_indices_t block_indices; - uint8_t vRefLevel; - vmesh.getIndices(blockGID,vRefLevel, block_indices[0], - block_indices[1], block_indices[2]); - - // Loop over sets of pencils - // This loop only has one iteration for now - for ( auto pencils: pencilSets ) { - - std::vector targetBlockData((pencils.sumOfLengths + 2 * pencils.N) * WID3); - // Allocate vectorized targetvecdata sum(lengths of pencils)*WID3 / VECL) - // Add padding by 2 for each pencil - Vec targetVecData[(pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL]; - - // Initialize targetvecdata to 0 - for( uint i = 0; i < (pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL; i++ ) { - targetVecData[i] = Vec(0.0); - } - - // TODO: There's probably a smarter way to keep track of where we are writing - // in the target data array. - uint targetDataIndex = 0; - - // Compute spatial neighbors for target cells. - // For targets we need the local cells, plus a padding of 1 cell at both ends - std::vector targetCells(pencils.sumOfLengths + pencils.N * 2 ); - - compute_spatial_target_cells_for_pencils(mpiGrid, pencils, dimension, targetCells.data()); - - // Loop over pencils - uint totalTargetLength = 0; - for(uint pencili = 0; pencili < pencils.N; pencili++){ - - vector pencilIds = pencils.getIds(pencili); - int L = pencils.lengthOfPencils[pencili]; - uint targetLength = L + 2; - uint sourceLength = L + 2 * VLASOV_STENCIL_WIDTH; - - // Compute spatial neighbors for the source cells of the pencil. In - // source cells we have a wider stencil and take into account boundaries. 
- std::vector sourceCells(sourceLength); - compute_spatial_source_cells_for_pencil(mpiGrid, pencils, pencili, dimension, sourceCells.data()); - - - Vec dz[sourceCells.size()]; - uint i = 0; - for(auto neighbor: sourceCells) { - switch (dimension) { - case(0): - dz[i] = neighbor->SpatialCell::parameters[CellParams::DX]; - break; - case(1): - dz[i] = neighbor->SpatialCell::parameters[CellParams::DY]; - break; - case(2): - dz[i] = neighbor->SpatialCell::parameters[CellParams::DZ]; - break; - } - i++; - } - - // Allocate source data: sourcedata sourcedata) / (proper xy reconstruction in future) - copy_trans_block_data_amr(sourceCells.data(), blockGID, L, sourceVecData, - cellid_transpose, popID); - - // Dz and sourceVecData are both padded by VLASOV_STENCIL_WIDTH - // Dz has 1 value/cell, sourceVecData has WID3 values/cell - propagatePencil(dz, sourceVecData, dimension, blockGID, dt, vmesh, L, debugflag); - - // sourcedata => targetdata[this pencil]) - for (uint i = 0; i < targetLength; i++) { - for (uint k=0; ksysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { - // Get local velocity block id - const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); - // Check for invalid id - if (blockLID != vmesh::VelocityMesh::invalidLocalID()) { - // Get a pointer to the block data - Realf* blockData = spatial_cell->get_data(blockLID, popID); - // Loop over velocity block cells - for(int i = 0; i < WID3; i++) { - blockData[i] = 0.0; - } - } - } - } - - // store_data(target_data => targetCells) :Aggregate data for blockid to original location - // Loop over pencils again - totalTargetLength = 0; - for(uint pencili = 0; pencili < pencils.N; pencili++){ - - int L = pencils.lengthOfPencils[pencili]; - uint targetLength = L + 2; - vector pencilIds = pencils.getIds(pencili); - - bool debugPencilFlag = false; - - // Unpack the vector data - - // Loop over cells in pencil +- 1 padded cell - for ( uint celli = 0; celli < targetLength; ++celli ) { - - // // If the pencil is periodic, we do not write the ghost cells because - // // They are copies of cells that are already in the pencil - // // - It seems that doing this was wrong. Investigate! - // if(pencils.periodic[pencili] && (celli == 0 || celli == targetLength - 1)) - // continue; - - if(celli > 0 && pencilIds[celli - 1] == debugcell) debugPencilFlag = true; - - Realv vector[VECL]; - // Loop over 1st vspace dimension - for (uint k = 0; k < WID; ++k) { - // Loop over 2nd vspace dimension - for(uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { - targetVecData[i_trans_pt_blockv(planeVector, k, totalTargetLength + celli - 1)].store(vector); - // Loop over 3rd (vectorized) vspace dimension - for (uint i = 0; i < VECL; i++) { - targetBlockData[(totalTargetLength + celli) * WID3 + - cellid_transpose[i + planeVector * VECL + k * WID2]] - = vector[i]; - } - } - } - } - - // store values from targetBlockData array to the actual blocks - // Loop over cells in the pencil, including the padded cells of the target array - for ( uint celli = 0; celli < targetLength; celli++ ) { - - uint GID = celli + totalTargetLength; - SpatialCell* spatial_cell = targetCells[GID]; - - if(spatial_cell == NULL) { - // Invalid target spatial cell - continue; - } - - const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); - if (blockLID == vmesh::VelocityMesh::invalidLocalID()) { - // Invalid local id. 
- continue; - } - - Realf* blockData = spatial_cell->get_data(blockLID, popID); - for(int i = 0; i < WID3 ; i++) { - blockData[i] += targetBlockData[GID * WID3 + i]; - } - } - - if(debugflag && debugPencilFlag) cout << "TotalTargetLength = " << totalTargetLength << endl; - totalTargetLength += targetLength; - - // dealloc target data -- Should be automatic again? - } - } - - data = allCellsPointer[allCellsPointerIndex]->get_data(debugLID,popID); - } - } - - //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - - return true; -} diff --git a/vlasovsolver/cpu_trans_map.hpp b/vlasovsolver/cpu_trans_map.hpp index 0a97c8fbd..3f993cf7b 100644 --- a/vlasovsolver/cpu_trans_map.hpp +++ b/vlasovsolver/cpu_trans_map.hpp @@ -28,53 +28,21 @@ #include "../common.h" #include "../spatial_cell.hpp" -struct setOfPencils { - - uint N; // Number of pencils in the set - uint sumOfLengths; - std::vector lengthOfPencils; // Lengths of pencils - std::vector ids; // List of cells - std::vector x,y; // x,y - position - std::vector periodic; - - setOfPencils() { - N = 0; - sumOfLengths = 0; - } - - void addPencil(std::vector idsIn, Real xIn, Real yIn, bool periodicIn) { - - N += 1; - sumOfLengths += idsIn.size(); - lengthOfPencils.push_back(idsIn.size()); - ids.insert(ids.end(),idsIn.begin(),idsIn.end()); - x.push_back(xIn); - y.push_back(yIn); - periodic.push_back(periodicIn); - } - - std::vector getIds(const uint pencilId) const { - - std::vector idsOut; - - if (pencilId > N) { - return idsOut; - } - - CellID ibeg = 0; - for (uint i = 0; i < pencilId; i++) { - ibeg += lengthOfPencils[i]; - } - CellID iend = ibeg + lengthOfPencils[pencilId]; - - for (uint i = ibeg; i <= iend; i++) { - idsOut.push_back(ids[i]); - } - - return idsOut; - } - -}; +void compute_spatial_source_neighbors(const dccrg::Dccrg& mpiGrid, + const CellID& cellID,const uint dimension,SpatialCell **neighbors); +void compute_spatial_target_neighbors(const dccrg::Dccrg& mpiGrid, + const CellID& cellID,const uint dimension,SpatialCell **neighbors); +void copy_trans_block_data(SpatialCell** source_neighbors,const vmesh::GlobalID blockGID, + Vec* values,const unsigned char* const cellid_transpose,const uint popID); +CellID get_spatial_neighbor(const dccrg::Dccrg& mpiGrid, + const CellID& cellID,const bool include_first_boundary_layer, + const int spatial_di,const int spatial_dj,const int spatial_dk); +SpatialCell* get_spatial_neighbor_pointer(const dccrg::Dccrg& mpiGrid, + const CellID& cellID,const bool include_first_boundary_layer, + const int spatial_di,const int spatial_dj,const int spatial_dk); +void store_trans_block_data(SpatialCell** target_neighbors,const vmesh::GlobalID blockGID, + Vec* __restrict__ target_values, + const unsigned char* const cellid_transpose,const uint popID); bool do_translate_cell(spatial_cell::SpatialCell* SC); bool trans_map_1d(const dccrg::Dccrg& mpiGrid, @@ -106,21 +74,4 @@ void copy_trans_block_data(SpatialCell** source_neighbors, const unsigned char* const cellid_transpose, const uint popID); -bool trans_map_1d_amr(const dccrg::Dccrg& mpiGrid, - const std::vector& localPropagatedCells, - const std::vector& remoteTargetCells, - const uint dimension, - const Realv dt, - const uint popID); - -setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg &grid, - setOfPencils &pencils, CellID startingId, - std::vector ids, uint dimension, - std::vector path); - -void get_seed_ids(const dccrg::Dccrg& mpiGrid, - const std::vector &localPropagatedCells, - const uint dimension, - std::vector &seedIds); - #endif 
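The setOfPencils container dropped from this header moves to the new AMR
files. A simplified standalone sketch of its flattened storage idea (the type
and member names below are illustrative, not the Vlasiator struct): all cell
ids live in one vector, and pencil p is the contiguous slice that starts at
the sum of the preceding lengths. Using a half-open range keeps getIds from
reading one id past the end of the slice.

   #include <cstddef>
   #include <cstdio>
   #include <vector>

   using CellID = unsigned long long;

   struct SetOfPencils {
      std::vector<unsigned int> lengthOfPencils; // length of each pencil
      std::vector<CellID> ids;                   // all pencils, concatenated

      void addPencil(const std::vector<CellID>& idsIn) {
         lengthOfPencils.push_back(static_cast<unsigned int>(idsIn.size()));
         ids.insert(ids.end(), idsIn.begin(), idsIn.end());
      }

      std::vector<CellID> getIds(unsigned int pencilId) const {
         std::vector<CellID> out;
         if (pencilId >= lengthOfPencils.size()) return out;
         std::size_t begin = 0;
         for (unsigned int p = 0; p < pencilId; ++p) begin += lengthOfPencils[p];
         // Half-open range: exactly lengthOfPencils[pencilId] ids are copied.
         for (std::size_t i = begin; i < begin + lengthOfPencils[pencilId]; ++i)
            out.push_back(ids[i]);
         return out;
      }
   };

   int main() {
      SetOfPencils pencils;
      pencils.addPencil({1, 2, 3});
      pencils.addPencil({10, 11});
      for (CellID id : pencils.getIds(1)) std::printf("%llu ", id); // prints 10 11
      std::printf("\n");
      return 0;
   }

The x,y coordinates and the periodic flag carried by the real struct are
omitted here for brevity.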
diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index a2ed692ce..955c49aa7 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1,4 +1,4 @@ -#include "vlasovsolver/cpu_1d_ppm_nonuniform.hpp" +#include "cpu_1d_ppm_nonuniform.hpp" //#include "cpu_1d_ppm_nonuniform_conserving.hpp" #include "vec.h" #include "../grid.h" @@ -9,350 +9,119 @@ using namespace std; using namespace spatial_cell; -// void compute_spatial_source_neighbors(const dccrg::Dccrg& mpiGrid, -// const CellID& cellID,const uint dimension,SpatialCell **neighbors); -// void compute_spatial_target_neighbors(const dccrg::Dccrg& mpiGrid, -// const CellID& cellID,const uint dimension,SpatialCell **neighbors); -// void copy_trans_block_data(SpatialCell** source_neighbors,const vmesh::GlobalID blockGID, -// Vec* values,const unsigned char* const cellid_transpose,const uint popID); -// CellID get_spatial_neighbor(const dccrg::Dccrg& mpiGrid, -// const CellID& cellID,const bool include_first_boundary_layer, -// const int spatial_di,const int spatial_dj,const int spatial_dk); -// SpatialCell* get_spatial_neighbor_pointer(const dccrg::Dccrg& mpiGrid, -// const CellID& cellID,const bool include_first_boundary_layer, -// const int spatial_di,const int spatial_dj,const int spatial_dk); -// void store_trans_block_data(SpatialCell** target_neighbors,const vmesh::GlobalID blockGID, -// Vec* __restrict__ target_values, -// const unsigned char* const cellid_transpose,const uint popID); - -// // indices in padded source block, which is of type Vec with VECL -// // element sin each vector. b_k is the block index in z direction in -// // ordinary space [- VLASOV_STENCIL_WIDTH to VLASOV_STENCIL_WIDTH], -// // i,j,k are the cell ids inside on block (i in vector elements). -// // Vectors with same i,j,k coordinates, but in different spatial cells, are consequtive -// //#define i_trans_ps_blockv(j, k, b_k) ( (b_k + VLASOV_STENCIL_WIDTH ) + ( (((j) * WID + (k) * WID2)/VECL) * ( 1 + 2 * VLASOV_STENCIL_WIDTH) ) ) -// #define i_trans_ps_blockv(planeVectorIndex, planeIndex, blockIndex) ( (blockIndex) + VLASOV_STENCIL_WIDTH + ( (planeVectorIndex) + (planeIndex) * VEC_PER_PLANE ) * ( 1 + 2 * VLASOV_STENCIL_WIDTH) ) - -// // indices in padded target block, which is of type Vec with VECL -// // element sin each vector. b_k is the block index in z direction in -// // ordinary space, i,j,k are the cell ids inside on block (i in vector -// // elements). -// //#define i_trans_pt_blockv(j, k, b_k) ( ( (j) * WID + (k) * WID2 + ((b_k) + 1 ) * WID3) / VECL ) -// #define i_trans_pt_blockv(planeVectorIndex, planeIndex, blockIndex) ( planeVectorIndex + planeIndex * VEC_PER_PLANE + (blockIndex + 1) * VEC_PER_BLOCK) - -// //Is cell translated? It is not translated if DO_NO_COMPUTE or if it is sysboundary cell and not in first sysboundarylayer -// bool do_translate_cell(SpatialCell* SC){ -// if(SC->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE || -// (SC->sysBoundaryLayer != 1 && SC->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY)) -// return false; -// else -// return true; -// } - -// /* -// * return INVALID_CELLID if the spatial neighbor does not exist, or if -// * it is a cell that is not computed. If the -// * include_first_boundary_layer flag is set, then also first boundary -// * layer is inlcuded (does not return INVALID_CELLID). 
-// * This does not use dccrg's get_neighbor_of function as it does not support computing neighbors for remote cells -// */ -// CellID get_spatial_neighbor(const dccrg::Dccrg& mpiGrid, -// const CellID& cellID, -// const bool include_first_boundary_layer, -// const int spatial_di, -// const int spatial_dj, -// const int spatial_dk ) { -// dccrg::Types<3>::indices_t indices_unsigned = mpiGrid.mapping.get_indices(cellID); -// int64_t indices[3]; -// dccrg::Grid_Length::type length = mpiGrid.mapping.length.get(); - -// //compute raw new indices -// indices[0] = spatial_di + indices_unsigned[0]; -// indices[1] = spatial_dj + indices_unsigned[1]; -// indices[2] = spatial_dk + indices_unsigned[2]; - -// //take periodicity into account -// for(uint i = 0; i<3; i++) { -// if(mpiGrid.topology.is_periodic(i)) { -// while(indices[i] < 0 ) -// indices[i] += length[i]; -// while(indices[i] >= length[i] ) -// indices[i] -= length[i]; -// } -// } -// //return INVALID_CELLID for cells outside system (non-periodic) -// for(uint i = 0; i<3; i++) { -// if(indices[i]< 0) -// return INVALID_CELLID; -// if(indices[i]>=length[i]) -// return INVALID_CELLID; -// } -// //store nbr indices into the correct datatype -// for(uint i = 0; i<3; i++) { -// indices_unsigned[i] = indices[i]; -// } -// //get nbrID -// CellID nbrID = mpiGrid.mapping.get_cell_from_indices(indices_unsigned,0); -// if (nbrID == dccrg::error_cell ) { -// std::cerr << __FILE__ << ":" << __LINE__ -// << " No neighbor for cell?" << cellID -// << " at offsets " << spatial_di << ", " << spatial_dj << ", " << spatial_dk -// << std::endl; -// abort(); -// } - -// // not existing cell or do not compute -// if( mpiGrid[nbrID]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) -// return INVALID_CELLID; - -// //cell on boundary, but not first layer and we want to include -// //first layer (e.g. when we compute source cells) -// if( include_first_boundary_layer && -// mpiGrid[nbrID]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && -// mpiGrid[nbrID]->sysBoundaryLayer != 1 ) { -// return INVALID_CELLID; -// } - -// //cell on boundary, and we want none of the layers, -// //invalid.(e.g. when we compute targets) -// if( !include_first_boundary_layer && -// mpiGrid[nbrID]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY){ -// return INVALID_CELLID; -// } - -// return nbrID; //no AMR -// } - - -// /* -// * return NULL if the spatial neighbor does not exist, or if -// * it is a cell that is not computed. If the -// * include_first_boundary_layer flag is set, then also first boundary -// * layer is inlcuded (does not return INVALID_CELLID). -// * This does not use dccrg's get_neighbor_of function as it does not support computing neighbors for remote cells - - -// */ - -// SpatialCell* get_spatial_neighbor_pointer(const dccrg::Dccrg& mpiGrid, -// const CellID& cellID, -// const bool include_first_boundary_layer, -// const int spatial_di, -// const int spatial_dj, -// const int spatial_dk ) { -// CellID nbrID=get_spatial_neighbor(mpiGrid, cellID, include_first_boundary_layer, spatial_di, spatial_dj, spatial_dk); - -// if(nbrID!=INVALID_CELLID) -// return mpiGrid[nbrID]; -// else -// return NULL; -// } - -// /*compute spatial neighbors for source stencil with a size of 2* -// * VLASOV_STENCIL_WIDTH + 1, cellID at VLASOV_STENCIL_WIDTH. First -// * bondary layer included. Invalid cells are replaced by closest good -// * cells (i.e. 
boundary condition uses constant extrapolation for the -// * stencil values at boundaries*/ - -// void compute_spatial_source_neighbors(const dccrg::Dccrg& mpiGrid, -// const CellID& cellID, -// const uint dimension, -// SpatialCell **neighbors){ -// for(int i = -VLASOV_STENCIL_WIDTH; i <= VLASOV_STENCIL_WIDTH; i++){ -// switch (dimension){ -// case 0: -// neighbors[i + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, cellID, true, i, 0, 0); -// break; -// case 1: -// neighbors[i + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, cellID, true, 0, i, 0); -// break; -// case 2: -// neighbors[i + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, cellID, true, 0, 0, i); -// break; -// } -// } - -// SpatialCell* last_good_cell = mpiGrid[cellID]; -// /*loop to neative side and replace all invalid cells with the closest good cell*/ -// for(int i = -1;i>=-VLASOV_STENCIL_WIDTH;i--){ -// if(neighbors[i + VLASOV_STENCIL_WIDTH] == NULL) -// neighbors[i + VLASOV_STENCIL_WIDTH] = last_good_cell; -// else -// last_good_cell = neighbors[i + VLASOV_STENCIL_WIDTH]; -// } - -// last_good_cell = mpiGrid[cellID]; -// /*loop to positive side and replace all invalid cells with the closest good cell*/ -// for(int i = 1; i <= VLASOV_STENCIL_WIDTH; i++){ -// if(neighbors[i + VLASOV_STENCIL_WIDTH] == NULL) -// neighbors[i + VLASOV_STENCIL_WIDTH] = last_good_cell; -// else -// last_good_cell = neighbors[i + VLASOV_STENCIL_WIDTH]; -// } -// } - -// /*compute spatial target neighbors, stencil has a size of 3. No boundary cells are included*/ -// void compute_spatial_target_neighbors(const dccrg::Dccrg& mpiGrid, -// const CellID& cellID, -// const uint dimension, -// SpatialCell **neighbors){ - -// for(int i = -1; i <= 1; i++){ -// switch (dimension){ -// case 0: -// neighbors[i + 1] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, i, 0, 0); -// break; -// case 1: -// neighbors[i + 1] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, i, 0); -// break; -// case 2: -// neighbors[i + 1] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, 0, i); -// break; -// } -// } - -// } - -// /* Copy the data to the temporary values array, so that the -// * dimensions are correctly swapped. Also, copy the same block for -// * then neighboring spatial cells (in the dimension). neighbors -// * generated with compute_spatial_neighbors_wboundcond). -// * -// * This function must be thread-safe. -// * -// * @param source_neighbors Array containing the VLASOV_STENCIL_WIDTH closest -// * spatial neighbors of this cell in the propagated dimension. -// * @param blockGID Global ID of the velocity block. -// * @param values Vector where loaded data is stored. -// * @param cellid_transpose -// * @param popID ID of the particle species. 
-// */ -// inline void copy_trans_block_data( -// SpatialCell** source_neighbors, -// const vmesh::GlobalID blockGID, -// Vec* values, -// const unsigned char* const cellid_transpose, -// const uint popID) { - -// /*load pointers to blocks and prefetch them to L1*/ -// Realf* blockDatas[VLASOV_STENCIL_WIDTH * 2 + 1]; -// for (int b = -VLASOV_STENCIL_WIDTH; b <= VLASOV_STENCIL_WIDTH; ++b) { -// SpatialCell* srcCell = source_neighbors[b + VLASOV_STENCIL_WIDTH]; -// const vmesh::LocalID blockLID = srcCell->get_velocity_block_local_id(blockGID,popID); -// if (blockLID != srcCell->invalid_local_id()) { -// blockDatas[b + VLASOV_STENCIL_WIDTH] = srcCell->get_data(blockLID,popID); -// //prefetch storage pointers to L1 -// _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]), _MM_HINT_T0); -// _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 64, _MM_HINT_T0); -// _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 128, _MM_HINT_T0); -// _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 192, _MM_HINT_T0); -// if(VPREC == 8) { -// //prefetch storage pointers to L1 -// _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 256, _MM_HINT_T0); -// _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 320, _MM_HINT_T0); -// _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 384, _MM_HINT_T0); -// _mm_prefetch((char *)(blockDatas[b + VLASOV_STENCIL_WIDTH]) + 448, _MM_HINT_T0); -// } -// } -// else{ -// blockDatas[b + VLASOV_STENCIL_WIDTH] = NULL; -// } -// } - -// // Copy volume averages of this block from all spatial cells: -// for (int b = -VLASOV_STENCIL_WIDTH; b <= VLASOV_STENCIL_WIDTH; ++b) { -// if(blockDatas[b + VLASOV_STENCIL_WIDTH] != NULL) { -// Realv blockValues[WID3]; -// const Realf* block_data = blockDatas[b + VLASOV_STENCIL_WIDTH]; -// // Copy data to a temporary array and transpose values so that mapping is along k direction. -// // spatial source_neighbors already taken care of when -// // creating source_neighbors table. If a normal spatial cell does not -// // simply have the block, its value will be its null_block which -// // is fine. This null_block has a value of zero in data, and that -// // is thus the velocity space boundary -// for (uint i=0; i& mpiGrid, + setOfPencils pencils, + const uint iPencil, + const uint dimension, + SpatialCell **sourceCells){ + + // L = length of the pencil iPencil + int L = pencils.lengthOfPencils[iPencil]; + vector ids = pencils.getIds(iPencil); -// // now load values into the actual values table.. 
-// uint offset =0; -// for (uint k=0; k lengthOfPencils; // Lengths of pencils - std::vector ids; // List of cells - std::vector x,y; // x,y - position - - setOfPencils() { - N = 0; - sumOfLengths = 0; + for (int iCell = -VLASOV_STENCIL_WIDTH; iCell < L + VLASOV_STENCIL_WIDTH; iCell++) { + CellID cellID = ids[min(max(iCell, 0), L - 1)]; + + int i = 0; + if(iCell <= 0) i = iCell; + if(iCell >= L) i = iCell - (L - 1); + + switch (dimension) { + case 0: + sourceCells[iCell + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, i, 0, 0); + break; + case 1: + sourceCells[iCell + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, i, 0); + break; + case 2: + sourceCells[iCell + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, 0, i); + break; + } } - void addPencil(std::vector idsIn, Real xIn, Real yIn) { - - N += 1; - sumOfLengths += idsIn.size(); - lengthOfPencils.push_back(idsIn.size()); - ids.insert(ids.end(),idsIn.begin(),idsIn.end()); - x.push_back(xIn); - y.push_back(yIn); - + SpatialCell* last_good_cell = mpiGrid[ids.front()]; + /*loop to neative side and replace all invalid cells with the closest good cell*/ + for(int i = -1;i>=-VLASOV_STENCIL_WIDTH;i--){ + if(sourceCells[i + VLASOV_STENCIL_WIDTH] == NULL) + sourceCells[i + VLASOV_STENCIL_WIDTH] = last_good_cell; + else + last_good_cell = sourceCells[i + VLASOV_STENCIL_WIDTH]; } - std::vector getIds(uint pencilId) { - - vector idsOut; - - if (pencilId > N) { - return idsOut; - } + last_good_cell = mpiGrid[ids.back()]; + /*loop to positive side and replace all invalid cells with the closest good cell*/ + for(int i = L + 1; i <= L + VLASOV_STENCIL_WIDTH; i++){ + if(sourceCells[i + VLASOV_STENCIL_WIDTH] == NULL) + sourceCells[i + VLASOV_STENCIL_WIDTH] = last_good_cell; + else + last_good_cell = sourceCells[i + VLASOV_STENCIL_WIDTH]; + } +} - CellID ibeg = 0; - for (uint i = 0; i < pencilId; i++) { - ibeg += lengthOfPencils[i]; - } - CellID iend = ibeg + lengthOfPencils[pencilId]; - - for (uint i = ibeg; i <= iend; i++) { - idsOut.push_back(ids[i]); +/*compute spatial target neighbors for pencils of size N. No boundary cells are included*/ +void compute_spatial_target_cells_for_pencils(const dccrg::Dccrg& mpiGrid, + setOfPencils& pencils, + const uint dimension, + SpatialCell **targetCells){ + + uint GID = 0; + for(uint iPencil = 0; iPencil < pencils.N; iPencil++){ + // L = length of the pencil iPencil + int L = pencils.lengthOfPencils[iPencil]; + + vector ids = pencils.getIds(iPencil); + for (int iCell = -1; iCell <= L; iCell++) { + CellID cellID = ids[min(max(iCell,0),L - 1)]; + + int i = 0; + if(iCell == -1) i = -1; + if(iCell == L) i = 1; + + switch (dimension) { + case 0: + targetCells[GID + iCell + 1] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, i, 0, 0); + break; + case 1: + targetCells[GID + iCell + 1] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, i, 0); + break; + case 2: + targetCells[GID + iCell + 1] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, 0, i); + break; + } } - - return idsOut; + GID += (L + 2); } +} -}; - -CellID selectNeighbor(dccrg::Dccrg &grid, CellID id, int dimension = 0, uint path = 0) { +CellID selectNeighbor(const dccrg::Dccrg &grid, + CellID id, int dimension = 0, uint path = 0) { const auto neighbors = grid.get_face_neighbors_of(id); - + const int myProcess = grid.get_process(id); + vector < CellID > myNeighbors; - // Collect neighbor ids in the positive direction of the chosen dimension. 
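For reference, a minimal sketch (not part of the patch; face_neighbor_code is a hypothetical name) of the direction convention used below: dccrg's get_face_neighbors_of() returns (id, direction) pairs in which the direction is encoded as +/-(dimension + 1), i.e. +/-1 along x, +/-2 along y and +/-3 along z.

    // Hypothetical helper illustrating the encoding that selectNeighbor()
    // and get_seed_ids() test against neighbor.second.
    inline int face_neighbor_code(int dimension, bool positive) {
       // dimension: 0 = x, 1 = y, 2 = z
       return positive ? (dimension + 1) : -(dimension + 1);
    }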
+ // Collect neighbor ids in the positive direction of the chosen dimension, + // that are on the same process as the origin. // Note that dimension indexing starts from 1 (of course it does) for (const auto cell : neighbors) { - if (cell.second == dimension + 1) + if (cell.second == dimension + 1 && grid.get_process(cell.first) == myProcess) myNeighbors.push_back(cell.first); } @@ -386,7 +155,8 @@ CellID selectNeighbor(dccrg::Dccrg &grid, } -setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg grid, + +setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg &grid, setOfPencils &pencils, CellID startingId, vector ids, uint dimension, vector path) { @@ -432,9 +202,13 @@ setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg 0) { + periodic = false; + // Find the refinement level in the neighboring cell. Any neighbor will do // since refinement level can only increase by 1 between neighbors. nextNeighbor = selectNeighbor(grid,id,dimension); @@ -508,12 +282,22 @@ setOfPencils buildPencilsWithNeighbors( dccrg::Dccrg &vmesh, const uint lengthOfPencil, bool debugflag) { + + // Get velocity data from vmesh that we need later to calculate the translation + velocity_block_indices_t block_indices; + uint8_t refLevel; + vmesh.getIndices(blockGID,refLevel, block_indices[0], block_indices[1], block_indices[2]); + Realv dvz = vmesh.getCellSize(refLevel)[dimension]; + Realv vz_min = vmesh.getMeshMinLimits()[dimension]; + // Assuming 1 neighbor in the target array because of the CFL condition // In fact propagating to > 1 neighbor will give an error - const uint nTargetNeighborsPerCell = 1; - - // Determine direction of translation - // part of density goes here (cell index change along spatial direcion) - Vecb positiveTranslationDirection = (z_translation > Vec(0.0)); - //Veci target_scell_index = truncate_to_int(select(z_translation > Vec(0.0), 1, -1)); + const uint nTargetNeighborsPerPencil = 1; // Vector buffer where we write data, initialized to 0*/ - Vec targetValues[lengthOfPencil + 2 * nTargetNeighborsPerCell]; - - for (uint i_target = 0; i_target < lengthOfPencil + nTargetNeighborsPerCell; i_target++) { + Vec targetValues[(lengthOfPencil + 2 * nTargetNeighborsPerPencil) * WID3 / VECL]; + + for (uint i = 0; i < (lengthOfPencil + 2 * nTargetNeighborsPerPencil) * WID3 / VECL; i++) { // init target_values - targetValues[i_target] = 0.0; + targetValues[i] = Vec(0.0); } // Go from 0 to length here to propagate all the cells in the pencil for (uint i = 0; i < lengthOfPencil; i++){ - // We padded the target array by 1 cell on both sides - // Assume the source array has been padded by nSourceNeighborsPerCell - // To have room for propagation. Refer to dr and values by i_cell - // and targetValues by i_target - uint i_cell = i + nSourceNeighborsPerCell; - uint i_target = i + nTargetNeighborsPerCell; + // The source array is padded by VLASOV_STENCIL_WIDTH on both sides. + uint i_source = i + VLASOV_STENCIL_WIDTH; - // Calculate normalized coordinates in current cell. - // The coordinates (scaled units from 0 to 1) between which we will - // integrate to put mass in the target neighboring cell. - // Normalize the coordinates to the origin cell. Then we scale with the difference - // in volume between target and origin later when adding the integrated value. 
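The semi-Lagrangian update below is easier to follow in scalar form. A minimal sketch, assuming |z_translation| <= 1 (the CFL limit checked in the code) and PPM coefficients a[0..2] as produced by compute_ppm_coeff_nonuniform; translated_mass is a hypothetical name, with plain double standing in for Vec:

    // Fraction of a cell's density handed to the downstream neighbor when the
    // cell content moves by z_translation cell widths during one time step.
    double translated_mass(double z_translation, const double a[3]) {
       // Integration bounds in cell-normalized coordinates [0,1]: a positive
       // translation donates the slab [1 - z, 1] to the +z neighbor, a
       // negative one donates [0, -z] to the -z neighbor.
       const double z_1 = (z_translation > 0.0) ? 1.0 - z_translation : 0.0;
       const double z_2 = (z_translation > 0.0) ? 1.0 : -z_translation;
       // Same polynomial evaluation as the vectorized code below.
       return z_2 * (a[0] + z_2 * (a[1] + z_2 * a[2]))
            - z_1 * (a[0] + z_1 * (a[1] + z_1 * a[2]));
    }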
- Vec z_1,z_2; - z_1 = select(positiveTranslationDirection, 1.0 - z_translation / dr[i_cell], 0.0); - z_2 = select(positiveTranslationDirection, 1.0, - z_translation / dr[i_cell]); - - if( horizontal_or(abs(z_1) > Vec(1.0)) || horizontal_or(abs(z_2) > Vec(1.0)) ) { - std::cout << "Error, CFL condition violated\n"; - std::cout << "Exiting\n"; - std::exit(1); - } - - // Compute polynomial coefficients - Vec a[3]; - compute_ppm_coeff_nonuniform(dr, values, h4, i_cell, a); - - // Compute integral - const Vec ngbr_target_density = - z_2 * ( a[0] + z_2 * ( a[1] + z_2 * a[2] ) ) - - z_1 * ( a[0] + z_1 * ( a[1] + z_1 * a[2] ) ); + for (uint k = 0; k < WID; ++k) { + + const Realv cell_vz = (block_indices[dimension] * WID + k + 0.5) * dvz + vz_min; //cell centered velocity + const Vec z_translation = cell_vz * dt / dz[i_source]; // how much it moved in time dt (reduced units) + + // Determine direction of translation + // part of density goes here (cell index change along spatial direction) + Vecb positiveTranslationDirection = (z_translation > Vec(0.0)); + + // Calculate normalized coordinates in current cell. + // The coordinates (scaled units from 0 to 1) between which we will + // integrate to put mass in the target neighboring cell. + // Normalize the coordinates to the origin cell. Then we scale with the difference + // in volume between target and origin later when adding the integrated value. + Vec z_1,z_2; + z_1 = select(positiveTranslationDirection, 1.0 - z_translation, 0.0); + z_2 = select(positiveTranslationDirection, 1.0, - z_translation); + + if( horizontal_or(abs(z_1) > Vec(1.0)) || horizontal_or(abs(z_2) > Vec(1.0)) ) { + std::cout << "Error, CFL condition violated\n"; + std::cout << "Exiting\n"; + std::exit(1); + } + + for (uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { - // Store mapped density in two target cells - // in the neighbor cell we will put this density - //targetValues[i_cell + target_scell_index] += ngbr_target_density * dr[i_cell] / dr[i_cell + target_scell_index]; - targetValues[i_target + 1] += select( positiveTranslationDirection,ngbr_target_density * dr[i_cell] / dr[i_cell + 1],Vec(0.0)); - targetValues[i_target - 1] += select(!positiveTranslationDirection,ngbr_target_density * dr[i_cell] / dr[i_cell - 1],Vec(0.0)); - // in the current original cells we will put the rest of the original density - targetValues[i_target] += values[i_cell] - ngbr_target_density; + // Compute polynomial coefficients + Vec a[3]; + // Dz: is a padded array, the pointer can point to the beginning; i + VLASOV_STENCIL_WIDTH will get the right cell. + // values: the transpose function adds VLASOV_STENCIL_WIDTH to the block index, therefore we subtract it here; + // i + VLASOV_STENCIL_WIDTH will then point to the right cell.
+ compute_ppm_coeff_nonuniform(dz, + values + i_trans_ps_blockv_pencil(planeVector, k, i-VLASOV_STENCIL_WIDTH, lengthOfPencil), + h4, VLASOV_STENCIL_WIDTH, a); + + // Compute integral + const Vec ngbr_target_density = + z_2 * ( a[0] + z_2 * ( a[1] + z_2 * a[2] ) ) - + z_1 * ( a[0] + z_1 * ( a[1] + z_1 * a[2] ) ); + // Store mapped density in two target cells + // in the neighbor cell we will put this density + targetValues[i_trans_pt_blockv(planeVector, k, i + 1)] += select( positiveTranslationDirection, + ngbr_target_density * dz[i_source] / dz[i_source + 1],Vec(0.0)); + targetValues[i_trans_pt_blockv(planeVector, k, i - 1 )] += select(!positiveTranslationDirection, + ngbr_target_density * dz[i_source] / dz[i_source - 1],Vec(0.0)); + + // in the current original cells we will put the rest of the original density + targetValues[i_trans_pt_blockv(planeVector, k, i)] += + values[i_trans_ps_blockv_pencil(planeVector, k, i, lengthOfPencil)] - ngbr_target_density; + } + } } - // Store target data into source data - for (uint i=0; i < lengthOfPencil; i++){ + // Write target data into source data + // VLASOV_STENCIL_WIDTH >= nTargetNeighborsPerPencil is required (default 2 >= 1) - uint i_cell = i + nSourceNeighborsPerCell; - uint i_target = i + nTargetNeighborsPerCell; - - values[i_cell] = targetValues[i_target]; - + for (uint i = 0; i < lengthOfPencil + 2 * nTargetNeighborsPerPencil; i++) { + + for (uint k = 0; k < WID; ++k) { + + for (uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { + int im1 = i - 1; // doing this to shut up compiler warnings + values[i_trans_ps_blockv_pencil(planeVector, k, im1, lengthOfPencil)] = + targetValues[i_trans_pt_blockv(planeVector, k, im1)]; + + } + } + } +} + +void get_seed_ids(const dccrg::Dccrg& mpiGrid, + const vector &localPropagatedCells, + const uint dimension, + vector &seedIds) { + + const bool debug = false; + + //#pragma omp parallel for + for(auto celli: localPropagatedCells) { + int myProcess = mpiGrid.get_process(celli); + // Collect a list of cell ids that do not have a neighbor in the negative direction + // These are the seed ids for the pencils. + vector negativeNeighbors; + // Returns all neighbors as (id, direction-dimension) pairs. + for ( const auto neighbor : mpiGrid.get_face_neighbors_of(celli ) ) { + + if ( mpiGrid.get_process(neighbor.first) == myProcess ) { + // select the neighbor in the negative dimension of the propagation + if (neighbor.second == - (static_cast(dimension) + 1)) { + + // add the id of the neighbor to a list if it's on the same process + negativeNeighbors.push_back(neighbor.first); + + } + + } + } + //cout << endl; + // if no neighbors were found in the negative direction, add this cell id to the seed cells + if (negativeNeighbors.size() == 0) + seedIds.push_back(celli); + } + + // If no seed ids were found, let's assume we have a periodic boundary and + // a single process in the dimension of propagation. 
In this case we start from + the first cells of the plane perpendicular to the propagation dimension. + if (seedIds.size() == 0) { + for (uint ix = 0; ix < P::xcells_ini; ix++) { + for (uint iy = 0; iy < P::ycells_ini; iy++) { + for (uint iz = 0; iz < P::zcells_ini; iz++) { + CellID seedId; + switch (dimension) { + case 0: + // yz - plane + if(ix == 0) { + seedId = P::xcells_ini * P::ycells_ini * iz + P::xcells_ini * iy + 1; + if(mpiGrid.get_process(seedId) == mpiGrid.get_process(localPropagatedCells[0])) + seedIds.push_back(seedId); + + } + break; + case 1: + // xz - plane + if(iy == 0) { + seedId = P::xcells_ini * P::ycells_ini * iz + ix + 1; + if(mpiGrid.get_process(seedId) == mpiGrid.get_process(localPropagatedCells[0])) + seedIds.push_back(seedId); + + } + break; + case 2: + // xy - plane + if(iz == 0) { + seedId = P::xcells_ini * iy + ix + 1; + if(mpiGrid.get_process(seedId) == mpiGrid.get_process(localPropagatedCells[0])) + seedIds.push_back(seedId); + } + break; + } + } + } + } + } + + if(debug) { + cout << "Number of seed ids is " << seedIds.size() << endl; + cout << "Seed ids are: "; + for (const auto seedId : seedIds) { + cout << seedId << " "; + } + cout << endl; + } +} + + + + +/* Copy the data to the temporary values array, so that the + * dimensions are correctly swapped. Also, copy the same block for + * the neighboring spatial cells (in the dimension). Neighbors are + * generated with compute_spatial_neighbors_wboundcond. + * + * This function must be thread-safe. + * + * @param source_neighbors Array containing the VLASOV_STENCIL_WIDTH closest + * spatial neighbors of this cell in the propagated dimension. + * @param blockGID Global ID of the velocity block. + * @param lengthOfPencil Number of spatial cells in the pencil + * @param values Vector where loaded data is stored. + * @param cellid_transpose + * @param popID ID of the particle species. + */ +void copy_trans_block_data_amr( + SpatialCell** source_neighbors, + const vmesh::GlobalID blockGID, + int lengthOfPencil, + Vec* values, + const unsigned char* const cellid_transpose, + const uint popID) { + + // Allocate data pointer for all blocks in pencil.
Pad on both ends by VLASOV_STENCIL_WIDTH + Realf* blockDataPointer[lengthOfPencil + 2 * VLASOV_STENCIL_WIDTH]; + + for (int b = -VLASOV_STENCIL_WIDTH; b < lengthOfPencil + VLASOV_STENCIL_WIDTH; b++) { + // Get cell pointer and local block id + SpatialCell* srcCell = source_neighbors[b + VLASOV_STENCIL_WIDTH]; + + const vmesh::LocalID blockLID = srcCell->get_velocity_block_local_id(blockGID,popID); + if (blockLID != srcCell->invalid_local_id()) { + // Get data pointer + blockDataPointer[b + VLASOV_STENCIL_WIDTH] = srcCell->get_data(blockLID,popID); + // //prefetch storage pointers to L1 + // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]), _MM_HINT_T0); + // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 64, _MM_HINT_T0); + // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 128, _MM_HINT_T0); + // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 192, _MM_HINT_T0); + // if(VPREC == 8) { + // //prefetch storage pointers to L1 + // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 256, _MM_HINT_T0); + // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 320, _MM_HINT_T0); + // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 384, _MM_HINT_T0); + // _mm_prefetch((char *)(blockDataPointer[b + VLASOV_STENCIL_WIDTH]) + 448, _MM_HINT_T0); + // } + + } else { + blockDataPointer[b + VLASOV_STENCIL_WIDTH] = NULL; + } + } + + // Copy volume averages of this block from all spatial cells: + for (int b = -VLASOV_STENCIL_WIDTH; b < lengthOfPencil + VLASOV_STENCIL_WIDTH; b++) { + if(blockDataPointer[b + VLASOV_STENCIL_WIDTH] != NULL) { + Realv blockValues[WID3]; + const Realf* block_data = blockDataPointer[b + VLASOV_STENCIL_WIDTH]; + // Copy data to a temporary array and transpose values so that mapping is along k direction. + // spatial source_neighbors already taken care of when + // creating source_neighbors table. If a normal spatial cell does not + // simply have the block, its value will be its null_block which + // is fine. 
This null_block has a value of zero in data, and that + // is thus the velocity space boundary + for (uint i=0; i& mpiGrid, - const vector& localPropagatedCells, - const vector& remoteTargetCells, - const uint dimension, - const Realv dt, - const uint popID) { + const vector& localPropagatedCells, + const vector& remoteTargetCells, + const uint dimension, + const Realv dt, + const uint popID) { Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ unsigned char cellid_transpose[WID3]; /*< defines the transpose for the solver internal (transposed) id: i + j*WID + k*WID2 to actual one*/ const uint blocks_per_dim = 1; - - cout << "entering trans_map_1d_amr" << endl; - // return if there's no cells to propagate if(localPropagatedCells.size() == 0) { - //std::cout << "Returning because of no cells" << std::endl; - cerr << "Returning because of no cells" << endl; + cout << "Returning because of no cells" << endl; return false; } // Vector with all cell ids vector allCells(localPropagatedCells); - allCells.insert(allCells.end(), remoteTargetCells.begin(), remoteTargetCells.end()); - - const uint nSourceNeighborsPerCell = 1 + 2 * VLASOV_STENCIL_WIDTH; + allCells.insert(allCells.end(), remoteTargetCells.begin(), remoteTargetCells.end()); // Vectors of pointers to the cell structs - std::vector allCellsPointer(allCells.size()); - std::vector sourceNeighbors(localPropagatedCells.size() * nSourceNeighborsPerCell); - std::vector targetNeighbors(3 * localPropagatedCells.size() ); - - Vec allCellsDz[allCells.size()]; + std::vector allCellsPointer(allCells.size()); // Initialize allCellsPointer -#pragma omp parallel for - for(uint celli = 0; celli < allCells.size(); celli++){ + #pragma omp parallel for + for(uint celli = 0; celli < allCells.size(); celli++){ allCellsPointer[celli] = mpiGrid[allCells[celli]]; - - // At the same time, calculate dz's and store them in an array. - allCellsDz[celli] = P::dz_ini / pow(2.0, mpiGrid.get_refinement_level(celli)); } - - // **************************************************************************** - - // compute pencils => set of pencils (shared datastructure) - vector seedIds; - //#pragma omp parallel for - for(uint celli = 0; celli < localPropagatedCells.size(); celli++){ - CellID localCelli = localPropagatedCells[celli]; - // Collect a list of cell ids that do not have a neighbor in the negative direction - // These are the seed ids for the pencils. - vector negativeNeighbors; - // Returns all neighbors as (id, direction-dimension) pairs. - for ( const auto neighbor : mpiGrid.get_face_neighbors_of(localCelli ) ) { - - // select the neighbor in the negative dimension of the propagation - if (neighbor.second == - (dimension + 1)) - - // add the id of the neighbor to a list - negativeNeighbors.push_back(neighbor.first); - } - // if no neighbors were found in the negative direction, add this cell id to the seed cells - if (negativeNeighbors.size() == 0) - seedIds.push_back(localCelli); - } - - // Empty vectors for internal use of buildPencilsWithNeighbors. Could be default values but - // default vectors are complicated. Should overload buildPencilsWithNeighbors like suggested here - // https://stackoverflow.com/questions/3147274/c-default-argument-for-vectorint - vector ids; - vector path; - - // Output vectors for ready pencils - setOfPencils pencils; - vector pencilSets; - - for (const auto seedId : seedIds) { - // Construct pencils from the seedIds into a set of pencils. 
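In scalar terms, the seed rule implemented both by the inline code being removed here and by get_seed_ids above keeps a cell exactly when it has no same-process face neighbor in the negative propagation direction. A sketch of that predicate (hypothetical free function; types and dccrg calls as used elsewhere in this file):

    bool is_seed_cell(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& grid,
                      CellID id, uint dimension) {
       const int myProcess = grid.get_process(id);
       for (const auto& nbr : grid.get_face_neighbors_of(id)) {
          // -(dimension + 1) encodes the negative face direction.
          if (nbr.second == -(static_cast<int>(dimension) + 1) &&
              grid.get_process(nbr.first) == myProcess) {
             return false; // an upstream neighbor on this rank: the pencil continues
          }
       }
       return true;
    }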
- pencils = buildPencilsWithNeighbors(mpiGrid, pencils, seedId, ids, dimension, path); - } - // Add the final set of pencils to the pencilSets - vector. - // Only one set is created for now but we retain support for multiple sets - pencilSets.push_back(pencils); - // **************************************************************************** - - // Fiddle indices x,y,z + // Fiddle indices x,y,z in VELOCITY SPACE switch (dimension) { case 0: // set values in array that is used to convert block indices @@ -762,171 +687,285 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } } } - // **************************************************************************** + // **************************************************************************** - const uint8_t VMESH_REFLEVEL = 0; - - // Get a pointer to the velocity mesh of the first spatial cell - const vmesh::VelocityMesh& vmesh = allCellsPointer[0]->get_velocity_mesh(popID); + // compute pencils => set of pencils (shared datastructure) - // set cell size in dimension direction - dvz = vmesh.getCellSize(VMESH_REFLEVEL)[dimension]; - vz_min = vmesh.getMeshMinLimits()[dimension]; - - // Get a unique sorted list of blockids that are in any of the - // propagated cells. First use set for this, then add to vector (may not - // be the most nice way to do this and in any case we could do it along - // dimension for data locality reasons => copy acc map column code, TODO: FIXME - // TODO: Do this separately for each pencil? - std::unordered_set unionOfBlocksSet; - - for(auto cell : allCellsPointer) { - vmesh::VelocityMesh& vmesh = cell->get_velocity_mesh(popID); - for (vmesh::LocalID block_i=0; block_i< vmesh.size(); ++block_i) { - unionOfBlocksSet.insert(vmesh.getGlobalID(block_i)); + vector seedIds; + get_seed_ids(mpiGrid, localPropagatedCells, dimension, seedIds); + + // Empty vectors for internal use of buildPencilsWithNeighbors. Could be default values but + // default vectors are complicated. Should overload buildPencilsWithNeighbors like suggested here + // https://stackoverflow.com/questions/3147274/c-default-argument-for-vectorint + vector ids; + vector path; + + // Output vectors for ready pencils + setOfPencils pencils; + vector pencilSets; + + for (const auto seedId : seedIds) { + // Construct pencils from the seedIds into a set of pencils. + pencils = buildPencilsWithNeighbors(mpiGrid, pencils, seedId, ids, dimension, path); + } + + // Print out ids of pencils (if needed for debugging) + if (false) { + uint ibeg = 0; + uint iend = 0; + std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; + std::cout << "(x, y): indices " << std::endl; + std::cout << "-----------------------------------------------------------------" << std::endl; + for (uint i = 0; i < pencils.N; i++) { + iend += pencils.lengthOfPencils[i]; + std::cout << "(" << pencils.x[i] << ", " << pencils.y[i] << "): "; + for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { + std::cout << *j << " "; } + ibeg = iend; + std::cout << std::endl; } - - std::vector unionOfBlocks; - unionOfBlocks.reserve(unionOfBlocksSet.size()); - for(const auto blockGID: unionOfBlocksSet) { - unionOfBlocks.push_back(blockGID); + } + + + // Add the final set of pencils to the pencilSets - vector. 
+ // Only one set is created for now but we retain support for multiple sets + pencilSets.push_back(pencils); + // **************************************************************************** + + const uint8_t VMESH_REFLEVEL = 0; + + // Get a pointer to the velocity mesh of the first spatial cell + const vmesh::VelocityMesh& vmesh = allCellsPointer[0]->get_velocity_mesh(popID); + + // set cell size in dimension direction + dvz = vmesh.getCellSize(VMESH_REFLEVEL)[dimension]; + vz_min = vmesh.getMeshMinLimits()[dimension]; + + // Get a unique sorted list of blockids that are in any of the + // propagated cells. First use set for this, then add to vector (may not + // be the most nice way to do this and in any case we could do it along + // dimension for data locality reasons => copy acc map column code, TODO: FIXME + // TODO: Do this separately for each pencil? + std::unordered_set unionOfBlocksSet; + + for(auto cell : allCellsPointer) { + vmesh::VelocityMesh& vmesh = cell->get_velocity_mesh(popID); + for (vmesh::LocalID block_i=0; block_i< vmesh.size(); ++block_i) { + unionOfBlocksSet.insert(vmesh.getGlobalID(block_i)); } - // **************************************************************************** - - int t1 = phiprof::initializeTimer("mappingAndStore"); - + } + + std::vector unionOfBlocks; + unionOfBlocks.reserve(unionOfBlocksSet.size()); + for(const auto blockGID: unionOfBlocksSet) { + unionOfBlocks.push_back(blockGID); + } + // **************************************************************************** + + int t1 = phiprof::initializeTimer("mappingAndStore"); + #pragma omp parallel - { - //std::vector targetsValid(localPropagatedCells.size()); - //std::vector allCellsBlockLocalID(allCells.size()); - + { + // Loop over velocity space blocks. Thread this loop (over vspace blocks) with OpenMP. #pragma omp for schedule(guided) - // Loop over velocity space blocks. Thread this loop (over vspace blocks) with OpenMP. 
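The union-of-blocks gather above can be exercised in isolation. A self-contained sketch, with std::uint32_t standing in for vmesh::GlobalID and per-cell block-id lists standing in for velocity meshes; note that std::unordered_set itself guarantees no particular order, so an explicit sort is what would make the "unique sorted list" of the comment (and a deterministic loop order) actually hold:

    #include <algorithm>
    #include <cstdint>
    #include <unordered_set>
    #include <vector>

    using GlobalID = std::uint32_t; // stand-in for vmesh::GlobalID

    std::vector<GlobalID> union_of_blocks(
          const std::vector<std::vector<GlobalID>>& blocksPerCell) {
       std::unordered_set<GlobalID> blockSet;
       for (const auto& blocks : blocksPerCell) {
          blockSet.insert(blocks.begin(), blocks.end());
       }
       std::vector<GlobalID> result(blockSet.begin(), blockSet.end());
       std::sort(result.begin(), result.end()); // deterministic iteration order
       return result;
    }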
- for(uint blocki = 0; blocki < unionOfBlocks.size(); blocki++){ - - phiprof::start(t1); - - // Get global id of the velocity block - vmesh::GlobalID blockGID = unionOfBlocks[blocki]; - - velocity_block_indices_t block_indices; - uint8_t vRefLevel; - vmesh.getIndices(blockGID,vRefLevel, block_indices[0], - block_indices[1], block_indices[2]); + for(uint blocki = 0; blocki < unionOfBlocks.size(); blocki++){ + + phiprof::start(t1); + + // Get global id of the velocity block + vmesh::GlobalID blockGID = unionOfBlocks[blocki]; + + bool debugflag = false; + CellID debugcell; + uint allCellsPointerIndex = 16; + + const vmesh::LocalID debugLID = allCellsPointer[allCellsPointerIndex]->get_velocity_block_local_id(blockGID, popID); + Realf* data = allCellsPointer[allCellsPointerIndex]->get_data(debugLID,popID); + //for (uint i = 0; i < WID3; i++) if (data[i] != 0) debugflag = true; + velocity_block_indices_t block_indices; + uint8_t vRefLevel; + vmesh.getIndices(blockGID,vRefLevel, block_indices[0], + block_indices[1], block_indices[2]); - // Loop over sets of pencils - // This loop only has one iteration for now - for ( auto pencils: pencilSets ) { - - // Allocate targetdata sum(lengths of pencils)*WID3) - Vec targetData[pencils.sumOfLengths * WID3]; + // Loop over sets of pencils + // This loop only has one iteration for now + for ( auto pencils: pencilSets ) { + + std::vector targetBlockData((pencils.sumOfLengths + 2 * pencils.N) * WID3); + // Allocate vectorized targetvecdata sum(lengths of pencils)*WID3 / VECL) + // Add padding by 2 for each pencil + Vec targetVecData[(pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL]; + + // Initialize targetvecdata to 0 + for( uint i = 0; i < (pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL; i++ ) { + targetVecData[i] = Vec(0.0); + } - // Initialize targetdata to 0 - for( uint i = 0; i < pencils.sumOfLengths * WID3; i++ ) { - targetData[i] = 0.0; + // TODO: There's probably a smarter way to keep track of where we are writing + // in the target data array. + uint targetDataIndex = 0; + + // Compute spatial neighbors for target cells. + // For targets we need the local cells, plus a padding of 1 cell at both ends + std::vector targetCells(pencils.sumOfLengths + pencils.N * 2 ); + + compute_spatial_target_cells_for_pencils(mpiGrid, pencils, dimension, targetCells.data()); + + // Loop over pencils + uint totalTargetLength = 0; + for(uint pencili = 0; pencili < pencils.N; pencili++){ + + vector pencilIds = pencils.getIds(pencili); + int L = pencils.lengthOfPencils[pencili]; + uint targetLength = L + 2; + uint sourceLength = L + 2 * VLASOV_STENCIL_WIDTH; + + // Compute spatial neighbors for the source cells of the pencil. In + // source cells we have a wider stencil and take into account boundaries. + std::vector sourceCells(sourceLength); + compute_spatial_source_cells_for_pencil(mpiGrid, pencils, pencili, dimension, sourceCells.data()); + + + Vec dz[sourceCells.size()]; + uint i = 0; + for(auto neighbor: sourceCells) { + switch (dimension) { + case(0): + dz[i] = neighbor->SpatialCell::parameters[CellParams::DX]; + break; + case(1): + dz[i] = neighbor->SpatialCell::parameters[CellParams::DY]; + break; + case(2): + dz[i] = neighbor->SpatialCell::parameters[CellParams::DZ]; + break; + } + i++; } - - // TODO: There's probably a smarter way to keep track of where we are writing - // in the target data structure. - uint targetDataIndex = 0; - - // Compute spatial neighbors for target cells. 
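The pencil bookkeeping used inside this loop comes from the setOfPencils struct declared in cpu_trans_map_amr.hpp further down. A minimal usage sketch with hypothetical cell ids and coordinates:

    setOfPencils pencils;
    pencils.addPencil(std::vector<CellID>{1, 2, 3}, 0.5, 0.5, false); // ids, x, y, periodic
    pencils.addPencil(std::vector<CellID>{4, 5}, 1.5, 0.5, false);
    // Now pencils.N == 2 and pencils.sumOfLengths == 5; getIds() walks
    // lengthOfPencils to find a pencil's offset in the flat ids vector:
    std::vector<CellID> second = pencils.getIds(1); // {4, 5}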
- // For targets we only have actual cells as we do not - // want to propagate boundary cells (array may contain - // INVALID_CELLIDs at boundaries). - for ( auto celli: pencils.ids ) { - compute_spatial_target_neighbors(mpiGrid, localPropagatedCells[celli], dimension, - targetNeighbors.data() + celli * 3); + + // Allocate source data: sourcedata sourcedata) / (proper xy reconstruction in future) + copy_trans_block_data_amr(sourceCells.data(), blockGID, L, sourceVecData, + cellid_transpose, popID); + + // Dz and sourceVecData are both padded by VLASOV_STENCIL_WIDTH + // Dz has 1 value/cell, sourceVecData has WID3 values/cell + propagatePencil(dz, sourceVecData, dimension, blockGID, dt, vmesh, L, debugflag); + + // sourcedata => targetdata[this pencil]) + for (uint i = 0; i < targetLength; i++) { + for (uint k=0; k pencilIds = pencils.getIds(pencili); - for( auto celli: pencilIds) { - compute_spatial_source_neighbors(mpiGrid, localPropagatedCells[celli], - dimension, sourceNeighbors.data() - + celli * nSourceNeighborsPerCell); - } - - Vec * dzPointer = allCellsDz + pencilIds[0]; + // reset blocks in all non-sysboundary neighbor spatial cells for this block id + // At this point the data is saved in targetVecData so we can reset the spatial cells + for (auto *spatial_cell: targetCells) { + // Check for system boundary + if(spatial_cell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + // Get local velocity block id + const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); + // Check for invalid id + if (blockLID != vmesh::VelocityMesh::invalidLocalID()) { + // Get a pointer to the block data + Realf* blockData = spatial_cell->get_data(blockLID, popID); + // Loop over velocity block cells + for(int i = 0; i < WID3; i++) { + blockData[i] = 0.0; + } + } + } + } + + // store_data(target_data => targetCells) :Aggregate data for blockid to original location + // Loop over pencils again + totalTargetLength = 0; + for(uint pencili = 0; pencili < pencils.N; pencili++){ + + int L = pencils.lengthOfPencils[pencili]; + uint targetLength = L + 2; + vector pencilIds = pencils.getIds(pencili); + + bool debugPencilFlag = false; + + // Unpack the vector data + + // Loop over cells in pencil +- 1 padded cell + for ( uint celli = 0; celli < targetLength; ++celli ) { + + // // If the pencil is periodic, we do not write the ghost cells because + // // They are copies of cells that are already in the pencil + // // - It seems that doing this was wrong. Investigate! + // if(pencils.periodic[pencili] && (celli == 0 || celli == targetLength - 1)) + // continue; - // load data(=> sourcedata) / (proper xy reconstruction in future) - // copied from regular code, should work? - int offset = 0; // TODO: Figure out what needs to go here. 
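The dz gathering above picks the spatial cell size along the propagation dimension; the same switch recurs several times in this file and could live in a small helper (hypothetical name, assuming the surrounding Vlasiator headers):

    Real cell_size_along(const SpatialCell* cell, const uint dimension) {
       switch (dimension) {
          case 0:  return cell->parameters[CellParams::DX];
          case 1:  return cell->parameters[CellParams::DY];
          default: return cell->parameters[CellParams::DZ];
       }
    }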
- copy_trans_block_data(sourceNeighbors.data() + offset, - blockGID, sourceData, cellid_transpose, popID); - + if(celli > 0 && pencilIds[celli - 1] == debugcell) debugPencilFlag = true; - // Calculate cell centered velocity for each v cell in the block - const Vec k = (0,1,2,3); - const Vec cell_vz = (block_indices[dimension] * WID + k + 0.5) * dvz + vz_min; - - const Vec z_translation = dt * cell_vz; - // propagate pencil(blockid = velocities, pencil-ids = dzs ), - propagatePencil(dzPointer, sourceData, z_translation, pencils.lengthOfPencils[pencili], nSourceNeighborsPerCell); - - // sourcedata => targetdata[this pencil]) - for (auto value: sourceData) { - targetData[targetDataIndex] = value; - targetDataIndex++; + Realv vector[VECL]; + // Loop over 1st vspace dimension + for (uint k = 0; k < WID; ++k) { + // Loop over 2nd vspace dimension + for(uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { + targetVecData[i_trans_pt_blockv(planeVector, k, totalTargetLength + celli - 1)].store(vector); + // Loop over 3rd (vectorized) vspace dimension + for (uint i = 0; i < VECL; i++) { + targetBlockData[(totalTargetLength + celli) * WID3 + + cellid_transpose[i + planeVector * VECL + k * WID2]] + = vector[i]; + } + } } - - // dealloc source data -- Should be automatic since it's declared in this iteration? - } - - // Loop over pencils again - for(uint pencili = 0; pencili < pencils.N; pencili++){ - // store_data(target_data =>) :Aggregate data for blockid to original location + // store values from targetBlockData array to the actual blocks + // Loop over cells in the pencil, including the padded cells of the target array + for ( uint celli = 0; celli < targetLength; celli++ ) { - //store values from target_values array to the actual blocks - for(auto celli: pencils.ids) { - //TODO: Figure out validity check later - //if(targetsValid[celli]) { - for(uint ti = 0; ti < 3; ti++) { - SpatialCell* spatial_cell = targetNeighbors[celli * 3 + ti]; - if(spatial_cell ==NULL) { - //invalid target spatial cell - continue; - } - - // Get local ID of the velocity block - const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); - - if (blockLID == vmesh::VelocityMesh::invalidLocalID()) { - // block does not exist. If so, we do not create it and add stuff to it here. - // We have already created blocks around blocks with content in - // spatial sense, so we have no need to create even more blocks here - // TODO add loss counter - continue; - } - // Pointer to the data field of the velocity block - Realf* blockData = spatial_cell->get_data(blockLID, popID); - // Unpack the vector data to the cell data types - for(int i = 0; i < WID3 ; i++) { - - // Write data into target block - blockData[i] += targetData[(celli * 3 + ti)][i]; - } - } - //} - + uint GID = celli + totalTargetLength; + SpatialCell* spatial_cell = targetCells[GID]; + + if(spatial_cell == NULL) { + // Invalid target spatial cell + continue; } - // dealloc target data -- Should be automatic again? + const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); + if (blockLID == vmesh::VelocityMesh::invalidLocalID()) { + // Invalid local id. 
+ continue; + } + + Realf* blockData = spatial_cell->get_data(blockLID, popID); + for(int i = 0; i < WID3 ; i++) { + blockData[i] += targetBlockData[GID * WID3 + i]; + } + } + + if(debugflag && debugPencilFlag) cout << "TotalTargetLength = " << totalTargetLength << endl; + totalTargetLength += targetLength; + + // dealloc target data -- Should be automatic again? + } + } + + data = allCellsPointer[allCellsPointerIndex]->get_data(debugLID,popID); + } + } - return true; - } + + //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + + return true; +} diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index 0b5b02b1b..e27e115c1 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -27,14 +27,100 @@ #include "vec.h" #include "../common.h" #include "../spatial_cell.hpp" -//bool do_translate_cell(spatial_cell::SpatialCell* SC); + +struct setOfPencils { + + uint N; // Number of pencils in the set + uint sumOfLengths; + std::vector<uint> lengthOfPencils; // Lengths of pencils + std::vector<CellID> ids; // List of cells + std::vector<Real> x,y; // x,y - position + std::vector<bool> periodic; + + setOfPencils() { + N = 0; + sumOfLengths = 0; + } + + void addPencil(std::vector<CellID> idsIn, Real xIn, Real yIn, bool periodicIn) { + + N += 1; + sumOfLengths += idsIn.size(); + lengthOfPencils.push_back(idsIn.size()); + ids.insert(ids.end(),idsIn.begin(),idsIn.end()); + x.push_back(xIn); + y.push_back(yIn); + periodic.push_back(periodicIn); + } + + std::vector<CellID> getIds(const uint pencilId) const { + + std::vector<CellID> idsOut; + + if (pencilId >= N) { + return idsOut; + } + + CellID ibeg = 0; + for (uint i = 0; i < pencilId; i++) { + ibeg += lengthOfPencils[i]; + } + CellID iend = ibeg + lengthOfPencils[pencilId]; + + for (uint i = ibeg; i < iend; i++) { + idsOut.push_back(ids[i]); + } + + return idsOut; + } + +}; + +void compute_spatial_source_cells_for_pencil(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid, + setOfPencils pencils, + const uint iPencil, + const uint dimension, + SpatialCell **sourceCells); + + +void compute_spatial_target_cells_for_pencils(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid, + setOfPencils& pencils, + const uint dimension, + SpatialCell **targetCells); + +CellID selectNeighbor(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry> &grid, + CellID id, int dimension, uint path); + +void propagatePencil(Vec* dz, Vec* values, const uint dimension, const uint blockGID, + const Realv dt, + const vmesh::VelocityMesh<vmesh::GlobalID,vmesh::LocalID> &vmesh, + const uint lengthOfPencil, bool debugflag); + +void copy_trans_block_data_amr( + SpatialCell** source_neighbors, + const vmesh::GlobalID blockGID, + int lengthOfPencil, + Vec* values, + const unsigned char* const cellid_transpose, + const uint popID); + + +setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry> &grid, + setOfPencils &pencils, CellID startingId, + std::vector<CellID> ids, uint dimension, + std::vector<uint> path); + +void get_seed_ids(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid, + const std::vector<CellID> &localPropagatedCells, + const uint dimension, + std::vector<CellID> &seedIds); + + bool trans_map_1d_amr(const dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid, const std::vector<CellID>& localPropagatedCells, const std::vector<CellID>& remoteTargetCells, const uint dimension, const Realv dt, const uint popID); -//void update_remote_mapping_contribution(dccrg::Dccrg& mpiGrid, -// const uint dimension,int direction,const uint popID); #endif From 3b272c0be751d9fa7ea14cf895a499f91d1203b0 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 18 Sep 2018 10:40:51 +0300 Subject: [PATCH 057/602] Removed unnecessary {}s --- vlasovsolver/cpu_trans_map_amr.cpp | 17
++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 955c49aa7..dff4a657f 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -130,25 +130,24 @@ CellID selectNeighbor(const dccrg::Dccrg switch( myNeighbors.size() ) { // Since refinement can only increase by 1 level the only possibilities // Should be 0 neighbors, 1 neighbor or 4 neighbors. - case 0 : { + case 0 : // did not find neighbors neighbor = INVALID_CELLID; break; - } - case 1 : { + + case 1 : neighbor = myNeighbors[0]; break; - } - case 4 : { + + case 4 : neighbor = myNeighbors[path]; break; - } - default: { + + default: // something is wrong neighbor = INVALID_CELLID; throw "Invalid neighbor count!"; - break; - } + break; } return neighbor; From 87ae99b5933c9a6f97237cd40464d9d9d9fbfb41 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 18 Sep 2018 11:20:40 +0300 Subject: [PATCH 058/602] Changed C-style arrays in spatial_cell to c++11 std::arrays. --- poisson_solver/poisson_solver.cpp | 2 +- poisson_solver/poisson_solver_cg.cpp | 54 ++++++++++++------------ poisson_solver/poisson_solver_sor.cpp | 54 ++++++++++++------------ spatial_cell.cpp | 37 +++++++++-------- spatial_cell.hpp | 59 +++++++++++++++------------ sysboundary/antisymmetric.cpp | 2 +- 6 files changed, 108 insertions(+), 100 deletions(-) diff --git a/poisson_solver/poisson_solver.cpp b/poisson_solver/poisson_solver.cpp index f242d032a..d9c54be7e 100644 --- a/poisson_solver/poisson_solver.cpp +++ b/poisson_solver/poisson_solver.cpp @@ -78,7 +78,7 @@ namespace poisson { // Fetch pointers for (size_t c=0; cparameters; + Poisson::localCellParams[c] = mpiGrid[cells[c]]->parameters.data(); } } diff --git a/poisson_solver/poisson_solver_cg.cpp b/poisson_solver/poisson_solver_cg.cpp index e6fda4deb..843bae136 100644 --- a/poisson_solver/poisson_solver_cg.cpp +++ b/poisson_solver/poisson_solver_cg.cpp @@ -186,7 +186,7 @@ namespace poisson { CellCache3D cache; cache.cellID = cells[c]; cache.cell = mpiGrid[cells[c]]; - cache[0] = mpiGrid[cells[c]]->parameters; + cache[0] = mpiGrid[cells[c]]->parameters.data(); #ifdef DEBUG_POISSON_CG if (cache.cell == NULL) { @@ -205,8 +205,8 @@ namespace poisson { case sysboundarytype::NOT_SYSBOUNDARY: // Fetch pointers to this cell's (cell) parameters array, // and pointers to +/- xyz face neighbors' arrays - indices[0] -= 1; cache[1] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; - indices[0] += 2; cache[2] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; + indices[0] -= 1; cache[1] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); + indices[0] += 2; cache[2] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); indices[0] -= 1; if (indices[1] == 2) { @@ -218,10 +218,10 @@ namespace poisson { cache[3] = dummy->get_cell_parameters(); } } else { - indices[1] -= 1; cache[3] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; + indices[1] -= 1; cache[3] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); } - indices[1] += 2; cache[4] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; + indices[1] += 2; cache[4] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); indices[1] -= 1; break; @@ -232,11 +232,11 @@ namespace poisson { indices[0] -= 1; dummy = mpiGrid[ 
mpiGrid.mapping.get_cell_from_indices(indices,0) ]; if (dummy == NULL) cache[1] = bndryCellParams; - else cache[1] = dummy->parameters; + else cache[1] = dummy->parameters.data(); indices[0] += 2; dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; if (dummy == NULL) cache[2] = bndryCellParams; - else cache[2] = dummy->parameters; + else cache[2] = dummy->parameters.data(); indices[0] -= 1; // Set +/- y-neighbors both point to +y neighbor @@ -249,7 +249,7 @@ namespace poisson { if (dummy == NULL) { cache[4] = bndryCellParams; } else { - cache[4] = dummy->parameters; + cache[4] = dummy->parameters.data(); } indices[1] -= 1; } else { @@ -259,7 +259,7 @@ namespace poisson { if (dummy == NULL) { cache[3] = bndryCellParams; } else { - cache[3] = dummy->parameters; + cache[3] = dummy->parameters.data(); } indices[1] += 1; } @@ -270,24 +270,24 @@ namespace poisson { dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; //if (dummy == NULL) cache[1] = bndryCellParams; if (dummy == NULL) continue; - else cache[1] = dummy->parameters; + else cache[1] = dummy->parameters.data(); indices[0] += 2; dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; //if (dummy == NULL) cache[2] = bndryCellParams; if (dummy == NULL) continue; - else cache[2] = dummy->parameters; + else cache[2] = dummy->parameters.data(); indices[0] -= 1; indices[1] -= 1; dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; //if (dummy == NULL) cache[3] = bndryCellParams; if (dummy == NULL) continue; - else cache[3] = dummy->parameters; + else cache[3] = dummy->parameters.data(); indices[1] += 2; dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; //if (dummy == NULL) cache[4] = bndryCellParams; if (dummy == NULL) continue; - else cache[4] = dummy->parameters; + else cache[4] = dummy->parameters.data(); indices[1] -= 1; break; } @@ -316,18 +316,18 @@ namespace poisson { // Fetch pointers to this cell's (cell) parameters array, // and pointers to +/- xyz face neighbors' arrays cache.cell = mpiGrid[cells[c]]; - cache[0] = mpiGrid[cells[c]]->parameters; + cache[0] = mpiGrid[cells[c]]->parameters.data(); - indices[0] -= 1; cache[1] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; - indices[0] += 2; cache[2] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; + indices[0] -= 1; cache[1] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); + indices[0] += 2; cache[2] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); indices[0] -= 1; - indices[1] -= 1; cache[3] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; - indices[1] += 2; cache[4] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; + indices[1] -= 1; cache[3] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); + indices[1] += 2; cache[4] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); indices[1] -= 1; - indices[2] -= 1; cache[5] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; - indices[2] += 2; cache[6] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; + indices[2] -= 1; cache[5] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); + indices[2] += 2; cache[6] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); indices[2] -= 1; redCache.push_back(cache); @@ -340,18 +340,18 @@ 
namespace poisson { // Fetch pointers to this cell's (cell) parameters array, // and pointers to +/- xyz face neighbors' arrays cache.cell = mpiGrid[cells[c]]; - cache[0] = mpiGrid[cells[c]]->parameters; + cache[0] = mpiGrid[cells[c]]->parameters.data(); - indices[0] -= 1; cache[1] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; - indices[0] += 2; cache[2] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; + indices[0] -= 1; cache[1] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); + indices[0] += 2; cache[2] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); indices[0] -= 1; - indices[1] -= 1; cache[3] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; - indices[1] += 2; cache[4] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; + indices[1] -= 1; cache[3] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); + indices[1] += 2; cache[4] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); indices[1] -= 1; - indices[2] -= 1; cache[5] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; - indices[2] += 2; cache[6] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; + indices[2] -= 1; cache[5] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); + indices[2] += 2; cache[6] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); indices[2] -= 1; blackCache.push_back(cache); diff --git a/poisson_solver/poisson_solver_sor.cpp b/poisson_solver/poisson_solver_sor.cpp index d3f7d1fdf..f042e04aa 100644 --- a/poisson_solver/poisson_solver_sor.cpp +++ b/poisson_solver/poisson_solver_sor.cpp @@ -313,7 +313,7 @@ namespace poisson { CellCache3D cache; cache.cellID = cells[c]; cache.cell = mpiGrid[cells[c]]; - cache[0] = mpiGrid[cells[c]]->parameters; + cache[0] = mpiGrid[cells[c]]->parameters.data(); #ifdef DEBUG_POISSON_SOR if (cache.cell == NULL) { @@ -332,12 +332,12 @@ namespace poisson { case sysboundarytype::NOT_SYSBOUNDARY: // Fetch pointers to this cell's (cell) parameters array, // and pointers to +/- xyz face neighbors' arrays - indices[0] -= 1; cache[1] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; - indices[0] += 2; cache[2] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; + indices[0] -= 1; cache[1] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); + indices[0] += 2; cache[2] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); indices[0] -= 1; - indices[1] -= 1; cache[3] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; - indices[1] += 2; cache[4] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; + indices[1] -= 1; cache[3] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); + indices[1] += 2; cache[4] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); indices[1] -= 1; break; @@ -346,11 +346,11 @@ namespace poisson { indices[0] -= 1; dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; if (dummy == NULL) cache[1] = bndryCellParams; - else cache[1] = dummy->parameters; + else cache[1] = dummy->parameters.data(); indices[0] += 2; dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; if (dummy == NULL) cache[2] = 
bndryCellParams; - else cache[2] = dummy->parameters; + else cache[2] = dummy->parameters.data(); indices[0] -= 1; // Set +/- y-neighbors both point to +y neighbor @@ -363,7 +363,7 @@ namespace poisson { if (dummy == NULL) { cache[4] = bndryCellParams; } else { - cache[4] = dummy->parameters; + cache[4] = dummy->parameters.data(); } indices[1] -= 1; } else { @@ -373,7 +373,7 @@ namespace poisson { if (dummy == NULL) { cache[3] = bndryCellParams; } else { - cache[3] = dummy->parameters; + cache[3] = dummy->parameters.data(); } indices[1] += 1; } @@ -383,21 +383,21 @@ namespace poisson { indices[0] -= 1; dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; if (dummy == NULL) cache[1] = bndryCellParams; - else cache[1] = dummy->parameters; + else cache[1] = dummy->parameters.data(); indices[0] += 2; dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; if (dummy == NULL) cache[2] = bndryCellParams; - else cache[2] = dummy->parameters; + else cache[2] = dummy->parameters.data(); indices[0] -= 1; indices[1] -= 1; dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; if (dummy == NULL) cache[3] = bndryCellParams; - else cache[3] = dummy->parameters; + else cache[3] = dummy->parameters.data(); indices[1] += 2; dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; if (dummy == NULL) cache[4] = bndryCellParams; - else cache[4] = dummy->parameters; + else cache[4] = dummy->parameters.data(); indices[1] -= 1; break; } @@ -429,18 +429,18 @@ namespace poisson { // Fetch pointers to this cell's (cell) parameters array, // and pointers to +/- xyz face neighbors' arrays cache.cell = mpiGrid[cells[c]]; - cache[0] = mpiGrid[cells[c]]->parameters; + cache[0] = mpiGrid[cells[c]]->parameters.data(); - indices[0] -= 1; cache[1] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; - indices[0] += 2; cache[2] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; + indices[0] -= 1; cache[1] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); + indices[0] += 2; cache[2] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); indices[0] -= 1; - indices[1] -= 1; cache[3] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; - indices[1] += 2; cache[4] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; + indices[1] -= 1; cache[3] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); + indices[1] += 2; cache[4] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); indices[1] -= 1; - indices[2] -= 1; cache[5] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; - indices[2] += 2; cache[6] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; + indices[2] -= 1; cache[5] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); + indices[2] += 2; cache[6] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); indices[2] -= 1; redCache.push_back(cache); @@ -451,18 +451,18 @@ namespace poisson { // Fetch pointers to this cell's (cell) parameters array, // and pointers to +/- xyz face neighbors' arrays cache.cell = mpiGrid[cells[c]]; - cache[0] = mpiGrid[cells[c]]->parameters; + cache[0] = mpiGrid[cells[c]]->parameters.data(); - indices[0] -= 1; cache[1] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; - indices[0] += 2; cache[2] = mpiGrid[ 
mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; + indices[0] -= 1; cache[1] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); + indices[0] += 2; cache[2] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); indices[0] -= 1; - indices[1] -= 1; cache[3] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; - indices[1] += 2; cache[4] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; + indices[1] -= 1; cache[3] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); + indices[1] += 2; cache[4] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); indices[1] -= 1; - indices[2] -= 1; cache[5] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; - indices[2] += 2; cache[6] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters; + indices[2] -= 1; cache[5] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); + indices[2] += 2; cache[6] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); indices[2] -= 1; blackCache.push_back(cache); diff --git a/spatial_cell.cpp b/spatial_cell.cpp index a719a4870..72d426c9a 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -79,23 +79,26 @@ namespace spatial_cell { velocity_block_with_no_content_list(other.velocity_block_with_no_content_list), initialized(other.initialized), mpiTransferEnabled(other.mpiTransferEnabled), - populations(other.populations) { - - //copy parameters - for(unsigned int i=0;i< CellParams::N_SPATIAL_CELL_PARAMS;i++){ - parameters[i]=other.parameters[i]; - } - //copy derivatives - for(unsigned int i=0;i< fieldsolver::N_SPATIAL_CELL_DERIVATIVES;i++){ - derivatives[i]=other.derivatives[i]; - } - //copy BVOL derivatives - for(unsigned int i=0;i< bvolderivatives::N_BVOL_DERIVATIVES;i++){ - derivativesBVOL[i]=other.derivativesBVOL[i]; - } - - //set null block data - for (unsigned int i=0; i {}) { + + // //copy parameters + // for(unsigned int i=0;i< CellParams::N_SPATIAL_CELL_PARAMS;i++){ + // parameters[i]=other.parameters[i]; + // } + // //copy derivatives + // for(unsigned int i=0;i< fieldsolver::N_SPATIAL_CELL_DERIVATIVES;i++){ + // derivatives[i]=other.derivatives[i]; + // } + // //copy BVOL derivatives + // for(unsigned int i=0;i< bvolderivatives::N_BVOL_DERIVATIVES;i++){ + // derivativesBVOL[i]=other.derivativesBVOL[i]; + // } + // //set null block data + // for (unsigned int i=0; i derivatives; /**< Derivatives of bulk variables in this spatial cell.*/ + //Real derivativesBVOL[bvolderivatives::N_BVOL_DERIVATIVES]; /**< Derivatives of BVOL needed by the acceleration. + // * Separate array because it does not need to be communicated.*/ + std::array derivativesBVOL; /**< Derivatives of BVOL needed by the acceleration. 
+ * Separate array because it does not need to be communicated.*/ + //Real parameters[CellParams::N_SPATIAL_CELL_PARAMS]; /**< Bulk variables in this spatial cell.*/ + std::array parameters; + //Realf null_block_data[WID3]; + std::array null_block_data; uint64_t ioLocalCellId; /**< Local cell ID used for IO, not needed elsewhere * and thus not being kept up-to-date.*/ @@ -414,7 +419,7 @@ namespace spatial_cell { if (nbrIDs.size() > 0) { // This block has at least one existing neighbor if (refLevelDiff == -1) { // Neighbor is one level coarser, interpolate - if (nbrIDs[0] == invalid_local_id()) ptr = null_block_data; // (this check might not be necessary here) + if (nbrIDs[0] == invalid_local_id()) ptr = null_block_data.data(); // (this check might not be necessary here) else ptr = src + nbrIDs[0]*WID3; for (uint32_t i=0; i 0) { // This block has at least one existing neighbor if (refLevelDiff == -1) { // Neighbor is one level coarser, interpolate - if (nbrIDs[0] == invalid_local_id()) ptr = null_block_data; // (this check might not be necessary here) + if (nbrIDs[0] == invalid_local_id()) ptr = null_block_data.data(); // (this check might not be necessary here) else ptr = src + nbrIDs[0]*WID3; for (uint32_t j=0; j 0) { // This block has at least one existing neighbor if (refLevelDiff == -1) { // Neighbor is one level coarser, interpolate - if (nbrIDs[0] == invalid_local_id()) ptr = null_block_data; // (this check might not be necessary here) + if (nbrIDs[0] == invalid_local_id()) ptr = null_block_data.data(); // (this check might not be necessary here) else ptr = src + nbrIDs[0]*WID3; for (uint32_t k=0; k 0) { if (refLevelDiff == -1) { // nbr one level coarser, interpolate - if (nbrIDs[0] == invalid_local_id()) ptr = null_block_data; + if (nbrIDs[0] == invalid_local_id()) ptr = null_block_data.data(); else ptr = src + nbrIDs[0]*WID3; for (uint32_t i=0; i(i_trgt+i,j+PAD,k+PAD)] = vblock::interp_yz(pos,ptr); } } else if (refLevelDiff == 0) { // nbr at same level, simple data copy - if (nbrIDs[0] == invalid_local_id()) ptr = null_block_data; + if (nbrIDs[0] == invalid_local_id()) ptr = null_block_data.data(); else ptr = src + nbrIDs[0]*WID3; uint32_t i_src = 0; if (i_nbr_off < 0) i_src = WID-PAD; @@ -639,7 +644,7 @@ namespace spatial_cell { } else if (refLevelDiff == +1) { // nbr one level more refined, interpolate from four neighbors for (uint32_t i=0; i 0) { if (refLevelDiff == -1) { // nbr one level coarser, interpolate - if (nbrIDs[0] == invalid_local_id()) ptr = null_block_data; + if (nbrIDs[0] == invalid_local_id()) ptr = null_block_data.data(); else ptr = src + nbrIDs[0]*WID3; for (uint32_t j=0; j(i+PAD,j_trgt+j,k+PAD)] = vblock::interp_xz(pos,ptr); } } else if (refLevelDiff == 0) { // nbr at same level, simple data copy - if (nbrIDs[0] == invalid_local_id()) ptr = null_block_data; + if (nbrIDs[0] == invalid_local_id()) ptr = null_block_data.data(); else ptr = src + nbrIDs[0]*WID3; uint32_t j_src = 0; if (j_nbr_off < 0) j_src = WID-PAD; @@ -686,7 +691,7 @@ namespace spatial_cell { } else if (refLevelDiff == +1) { // nbr one level more refined, interpolate from four neighbors for (uint32_t j=0; j 0) { if (refLevelDiff == -1) { // nbr one level coarser, interpolate - if (nbrIDs[0] == invalid_local_id()) ptr = null_block_data; + if (nbrIDs[0] == invalid_local_id()) ptr = null_block_data.data(); else ptr = src + nbrIDs[0]*WID3; for (uint32_t k=0; k(i+PAD,j+PAD,k_trgt+k)] = vblock::interp_xy(pos,ptr); } } else if (refLevelDiff == 0) { // nbr at same level, simple data copy - if (nbrIDs[0] 
== invalid_local_id()) ptr = null_block_data; + if (nbrIDs[0] == invalid_local_id()) ptr = null_block_data.data(); else ptr = src + nbrIDs[0]*WID3; uint32_t k_src = 0; if (k_nbr_off < 0) k_src = WID-PAD; @@ -733,7 +738,7 @@ namespace spatial_cell { } else if (refLevelDiff == +1) { // nbr one level more refined, interpolate from four neighbors for (uint32_t k=0; k::invalidLocalID()) return null_block_data; + if (blockLID == vmesh::VelocityMesh::invalidLocalID()) return null_block_data.data(); return populations[popID].blockContainer.getData(blockLID); } @@ -813,7 +818,7 @@ namespace spatial_cell { exit(1); } #endif - if (blockLID == vmesh::VelocityMesh::invalidLocalID()) return null_block_data; + if (blockLID == vmesh::VelocityMesh::invalidLocalID()) return null_block_data.data(); return populations[popID].blockContainer.getData(blockLID); } @@ -872,11 +877,11 @@ namespace spatial_cell { } inline Real* SpatialCell::get_cell_parameters() { - return parameters; + return parameters.data(); } inline const Real* SpatialCell::get_cell_parameters() const { - return parameters; + return parameters.data(); } inline uint8_t SpatialCell::get_maximum_refinement_level(const uint popID) { diff --git a/sysboundary/antisymmetric.cpp b/sysboundary/antisymmetric.cpp index 2976bae3f..a21f48d6f 100644 --- a/sysboundary/antisymmetric.cpp +++ b/sysboundary/antisymmetric.cpp @@ -97,7 +97,7 @@ namespace SBC { const vector& cells = getLocalCells(); for (size_t c=0; csysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) continue; - creal* const cellParams = mpiGrid[cells[c]]->parameters; + creal* const cellParams = mpiGrid[cells[c]]->parameters.data(); creal dx = cellParams[CellParams::DX]; creal dy = cellParams[CellParams::DY]; creal dz = cellParams[CellParams::DZ]; From b593784542f6f90e5731eed1fbc112d9c8af1e78 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 18 Sep 2018 14:28:28 +0300 Subject: [PATCH 059/602] Changes to calls of setBackgroundField in various projects related to the previous change. In order to get the pointer to the data in the c++11 std::array, one has to call the .data() method. 
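In sketch form, the difference at a call site looks like this (CellSketch, takesPointer and the array size are placeholders for illustration, not the real SpatialCell interface):

    #include <array>
    using Real = double;

    struct CellSketch {
       // Real parameters[8];             // old: the name decays to Real* implicitly
       std::array<Real, 8> parameters;    // new: std::array has no implicit decay
    };

    void takesPointer(Real* p) { p[0] = 0.0; }

    int main() {
       CellSketch cell;
       // takesPointer(cell.parameters);       // compiled with the raw array, fails with std::array
       takesPointer(cell.parameters.data());   // explicit pointer to the underlying storage
       return 0;
    }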
--- projects/Diffusion/Diffusion.cpp | 2 +- projects/Dispersion/Dispersion.cpp | 2 +- projects/Distributions/Distributions.cpp | 2 +- projects/Flowthrough/Flowthrough.cpp | 4 ++-- projects/Fluctuations/Fluctuations.cpp | 4 ++-- projects/Harris/Harris.cpp | 2 +- projects/IPShock/IPShock.cpp | 2 +- projects/Larmor/Larmor.cpp | 2 +- projects/Magnetosphere/Magnetosphere.cpp | 16 ++++++++-------- projects/MultiPeak/MultiPeak.cpp | 2 +- projects/Shocktest/Shocktest.cpp | 2 +- projects/Template/Template.cpp | 4 ++-- projects/VelocityBox/VelocityBox.cpp | 2 +- projects/test_fp/test_fp.cpp | 2 +- projects/test_trans/test_trans.cpp | 2 +- .../verificationLarmor/verificationLarmor.cpp | 2 +- sysboundary/outflow.cpp | 2 +- 17 files changed, 27 insertions(+), 27 deletions(-) diff --git a/projects/Diffusion/Diffusion.cpp b/projects/Diffusion/Diffusion.cpp index 28bf2855a..abc7f3ecd 100644 --- a/projects/Diffusion/Diffusion.cpp +++ b/projects/Diffusion/Diffusion.cpp @@ -136,6 +136,6 @@ namespace projects { void Diffusion::setCellBackgroundField(SpatialCell* cell) { ConstantField bgField; bgField.initialize(0,0,this->B0); //bg bx, by,bz - setBackgroundField(bgField,cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); } } // namespace projects diff --git a/projects/Dispersion/Dispersion.cpp b/projects/Dispersion/Dispersion.cpp index fe35c47c2..c3902f148 100644 --- a/projects/Dispersion/Dispersion.cpp +++ b/projects/Dispersion/Dispersion.cpp @@ -260,6 +260,6 @@ namespace projects { this->B0 * sin(this->angleXY) * cos(this->angleXZ), this->B0 * sin(this->angleXZ)); - setBackgroundField(bgField,cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); } } // namespace projects diff --git a/projects/Distributions/Distributions.cpp b/projects/Distributions/Distributions.cpp index 39374885d..ad76e9da8 100644 --- a/projects/Distributions/Distributions.cpp +++ b/projects/Distributions/Distributions.cpp @@ -173,7 +173,7 @@ namespace projects { this->By, this->Bz); - setBackgroundField(bgField,cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); } vector> Distributions::getV0( diff --git a/projects/Flowthrough/Flowthrough.cpp b/projects/Flowthrough/Flowthrough.cpp index 486e0b036..678a18f55 100644 --- a/projects/Flowthrough/Flowthrough.cpp +++ b/projects/Flowthrough/Flowthrough.cpp @@ -211,8 +211,8 @@ namespace projects { void Flowthrough::setCellBackgroundField(spatial_cell::SpatialCell* cell) const { ConstantField bgField; - bgField.initialize(Bx,By,Bz); //bg bx, by,bz - setBackgroundField(bgField,cell->parameters, cell->derivatives,cell->derivativesBVOL); + bgField.initialize(Bx,By,Bz); //bg bx, by,bz + setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); } std::vector > Flowthrough::getV0( diff --git a/projects/Fluctuations/Fluctuations.cpp b/projects/Fluctuations/Fluctuations.cpp index f181cd353..c5b4a09e3 100644 --- a/projects/Fluctuations/Fluctuations.cpp +++ b/projects/Fluctuations/Fluctuations.cpp @@ -183,8 +183,8 @@ namespace projects { bgField.initialize(this->BX0, this->BY0, this->BZ0); - - setBackgroundField(bgField,cell->parameters, cell->derivatives,cell->derivativesBVOL); + + 
setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); } std::vector > Fluctuations::getV0( diff --git a/projects/Harris/Harris.cpp b/projects/Harris/Harris.cpp index efe01e303..ede212799 100644 --- a/projects/Harris/Harris.cpp +++ b/projects/Harris/Harris.cpp @@ -167,7 +167,7 @@ namespace projects { } void Harris::setCellBackgroundField(SpatialCell *cell) const { - setBackgroundFieldToZero(cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundFieldToZero(cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); } } // namespace projects diff --git a/projects/IPShock/IPShock.cpp b/projects/IPShock/IPShock.cpp index d3ca4da00..6babb2818 100644 --- a/projects/IPShock/IPShock.cpp +++ b/projects/IPShock/IPShock.cpp @@ -492,7 +492,7 @@ namespace projects { } void IPShock::setCellBackgroundField(spatial_cell::SpatialCell* cell) const { - setBackgroundFieldToZero(cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundFieldToZero(cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); } }//namespace projects diff --git a/projects/Larmor/Larmor.cpp b/projects/Larmor/Larmor.cpp index 2312ff6df..4d0065c93 100644 --- a/projects/Larmor/Larmor.cpp +++ b/projects/Larmor/Larmor.cpp @@ -161,7 +161,7 @@ namespace projects { this->BY0, this->BZ0); - setBackgroundField(bgField,cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); } } //namespace projects diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index 136ee8538..a19a983f5 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -235,7 +235,7 @@ namespace projects { /* set 0-centered dipole */ void Magnetosphere::setCellBackgroundField(SpatialCell *cell) const { if(cell->sysBoundaryFlag == sysboundarytype::SET_MAXWELLIAN && this->noDipoleInSW) { - setBackgroundFieldToZero(cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundFieldToZero(cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); } else { Dipole bgFieldDipole; @@ -248,29 +248,29 @@ namespace projects { switch(this->dipoleType) { case 0: bgFieldDipole.initialize(8e15 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, 0.0 );//set dipole moment - setBackgroundField(bgFieldDipole,cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundField(bgFieldDipole,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); break; case 1: bgFieldLineDipole.initialize(126.2e6 *this->dipoleScalingFactor, 0.0, 0.0, 0.0 );//set dipole moment - setBackgroundField(bgFieldLineDipole,cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundField(bgFieldLineDipole,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); break; case 2: bgFieldLineDipole.initialize(126.2e6 *this->dipoleScalingFactor, 0.0, 0.0, 0.0 );//set dipole moment - setBackgroundField(bgFieldLineDipole,cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundField(bgFieldLineDipole,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); //Append mirror dipole bgFieldLineDipole.initialize(126.2e6 *this->dipoleScalingFactor, this->dipoleMirrorLocationX, 0.0, 0.0 ); - setBackgroundField(bgFieldLineDipole,cell->parameters, 
cell->derivatives,cell->derivativesBVOL, true); + setBackgroundField(bgFieldLineDipole,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data(), true); break; case 3: bgFieldDipole.initialize(8e15 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, 0.0 );//set dipole moment - setBackgroundField(bgFieldDipole,cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundField(bgFieldDipole,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); //Append mirror dipole bgFieldDipole.initialize(8e15 *this->dipoleScalingFactor, this->dipoleMirrorLocationX, 0.0, 0.0, 0.0 );//mirror - setBackgroundField(bgFieldDipole,cell->parameters, cell->derivatives,cell->derivativesBVOL, true); + setBackgroundField(bgFieldDipole,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data(), true); break; default: - setBackgroundFieldToZero(cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundFieldToZero(cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); } } diff --git a/projects/MultiPeak/MultiPeak.cpp b/projects/MultiPeak/MultiPeak.cpp index 0f086d63f..ad0e4f58c 100644 --- a/projects/MultiPeak/MultiPeak.cpp +++ b/projects/MultiPeak/MultiPeak.cpp @@ -235,7 +235,7 @@ namespace projects { this->By, this->Bz); - setBackgroundField(bgField,cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); } std::vector > MultiPeak::getV0( diff --git a/projects/Shocktest/Shocktest.cpp b/projects/Shocktest/Shocktest.cpp index ba3eb85d6..c260b43f5 100644 --- a/projects/Shocktest/Shocktest.cpp +++ b/projects/Shocktest/Shocktest.cpp @@ -226,7 +226,7 @@ namespace projects { void Shocktest::setCellBackgroundField(SpatialCell* cell) { ConstantField bgField; bgField.initialize(0,0,0); //bg bx, by,bz - setBackgroundField(bgField,cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); } } // Namespace projects diff --git a/projects/Template/Template.cpp b/projects/Template/Template.cpp index d6afa922a..04e9fc863 100644 --- a/projects/Template/Template.cpp +++ b/projects/Template/Template.cpp @@ -75,9 +75,9 @@ namespace projects { Dipole bgField; bgField.initialize(8e15, 0.0, 0.0, 0.0, 0.0); //set dipole moment and location if(cell->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { - setBackgroundFieldToZero(cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundFieldToZero(cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); } else { - setBackgroundField(bgField,cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); } } diff --git a/projects/VelocityBox/VelocityBox.cpp b/projects/VelocityBox/VelocityBox.cpp index 5ce439cf1..bd1ed9b02 100644 --- a/projects/VelocityBox/VelocityBox.cpp +++ b/projects/VelocityBox/VelocityBox.cpp @@ -115,7 +115,7 @@ namespace projects { this->By, this->Bz); - setBackgroundField(bgField,cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); } }// namespace projects diff --git a/projects/test_fp/test_fp.cpp b/projects/test_fp/test_fp.cpp index 9f39ae5a8..3f3402ca5 100644 --- 
a/projects/test_fp/test_fp.cpp +++ b/projects/test_fp/test_fp.cpp @@ -106,7 +106,7 @@ namespace projects { } void test_fp::setCellBackgroundField(spatial_cell::SpatialCell *cell) const { - setBackgroundFieldToZero(cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundFieldToZero(cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); } void test_fp::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { diff --git a/projects/test_trans/test_trans.cpp b/projects/test_trans/test_trans.cpp index 3d02646bd..b85b7878c 100644 --- a/projects/test_trans/test_trans.cpp +++ b/projects/test_trans/test_trans.cpp @@ -139,7 +139,7 @@ namespace projects { void test_trans::setCellBackgroundField(SpatialCell* cell) const { ConstantField bgField; bgField.initialize(0.0,0.0,1e-9); - setBackgroundField(bgField,cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); } }// namespace projects diff --git a/projects/verificationLarmor/verificationLarmor.cpp b/projects/verificationLarmor/verificationLarmor.cpp index ec03b9285..bff19a35e 100644 --- a/projects/verificationLarmor/verificationLarmor.cpp +++ b/projects/verificationLarmor/verificationLarmor.cpp @@ -133,7 +133,7 @@ namespace projects { this->BY0, this->BZ0); - setBackgroundField(bgField,cell->parameters, cell->derivatives,cell->derivativesBVOL); + setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); } } //namespace projects diff --git a/sysboundary/outflow.cpp b/sysboundary/outflow.cpp index 716bca7fd..f42dacfd4 100644 --- a/sysboundary/outflow.cpp +++ b/sysboundary/outflow.cpp @@ -453,7 +453,7 @@ namespace SBC { const OutflowSpeciesParameters& sP = this->speciesParams[popID]; SpatialCell* cell = mpiGrid[cellID]; - creal* const cellParams = cell->parameters; + creal* const cellParams = cell->parameters.data(); creal dx = cellParams[CellParams::DX]; creal dy = cellParams[CellParams::DY]; creal dz = cellParams[CellParams::DZ]; From 5b992468b51a5e3286c6359aff1222a881e47dc5 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 18 Sep 2018 15:41:16 +0300 Subject: [PATCH 060/602] Added parameter amrMaxSpatialRefLevel that controls the maximum refinement level that is passed to mpiGrid.initialize. Set it to 0 by default. Added calls to refine a test cell in the middle of the simulation box in vlasiator.cpp (that will do nothing unless amrMaxSpatialRefLevel is increased). 
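Condensed, the refinement hook added to vlasiator.cpp is the dccrg call sequence below (a sketch of the hunk in this patch with the diagnostic printouts trimmed; it stays a no-op while amrMaxSpatialRefLevel keeps its default of 0, because dccrg will not refine past the maximum level given at initialization):

    std::array<double, 3> coords = {(P::xmax - P::xmin) / 2.0,
                                    (P::ymax - P::ymin) / 2.0,
                                    (P::zmax - P::zmin) / 2.0};
    CellID myCell = mpiGrid.get_existing_cell(coords);           // cell holding the target point
    bool refineSuccess = mpiGrid.refine_completely_at(coords);   // queue full refinement there
    std::vector<CellID> refinedCells = mpiGrid.stop_refining();  // execute the queued refines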
--- grid.cpp | 2 +- parameters.cpp | 2 ++ parameters.h | 1 + spatial_cell.hpp | 19 +++++++++++++++++-- vlasiator.cpp | 27 +++++++++++++++++++++------ 5 files changed, 42 insertions(+), 9 deletions(-) diff --git a/grid.cpp b/grid.cpp index 4fd2aa6e4..b212f5d99 100644 --- a/grid.cpp +++ b/grid.cpp @@ -118,7 +118,7 @@ void initializeGrid( comm, &P::loadBalanceAlgorithm[0], neighborhood_size, // neighborhood size - 0, // maximum refinement level + P::amrMaxSpatialRefLevel, // maximum refinement level sysBoundaries.isBoundaryPeriodic(0), sysBoundaries.isBoundaryPeriodic(1), sysBoundaries.isBoundaryPeriodic(2) diff --git a/parameters.cpp b/parameters.cpp index 6cd738ccc..fb1f40ae5 100644 --- a/parameters.cpp +++ b/parameters.cpp @@ -137,6 +137,8 @@ Realf P::amrRefineLimit = 1.0; Realf P::amrCoarsenLimit = 0.5; string P::amrVelRefCriterion = ""; +int P::amrMaxSpatialRefLevel = 0; + bool Parameters::addParameters(){ //the other default parameters we read through the add/get interface Readparameters::add("io.diagnostic_write_interval", "Write diagnostic output every arg time steps",numeric_limits::max()); diff --git a/parameters.h b/parameters.h index 892f7eb92..0b0985725 100644 --- a/parameters.h +++ b/parameters.h @@ -136,6 +136,7 @@ struct Parameters { static Realf amrRefineLimit; /**< If the value of refinement criterion is larger than this value, block should be refined. * The value must be larger than amrCoarsenLimit.*/ static std::string amrVelRefCriterion; /**< Name of the velocity block refinement criterion function.*/ + static int amrMaxSpatialRefLevel; /*! \brief Add the global parameters. * diff --git a/spatial_cell.hpp b/spatial_cell.hpp index 19f744ac9..714d5257d 100644 --- a/spatial_cell.hpp +++ b/spatial_cell.hpp @@ -347,8 +347,9 @@ namespace spatial_cell { static uint64_t mpi_transfer_type; /**< Which data is transferred by the mpi datatype given by spatial cells.*/ static bool mpiTransferAtSysBoundaries; /**< Do we only transfer data at boundaries (true), or in the whole system (false).*/ + SpatialCell& operator=(const SpatialCell& other); private: - SpatialCell& operator=(const SpatialCell&); + //SpatialCell& operator=(const SpatialCell&); bool compute_block_has_content(const vmesh::GlobalID& block,const uint popID) const; void merge_values_recursive(const uint popID,vmesh::GlobalID parentGID,vmesh::GlobalID blockGID,uint8_t refLevel,bool recursive,const Realf* data, @@ -1891,7 +1892,21 @@ namespace spatial_cell { return populations[popID].vmesh.hasGrandParent(blockGID); } - inline SpatialCell& SpatialCell::operator=(const SpatialCell&) { + inline SpatialCell& SpatialCell::operator=(const SpatialCell& other) { + + this->sysBoundaryFlag = other.sysBoundaryFlag; + this->sysBoundaryLayer = other.sysBoundaryLayer; + this->sysBoundaryLayerNew = other.sysBoundaryLayerNew; + this->velocity_block_with_content_list = other.velocity_block_with_content_list; + this->velocity_block_with_no_content_list = other.velocity_block_with_no_content_list; + this->initialized = other.initialized; + this->mpiTransferEnabled = other.mpiTransferEnabled; + this->parameters = other.parameters; + this->derivatives = other.derivatives; + this->derivativesBVOL = other.derivativesBVOL; + this->null_block_data = other.null_block_data; + this->populations = other.populations; + return *this; } diff --git a/vlasiator.cpp b/vlasiator.cpp index 1235af47f..db65b3a2e 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -602,12 +602,27 @@ int main(int argn,char* args[]) { } - // std::array coords; - // coords[1] = 
(P::xmax - P::xmin) / 2.0; - // coords[2] = (P::ymax - P::ymin) / 2.0; - // coords[3] = (P::zmax - P::zmin) / 2.0; - // mpiGrid.refine_completely_at(coords); - // mpiGrid.stop_refining(); + std::array coords; + coords[0] = (P::xmax - P::xmin) / 2.0; + coords[1] = (P::ymax - P::ymin) / 2.0; + coords[2] = (P::zmax - P::zmin) / 2.0; + cout << "Trying to refine at " << coords[0] << ", " << coords[1] << ", " << coords[2] << endl; + CellID myCell = mpiGrid.get_existing_cell(coords); + cout << "Got cell ID " << myCell << endl; + cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << endl; + bool refineSuccess = mpiGrid.refine_completely_at(coords); + std::vector refinedCells = mpiGrid.stop_refining(); + cout << "Result: " << refineSuccess << endl; + + if(refineSuccess) { + cout << "Refined Cells are: "; + for (auto cellid : refinedCells) { + cout << cellid << " "; + } + cout << endl; + + mpiGrid.write_vtk_file("mpiGrid.vtk"); + } phiprof::stop("Initialization"); From 4376040f4852d8379023a6ee84fee80b2a96392e Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 21 Sep 2018 14:36:23 +0300 Subject: [PATCH 061/602] Debugging refinement --- vlasiator.cpp | 45 ++++++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 23 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index db65b3a2e..8dfa0e064 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -361,7 +361,28 @@ int main(int argn,char* args[]) { phiprof::start("Init grid"); //dccrg::Dccrg mpiGrid; initializeGrid(argn,args,mpiGrid,sysBoundaries,*project); - isSysBoundaryCondDynamic = sysBoundaries.isDynamic(); + isSysBoundaryCondDynamic = sysBoundaries.isDynamic(); + + std::array coords; + coords[0] = (P::xmax - P::xmin) / 2.0; + coords[1] = (P::ymax - P::ymin) / 2.0; + coords[2] = (P::zmax - P::zmin) / 2.0; + cout << "Trying to refine at " << coords[0] << ", " << coords[1] << ", " << coords[2] << endl; + CellID myCell = mpiGrid.get_existing_cell(coords); + cout << "Got cell ID " << myCell << endl; + cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << endl; + bool refineSuccess = mpiGrid.refine_completely_at(coords); + std::vector refinedCells = mpiGrid.stop_refining(); + cout << "Result: " << refineSuccess << endl; + if(refineSuccess) { + cout << "Refined Cells are: "; + for (auto cellid : refinedCells) { + cout << cellid << " "; + } + cout << endl; + mpiGrid.write_vtk_file("mpiGrid.vtk"); + } + recalculateLocalCellsCache(); phiprof::stop("Init grid"); // Initialize data reduction operators. 
This should be done elsewhere in order to initialize @@ -601,28 +622,6 @@ int main(int argn,char* args[]) { phiprof::stop("propagate-velocity-space-dt/2"); } - - std::array coords; - coords[0] = (P::xmax - P::xmin) / 2.0; - coords[1] = (P::ymax - P::ymin) / 2.0; - coords[2] = (P::zmax - P::zmin) / 2.0; - cout << "Trying to refine at " << coords[0] << ", " << coords[1] << ", " << coords[2] << endl; - CellID myCell = mpiGrid.get_existing_cell(coords); - cout << "Got cell ID " << myCell << endl; - cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << endl; - bool refineSuccess = mpiGrid.refine_completely_at(coords); - std::vector refinedCells = mpiGrid.stop_refining(); - cout << "Result: " << refineSuccess << endl; - - if(refineSuccess) { - cout << "Refined Cells are: "; - for (auto cellid : refinedCells) { - cout << cellid << " "; - } - cout << endl; - - mpiGrid.write_vtk_file("mpiGrid.vtk"); - } phiprof::stop("Initialization"); From 2d10f76f0e5b60f0834208a5ce3afdfdec3570ef Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 21 Sep 2018 14:49:05 +0300 Subject: [PATCH 062/602] Debugging refinement --- vlasiator.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 8dfa0e064..1e4ed2769 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -372,8 +372,9 @@ int main(int argn,char* args[]) { cout << "Got cell ID " << myCell << endl; cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << endl; bool refineSuccess = mpiGrid.refine_completely_at(coords); - std::vector refinedCells = mpiGrid.stop_refining(); + std::vector refinedCells = mpiGrid.stop_refining(); cout << "Result: " << refineSuccess << endl; + mpiGrid.balance_load(); if(refineSuccess) { cout << "Refined Cells are: "; for (auto cellid : refinedCells) { From 18a9ea1ad7e1fedd0d39133420137396beed9ced Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 24 Sep 2018 12:48:20 +0300 Subject: [PATCH 063/602] Added in comments compatible code for new dccrg version. 
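Side by side, the two dccrg neighbor interfaces this prepares for (a sketch; Offsets abbreviates the offset payload the new API pairs with each neighbor ID):

    // Old API: a flat list of neighbor cell IDs.
    const std::vector<CellID>* neighbors = mpiGrid.get_neighbors_of(cell_id, NEAREST_NEIGHBORHOOD_ID);
    for (auto it = neighbors->begin(); it != neighbors->end(); ++it) {
       CellID nbrCellID = *it;   // may be 0 for nonexistent neighbors
    }

    // New API, kept in comments for now: each entry pairs the ID with its offsets.
    // const std::vector<std::pair<CellID, Offsets>>* neighbors =
    //    mpiGrid.get_neighbors_of(cell_id, NEAREST_NEIGHBORHOOD_ID);
    // for (const auto& nbrPair : *neighbors) {
    //    CellID nbrCellID = nbrPair.first;
    // }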
--- grid.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/grid.cpp b/grid.cpp index b212f5d99..cd9860b4b 100644 --- a/grid.cpp +++ b/grid.cpp @@ -497,9 +497,12 @@ bool adjustVelocityBlocks(dccrg::Dccrg& m // gather spatial neighbor list and create vector with pointers to neighbor spatial cells const vector* neighbors = mpiGrid.get_neighbors_of(cell_id, NEAREST_NEIGHBORHOOD_ID); + //const vector > >* neighbors = mpiGrid.get_neighbors_of(cell_id, NEAREST_NEIGHBORHOOD_ID); vector neighbor_ptrs; neighbor_ptrs.reserve(neighbors->size()); + //for (auto nbrPair : neighbors) { for (vector::const_iterator neighbor_id = neighbors->begin(); neighbor_id != neighbors->end(); ++neighbor_id) { + //CellID neighbor_id = *nbrPair->first; if (*neighbor_id == 0 || *neighbor_id == cell_id) { continue; } @@ -932,10 +935,14 @@ bool validateMesh(dccrg::Dccrg& mpiGrid,c // Get all spatial neighbors const vector* neighbors = mpiGrid.get_neighbors_of(cells[c],NEAREST_NEIGHBORHOOD_ID); + // const vector > >* neighbors = mpiGrid.get_neighbors_of(cells[c],NEAREST_NEIGHBORHOOD_ID); // Iterate over all spatial neighbors for (size_t n=0; nsize(); ++n) { + //for (auto nbrPair : neighbors) { CellID nbrCellID = (*neighbors)[n]; + //CellID nbrCellID = *nbrPair->first; + const SpatialCell* nbr = mpiGrid[nbrCellID]; // Iterate over all blocks in the spatial neighbor, From b8b2595fa9cca24ffe0a46ebe95271e1e240aeda Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 24 Sep 2018 12:49:01 +0300 Subject: [PATCH 064/602] Added cpu_trans_map_amr.hpp into dependencies --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 4ddeb32b5..48e22a2e9 100644 --- a/Makefile +++ b/Makefile @@ -177,7 +177,7 @@ DEPS_VLSVMOVER = ${DEPS_CELL} vlasovsolver/vlasovmover.cpp vlasovsolver/cpu_acc_ DEPS_VLSVMOVER_AMR = ${DEPS_CELL} vlasovsolver_amr/vlasovmover.cpp vlasovsolver_amr/cpu_acc_map.hpp vlasovsolver_amr/cpu_acc_intersections.hpp \ vlasovsolver_amr/cpu_acc_intersections.hpp vlasovsolver_amr/cpu_acc_semilag.hpp vlasovsolver_amr/cpu_acc_transform.hpp \ - vlasovsolver/cpu_moments.h vlasovsolver_amr/cpu_trans_map.hpp velocity_blocks.h + vlasovsolver/cpu_moments.h vlasovsolver_amr/cpu_trans_map.hpp vlasovsolver/cpu_trans_map_amr.hpp velocity_blocks.h #DEPS_PROJECTS = projects/project.h projects/project.cpp \ # projects/MultiPeak/MultiPeak.h projects/MultiPeak/MultiPeak.cpp ${DEPS_CELL} From f5bbe61cdf63aabd8554188f8762dffa3e424637 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 24 Sep 2018 12:49:33 +0300 Subject: [PATCH 065/602] Testing with neighborhoods, does nothing yet. 
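The neighborhood test boils down to the dccrg calls below (condensed from the hunk that follows): a custom neighborhood is a list of {x, y, z} offsets registered under a numeric ID, and get_neighbors_of then takes that ID in place of the default neighborhood.

    typedef dccrg::Types<3>::neighborhood_item_t neigh_t;   // one {x, y, z} offset
    std::vector<neigh_t> neighborhood_x;
    const int neighborhood_width = 2;
    for (int d = -neighborhood_width; d <= neighborhood_width; d++) {
       if (d != 0) neighborhood_x.push_back({{d, 0, 0}});   // offsets -2, -1, +1, +2 along x
    }
    grid.add_neighborhood(1, neighborhood_x);               // register under neighborhood ID 1
    const auto* neighbors = grid.get_neighbors_of(id, 1);   // query that neighborhood only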
--- .../build_pencils/grid_test_neighbors.cpp | 32 +++++++++++++++++-- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/mini-apps/build_pencils/grid_test_neighbors.cpp b/mini-apps/build_pencils/grid_test_neighbors.cpp index 382ff335e..8c13fb493 100644 --- a/mini-apps/build_pencils/grid_test_neighbors.cpp +++ b/mini-apps/build_pencils/grid_test_neighbors.cpp @@ -320,8 +320,22 @@ int main(int argc, char* argv[]) { const int dimension = 0; const bool doRefine = true; const std::array refinementIds = {{10,14,64,72}}; - + grid.initialize(grid_size, comm, "RANDOM", 1); + + typedef dccrg::Types<3>::neighborhood_item_t neigh_t; + std::vector neighborhood_x; + std::vector neighborhood_y; + + int neighborhood_width = 2; + for (int d = -neighborhood_width; d <= neighborhood_width; d++) { + if (d != 0) { + neighborhood_x.push_back({{d, 0, 0}}); + neighborhood_y.push_back({{0, d, 0}}); + } + } + grid.add_neighborhood(1, neighborhood_x); + grid.add_neighborhood(2, neighborhood_y); grid.balance_load(); @@ -333,7 +347,7 @@ int main(int argc, char* argv[]) { } } } - + grid.balance_load(); auto cells = grid.cells; @@ -386,7 +400,19 @@ int main(int argc, char* argv[]) { ibeg = iend; std::cout << std::endl; } - + + + CellID id = 3; + const vector* neighbors = grid.get_neighbors_of(id, 1); + if (neighbors != NULL) { + std::cout << "Neighbors of cell " << id << std::endl; + for (auto neighbor : *neighbors) { + std::cout << neighbor << std::endl; + } + } + + + std::ofstream outfile; grid.write_vtk_file("test.vtk"); From 5629913753ce8ac02b7cd19b8f0c442b57925712 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 24 Sep 2018 12:50:05 +0300 Subject: [PATCH 066/602] Commented out refinement calls --- vlasiator.cpp | 41 +++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 1e4ed2769..b2148d086 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -363,26 +363,27 @@ int main(int argn,char* args[]) { initializeGrid(argn,args,mpiGrid,sysBoundaries,*project); isSysBoundaryCondDynamic = sysBoundaries.isDynamic(); - std::array coords; - coords[0] = (P::xmax - P::xmin) / 2.0; - coords[1] = (P::ymax - P::ymin) / 2.0; - coords[2] = (P::zmax - P::zmin) / 2.0; - cout << "Trying to refine at " << coords[0] << ", " << coords[1] << ", " << coords[2] << endl; - CellID myCell = mpiGrid.get_existing_cell(coords); - cout << "Got cell ID " << myCell << endl; - cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << endl; - bool refineSuccess = mpiGrid.refine_completely_at(coords); - std::vector refinedCells = mpiGrid.stop_refining(); - cout << "Result: " << refineSuccess << endl; - mpiGrid.balance_load(); - if(refineSuccess) { - cout << "Refined Cells are: "; - for (auto cellid : refinedCells) { - cout << cellid << " "; - } - cout << endl; - mpiGrid.write_vtk_file("mpiGrid.vtk"); - } + // std::array coords; + // coords[0] = (P::xmax - P::xmin) / 2.0; + // coords[1] = (P::ymax - P::ymin) / 2.0; + // coords[2] = (P::zmax - P::zmin) / 2.0; + // cout << "Trying to refine at " << coords[0] << ", " << coords[1] << ", " << coords[2] << endl; + // CellID myCell = mpiGrid.get_existing_cell(coords); + // cout << "Got cell ID " << myCell << endl; + // cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << endl; + // bool refineSuccess = mpiGrid.refine_completely_at(coords); + // std::vector refinedCells = mpiGrid.stop_refining(); + // cout << "Result: " << refineSuccess << endl; + 
// mpiGrid.balance_load(); + // if(refineSuccess) { + // cout << "Refined Cells are: "; + // for (auto cellid : refinedCells) { + // cout << cellid << " "; + // } + // cout << endl; + // mpiGrid.write_vtk_file("mpiGrid.vtk"); + // } + recalculateLocalCellsCache(); phiprof::stop("Init grid"); From eec071516c9e9726ec4c1505020215dbc1bafb7d Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 24 Sep 2018 12:51:55 +0300 Subject: [PATCH 067/602] Added debugging code for get_neighbors_of. Commented out. --- vlasovsolver/cpu_trans_map_amr.cpp | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index dff4a657f..ece287477 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -690,6 +690,10 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // compute pencils => set of pencils (shared datastructure) + // std::cout << "LocalPropagatedCells: "; + // for (const auto id : localPropagatedCells) std::cout << id << " "; + // std::cout << endl; + vector seedIds; get_seed_ids(mpiGrid, localPropagatedCells, dimension, seedIds); @@ -703,13 +707,17 @@ bool trans_map_1d_amr(const dccrg::Dccrg& setOfPencils pencils; vector pencilSets; + // std::cout << "Starting cell ids for pencils are "; + // for (const auto seedId : seedIds) std::cout << seedId << " "; + // std::cout << endl; + for (const auto seedId : seedIds) { // Construct pencils from the seedIds into a set of pencils. pencils = buildPencilsWithNeighbors(mpiGrid, pencils, seedId, ids, dimension, path); } // Print out ids of pencils (if needed for debugging) - if (false) { + if (true) { uint ibeg = 0; uint iend = 0; std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; @@ -717,15 +725,24 @@ bool trans_map_1d_amr(const dccrg::Dccrg& std::cout << "-----------------------------------------------------------------" << std::endl; for (uint i = 0; i < pencils.N; i++) { iend += pencils.lengthOfPencils[i]; - std::cout << "(" << pencils.x[i] << ", " << pencils.y[i] << "): "; + std::cout << mpiGrid.get_process(pencils.ids[ibeg])<< " (" << pencils.x[i] << ", " << pencils.y[i] << "): "; for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { std::cout << *j << " "; } ibeg = iend; std::cout << std::endl; } - } + // CellID id = 56; + // const vector* neighbors = mpiGrid.get_neighbors_of(id, VLASOV_SOLVER_X_NEIGHBORHOOD_ID); + // if (neighbors != NULL) { + // std::cout << "Neighbors of cell " << id << std::endl; + // for (auto neighbor : *neighbors) { + // std::cout << neighbor << std::endl; + // } + // } + + } // Add the final set of pencils to the pencilSets - vector. 
// Only one set is created for now but we retain support for multiple sets @@ -965,6 +982,5 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - return true; } From bb695251a931205645de703fdd69bee01e5c4668 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 24 Sep 2018 17:17:12 +0300 Subject: [PATCH 068/602] Updated dccrg calls to work with dccrg commit 63699d6 --- grid.cpp | 46 ++++++++++++++++--------------------- projects/projects_common.h | 4 ++-- sysboundary/sysboundary.cpp | 12 +++++----- 3 files changed, 28 insertions(+), 34 deletions(-) diff --git a/grid.cpp b/grid.cpp index cd9860b4b..31b8217d0 100644 --- a/grid.cpp +++ b/grid.cpp @@ -113,18 +113,15 @@ void initializeGrid( geom_params.level_0_cell_length[1] = P::dy_ini; geom_params.level_0_cell_length[2] = P::dz_ini; - mpiGrid.initialize( - grid_length, - comm, - &P::loadBalanceAlgorithm[0], - neighborhood_size, // neighborhood size - P::amrMaxSpatialRefLevel, // maximum refinement level - sysBoundaries.isBoundaryPeriodic(0), - sysBoundaries.isBoundaryPeriodic(1), - sysBoundaries.isBoundaryPeriodic(2) - ); - - mpiGrid.set_geometry(geom_params); + mpiGrid.set_initial_length(grid_length) + .set_load_balancing_method(&P::loadBalanceAlgorithm[0]) + .set_neighborhood_length(neighborhood_size) + .set_maximum_refinement_level(0) + .set_periodic(sysBoundaries.isBoundaryPeriodic(0), + sysBoundaries.isBoundaryPeriodic(1), + sysBoundaries.isBoundaryPeriodic(2)) + .initialize(comm) + .set_geometry(geom_params); // Init velocity mesh on all cells initVelocityGridGeometry(mpiGrid); @@ -496,17 +493,15 @@ bool adjustVelocityBlocks(dccrg::Dccrg& m SpatialCell* cell = mpiGrid[cell_id]; // gather spatial neighbor list and create vector with pointers to neighbor spatial cells - const vector* neighbors = mpiGrid.get_neighbors_of(cell_id, NEAREST_NEIGHBORHOOD_ID); - //const vector > >* neighbors = mpiGrid.get_neighbors_of(cell_id, NEAREST_NEIGHBORHOOD_ID); + const auto* neighbors = mpiGrid.get_neighbors_of(cell_id, NEAREST_NEIGHBORHOOD_ID); vector neighbor_ptrs; neighbor_ptrs.reserve(neighbors->size()); - //for (auto nbrPair : neighbors) { - for (vector::const_iterator neighbor_id = neighbors->begin(); neighbor_id != neighbors->end(); ++neighbor_id) { - //CellID neighbor_id = *nbrPair->first; - if (*neighbor_id == 0 || *neighbor_id == cell_id) { + for ( pair> nbrPair : *neighbors) { + CellID neighbor_id = nbrPair.first; + if (neighbor_id == 0 || neighbor_id == cell_id) { continue; } - neighbor_ptrs.push_back(mpiGrid[*neighbor_id]); + neighbor_ptrs.push_back(mpiGrid[neighbor_id]); } if (getObjectWrapper().particleSpecies[popID].sparse_conserve_mass) { for (size_t i=0; iget_number_of_velocity_blocks(popID)*WID3; ++i) { @@ -934,15 +929,14 @@ bool validateMesh(dccrg::Dccrg& mpiGrid,c SpatialCell* cell = mpiGrid[cells[c]]; // Get all spatial neighbors - const vector* neighbors = mpiGrid.get_neighbors_of(cells[c],NEAREST_NEIGHBORHOOD_ID); - // const vector > >* neighbors = mpiGrid.get_neighbors_of(cells[c],NEAREST_NEIGHBORHOOD_ID); + //const vector* neighbors = mpiGrid.get_neighbors_of(cells[c],NEAREST_NEIGHBORHOOD_ID); + const auto* neighbors = mpiGrid.get_neighbors_of(cells[c], NEAREST_NEIGHBORHOOD_ID); // Iterate over all spatial neighbors - for (size_t n=0; nsize(); ++n) { - //for (auto nbrPair : neighbors) { - CellID nbrCellID = (*neighbors)[n]; - //CellID nbrCellID = *nbrPair->first; - + // for (size_t n=0; nsize(); ++n) { + for (pair > nbrPair : *neighbors) { + // CellID nbrCellID = 
(*neighbors)[n]; + CellID nbrCellID = nbrPair.first; const SpatialCell* nbr = mpiGrid[nbrCellID]; // Iterate over all blocks in the spatial neighbor, diff --git a/projects/projects_common.h b/projects/projects_common.h index be3a7096f..6e83c68c4 100644 --- a/projects/projects_common.h +++ b/projects/projects_common.h @@ -80,11 +80,11 @@ template CELLID getNeighbour(const dccrg::Dccrg CELLID getNeighbour(const dccrg::Dccrg& mpiGrid,const CELLID& cellID,const int& i,const int& j,const int& k){ - std::vector neighbors = mpiGrid.get_neighbors_of_at_offset(cellID, i, j, k); + auto neighbors = mpiGrid.get_neighbors_of_at_offset(cellID, i, j, k); //FIXME: support refined grids if(neighbors.size() > 0) { - return neighbors[0]; + return neighbors[0].first; } else { return INVALID_CELLID; } diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp index 97f0ee12b..6044d76fe 100644 --- a/sysboundary/sysboundary.cpp +++ b/sysboundary/sysboundary.cpp @@ -427,10 +427,10 @@ bool SysBoundary::classifyCells(dccrg::DccrgsysBoundaryLayer=0; /*Initial value*/ if(mpiGrid[cells[i]]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) { - const std::vector* nbrs = mpiGrid.get_neighbors_of(cells[i],SYSBOUNDARIES_NEIGHBORHOOD_ID); + const auto* nbrs = mpiGrid.get_neighbors_of(cells[i],SYSBOUNDARIES_NEIGHBORHOOD_ID); for(uint j=0; j<(*nbrs).size(); j++) { - if((*nbrs)[j]!=0 ) { - if(mpiGrid[(*nbrs)[j]]->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY ) { + if((*nbrs)[j].first!=0 ) { + if(mpiGrid[(*nbrs)[j].first]->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY ) { mpiGrid[cells[i]]->sysBoundaryLayer=1; } } @@ -448,10 +448,10 @@ bool SysBoundary::classifyCells(dccrg::DccrgsysBoundaryLayer==0){ - const std::vector* nbrs = mpiGrid.get_neighbors_of(cells[i],SYSBOUNDARIES_NEIGHBORHOOD_ID); + const auto* nbrs = mpiGrid.get_neighbors_of(cells[i],SYSBOUNDARIES_NEIGHBORHOOD_ID); for(uint j=0; j<(*nbrs).size(); j++) { - if((*nbrs)[j]!=0 && (*nbrs)[j]!=cells[i] ) { - if(mpiGrid[(*nbrs)[j]]->sysBoundaryLayer==layer) { + if((*nbrs)[j].first!=0 && (*nbrs)[j].first!=cells[i] ) { + if(mpiGrid[(*nbrs)[j].first]->sysBoundaryLayer==layer) { mpiGrid[cells[i]]->sysBoundaryLayer=layer+1; break; } From 6bd5aa94a33198d80197d93fb760cf93f76f168a Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 25 Sep 2018 16:12:36 +0300 Subject: [PATCH 069/602] Added mapping functions between dccrg and fsgrid for refined mesh --- fieldsolver/gridGlue.cpp | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 2aa2fccb8..7b6337974 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -287,3 +287,42 @@ void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid, } } +/* +Map from fsgrid cell ids to dccrg cell ids when they aren't identical (ie. when dccrg has refinement). +*/ +CellID mapFsGridIdToDccrg(FsGrid< fsgrids::technical, 2>& technicalGrid, + dccrg::Dccrg& mpiGrid, + CellID fsgridID) { + + auto cellCoord = technicalGrid.globalIDtoCellCoord(fsgridID); + // theoretically we could directly use cellCoord as indices for + // mpiGrid.get_cell_from_indices, if we knew the refinement level + // of the cell in advance. Going via cartesian coordinates is probably + // faster than iterating through refinement levels until we find the + // correct one. 
+ std::array cartesianCoord; + cartesianCoord[0] = cellCoord[0] * technicalGrid.DX + P::xmin; + cartesianCoord[1] = cellCoord[1] * technicalGrid.DY + P::ymin; + cartesianCoord[2] = cellCoord[2] * technicalGrid.DZ + P::zmin; + CellID dccrgID = mpiGrid.get_existing_cell(cartesianCoord); + return dccrgID; + +} +/* +Map from dccrg cell ids to fsgrid cell ids when they aren't identical (ie. when dccrg has refinement). +*/ + +CellID mapDccrgIdToFsGrid(FsGrid< fsgrids::technical, 2>& technicalGrid, + dccrg::Dccrg& mpiGrid, + CellID dccrgID) { + auto indices = mpiGrid.mapping.get_indices(dccrgID); + // The indices we get from dccrg are directly coordinates at the finest refinement level. + // Therefore, they should match fsgrid coordinates exactly. + std::array cellCoord; + for (uint i = 0;i < 3; ++i) { + cellCoord[i] = indices[i]; + } + CellID fsgridID = technicalGrid.cellCoordtoGlobalID(cellCoord); + return fsgridID; +} + From 1befe76117e5f78f8f8b00f9d2c9c37045a96893 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 25 Sep 2018 16:39:36 +0300 Subject: [PATCH 070/602] Groundwork for fsgrid interface update. Fsgrid arrays are initialized on the maximum refinement level set in parameters. MPI receives are set up for all fsgrid cells (independent of dccrg cells). --- vlasiator.cpp | 131 ++++++++++++++++++++++++++------------------------ 1 file changed, 67 insertions(+), 64 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index b2148d086..36a36a5f2 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -363,26 +363,26 @@ int main(int argn,char* args[]) { initializeGrid(argn,args,mpiGrid,sysBoundaries,*project); isSysBoundaryCondDynamic = sysBoundaries.isDynamic(); - // std::array coords; - // coords[0] = (P::xmax - P::xmin) / 2.0; - // coords[1] = (P::ymax - P::ymin) / 2.0; - // coords[2] = (P::zmax - P::zmin) / 2.0; - // cout << "Trying to refine at " << coords[0] << ", " << coords[1] << ", " << coords[2] << endl; - // CellID myCell = mpiGrid.get_existing_cell(coords); - // cout << "Got cell ID " << myCell << endl; - // cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << endl; - // bool refineSuccess = mpiGrid.refine_completely_at(coords); - // std::vector refinedCells = mpiGrid.stop_refining(); - // cout << "Result: " << refineSuccess << endl; - // mpiGrid.balance_load(); - // if(refineSuccess) { - // cout << "Refined Cells are: "; - // for (auto cellid : refinedCells) { - // cout << cellid << " "; - // } - // cout << endl; - // mpiGrid.write_vtk_file("mpiGrid.vtk"); - // } + std::array coords; + coords[0] = (P::xmax - P::xmin) / 2.0; + coords[1] = (P::ymax - P::ymin) / 2.0; + coords[2] = (P::zmax - P::zmin) / 2.0; + cout << "Trying to refine at " << coords[0] << ", " << coords[1] << ", " << coords[2] << endl; + CellID myCell = mpiGrid.get_existing_cell(coords); + cout << "Got cell ID " << myCell << endl; + cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << endl; + bool refineSuccess = mpiGrid.refine_completely_at(coords); + std::vector refinedCells = mpiGrid.stop_refining(); + cout << "Result: " << refineSuccess << endl; + mpiGrid.balance_load(); + if(refineSuccess) { + cout << "Refined Cells are: "; + for (auto cellid : refinedCells) { + cout << cellid << " "; + } + cout << endl; + mpiGrid.write_vtk_file("mpiGrid.vtk"); + } recalculateLocalCellsCache(); phiprof::stop("Init grid"); @@ -396,70 +396,73 @@ int main(int argn,char* args[]) { // Initialize simplified Fieldsolver grids. 
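The fsgrid sizing rule used below: keep fsgrid uniform but at the finest allowed level, so each dimension is multiplied by 2^amrMaxSpatialRefLevel and the cell size is divided by the same factor, leaving the physical extent unchanged. As arithmetic (a sketch with an integer shift in place of pow; uint32_t stands in for the fsgrid size type):

    const int f = 1 << P::amrMaxSpatialRefLevel;   // fine cells per level-0 cell, per dimension
    const std::array<uint32_t, 3> fsGridDimensions = {P::xcells_ini * f,
                                                      P::ycells_ini * f,
                                                      P::zcells_ini * f};
    const double fsDx = P::dx_ini / f;             // level 1: twice the cells, half the spacing
    const int finePerCoarse = f * f * f;           // fsgrid cells overlaying one level-0 dccrg cell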
phiprof::start("Init fieldsolver grids"); - const std::array dimensions = {convert(P::xcells_ini), convert(P::ycells_ini), convert(P::zcells_ini)}; + const std::array fsGridDimensions = {convert(P::xcells_ini) * pow(2,P::amrMaxSpatialRefLevel), + convert(P::ycells_ini) * pow(2,P::amrMaxSpatialRefLevel), + convert(P::zcells_ini) * pow(2,P::amrMaxSpatialRefLevel)}; + const int fsGridSize = fsGridDimensions[0] * fsGridDimensions[1] * fsGridDimensions[2]; std::array periodicity{mpiGrid.topology.is_periodic(0), mpiGrid.topology.is_periodic(1), mpiGrid.topology.is_periodic(2)}; - FsGrid< std::array, 2> perBGrid(dimensions, comm, periodicity); - FsGrid< std::array, 2> perBDt2Grid(dimensions, comm, periodicity); - FsGrid< std::array, 2> EGrid(dimensions, comm, periodicity); - FsGrid< std::array, 2> EDt2Grid(dimensions, comm, periodicity); - FsGrid< std::array, 2> EHallGrid(dimensions, comm, periodicity); - FsGrid< std::array, 2> EGradPeGrid(dimensions, comm, periodicity); - FsGrid< std::array, 2> momentsGrid(dimensions, comm, periodicity); - FsGrid< std::array, 2> momentsDt2Grid(dimensions, comm, periodicity); - FsGrid< std::array, 2> dPerBGrid(dimensions, comm, periodicity); - FsGrid< std::array, 2> dMomentsGrid(dimensions, comm, periodicity); - FsGrid< std::array, 2> BgBGrid(dimensions, comm, periodicity); - FsGrid< std::array, 2> volGrid(dimensions, comm, periodicity); - FsGrid< fsgrids::technical, 2> technicalGrid(dimensions, comm, periodicity); + FsGrid< std::array, 2> perBGrid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> perBDt2Grid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> EGrid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> EDt2Grid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> EHallGrid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> EGradPeGrid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> momentsGrid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> momentsDt2Grid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> dPerBGrid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> dMomentsGrid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> BgBGrid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> volGrid(fsGridDimensions, comm, periodicity); + FsGrid< fsgrids::technical, 2> technicalGrid(fsGridDimensions, comm, periodicity); // Set DX,DY and DZ // TODO: This is currently just taking the values from cell 1, and assuming them to be // constant throughout the simulation. 
perBGrid.DX = perBDt2Grid.DX = EGrid.DX = EDt2Grid.DX = EHallGrid.DX = EGradPeGrid.DX = momentsGrid.DX = momentsDt2Grid.DX = dPerBGrid.DX = dMomentsGrid.DX = BgBGrid.DX = volGrid.DX = technicalGrid.DX - = P::dx_ini; + = P::dx_ini * pow(2,-P::amrMaxSpatialRefLevel); perBGrid.DY = perBDt2Grid.DY = EGrid.DY = EDt2Grid.DY = EHallGrid.DY = EGradPeGrid.DY = momentsGrid.DY = momentsDt2Grid.DY = dPerBGrid.DY = dMomentsGrid.DY = BgBGrid.DY = volGrid.DY = technicalGrid.DY - = P::dy_ini; + = P::dy_ini * pow(2,-P::amrMaxSpatialRefLevel); perBGrid.DZ = perBDt2Grid.DZ = EGrid.DZ = EDt2Grid.DZ = EHallGrid.DZ = EGradPeGrid.DZ = momentsGrid.DZ = momentsDt2Grid.DZ = dPerBGrid.DZ = dMomentsGrid.DZ = BgBGrid.DZ = volGrid.DZ = technicalGrid.DZ - = P::dz_ini; + = P::dz_ini * pow(2,-P::amrMaxSpatialRefLevel); phiprof::stop("Init fieldsolver grids"); phiprof::start("Initial fsgrid coupling"); const std::vector& cells = getLocalCells(); // Couple FSGrids to mpiGrid // TODO: Do we really need to couple *all* of these fields? - perBGrid.setupForGridCoupling(cells.size()); - perBDt2Grid.setupForGridCoupling(cells.size()); - EGrid.setupForGridCoupling(cells.size()); - EDt2Grid.setupForGridCoupling(cells.size()); - EHallGrid.setupForGridCoupling(cells.size()); - EGradPeGrid.setupForGridCoupling(cells.size()); - momentsGrid.setupForGridCoupling(cells.size()); - momentsDt2Grid.setupForGridCoupling(cells.size()); - dPerBGrid.setupForGridCoupling(cells.size()); - dMomentsGrid.setupForGridCoupling(cells.size()); - BgBGrid.setupForGridCoupling(cells.size()); - volGrid.setupForGridCoupling(cells.size()); - technicalGrid.setupForGridCoupling(cells.size()); + perBGrid.setupForGridCoupling(fsGridSize); + perBDt2Grid.setupForGridCoupling(fsGridSize); + EGrid.setupForGridCoupling(fsGridSize); + EDt2Grid.setupForGridCoupling(fsGridSize); + EHallGrid.setupForGridCoupling(fsGridSize); + EGradPeGrid.setupForGridCoupling(fsGridSize); + momentsGrid.setupForGridCoupling(fsGridSize); + momentsDt2Grid.setupForGridCoupling(fsGridSize); + dPerBGrid.setupForGridCoupling(fsGridSize); + dMomentsGrid.setupForGridCoupling(fsGridSize); + BgBGrid.setupForGridCoupling(fsGridSize); + volGrid.setupForGridCoupling(fsGridSize); + technicalGrid.setupForGridCoupling(fsGridSize); // FSGrid cellIds are 0-based, whereas DCCRG cellIds are 1-based, beware - for(auto& i : cells) { - perBGrid.setGridCoupling(i-1, myRank); - perBDt2Grid.setGridCoupling(i-1, myRank); - EGrid.setGridCoupling(i-1, myRank); - EDt2Grid.setGridCoupling(i-1, myRank); - EHallGrid.setGridCoupling(i-1, myRank); - EGradPeGrid.setGridCoupling(i-1, myRank); - momentsGrid.setGridCoupling(i-1, myRank); - momentsDt2Grid.setGridCoupling(i-1, myRank); - dPerBGrid.setGridCoupling(i-1, myRank); - dMomentsGrid.setGridCoupling(i-1, myRank); - BgBGrid.setGridCoupling(i-1, myRank); - volGrid.setGridCoupling(i-1, myRank); - technicalGrid.setGridCoupling(i-1, myRank); + for(auto& cellId : cells) { + perBGrid.setGridCoupling(cellId-1, myRank); + perBDt2Grid.setGridCoupling(cellId-1, myRank); + EGrid.setGridCoupling(cellId-1, myRank); + EDt2Grid.setGridCoupling(cellId-1, myRank); + EHallGrid.setGridCoupling(cellId-1, myRank); + EGradPeGrid.setGridCoupling(cellId-1, myRank); + momentsGrid.setGridCoupling(cellId-1, myRank); + momentsDt2Grid.setGridCoupling(cellId-1, myRank); + dPerBGrid.setGridCoupling(cellId-1, myRank); + dMomentsGrid.setGridCoupling(cellId-1, myRank); + BgBGrid.setGridCoupling(cellId-1, myRank); + volGrid.setGridCoupling(cellId-1, myRank); + technicalGrid.setGridCoupling(cellId-1, myRank); 
} perBGrid.finishGridCoupling(); perBDt2Grid.finishGridCoupling(); From f904bd66dd73085d6c705b52487a2e3bcc2c3c67 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 26 Sep 2018 10:42:36 +0300 Subject: [PATCH 071/602] Adding more dependencies in the hopes of making the build work --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 48e22a2e9..d7cdfe563 100644 --- a/Makefile +++ b/Makefile @@ -167,7 +167,7 @@ DEPS_CPU_ACC_TRANSFORM = ${DEPS_COMMON} ${DEPS_CELL} vlasovsolver/cpu_moments.h DEPS_CPU_MOMENTS = ${DEPS_COMMON} ${DEPS_CELL} vlasovmover.h vlasovsolver/cpu_moments.h vlasovsolver/cpu_moments.cpp -DEPS_CPU_TRANS_MAP = ${DEPS_COMMON} ${DEPS_CELL} grid.h vlasovsolver/vec.h vlasovsolver/cpu_trans_map.hpp vlasovsolver/cpu_trans_map.cpp +DEPS_CPU_TRANS_MAP = ${DEPS_COMMON} ${DEPS_CELL} grid.h vlasovsolver/vec.h vlasovsolver/cpu_trans_map.hpp vlasovsolver/cpu_trans_map.cpp vlasovsolver/cpu_trans_map_amr.hpp vlasovsolver/cpu_trans_map_amr.cpp DEPS_CPU_TRANS_MAP_AMR = ${DEPS_COMMON} ${DEPS_CELL} grid.h vlasovsolver/vec.h vlasovsolver/cpu_trans_map.hpp vlasovsolver/cpu_trans_map.cpp vlasovsolver/cpu_trans_map_amr.hpp vlasovsolver/cpu_trans_map_amr.cpp From 9752a2d00b17ba7c5a7b1d681ac740c7de38e046 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 26 Sep 2018 10:44:11 +0300 Subject: [PATCH 072/602] Set maximum refinement level to that set in the parameters --- grid.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grid.cpp b/grid.cpp index 31b8217d0..5fdf76e5f 100644 --- a/grid.cpp +++ b/grid.cpp @@ -116,7 +116,7 @@ void initializeGrid( mpiGrid.set_initial_length(grid_length) .set_load_balancing_method(&P::loadBalanceAlgorithm[0]) .set_neighborhood_length(neighborhood_size) - .set_maximum_refinement_level(0) + .set_maximum_refinement_level(P::amrMaxSpatialRefLevel) .set_periodic(sysBoundaries.isBoundaryPeriodic(0), sysBoundaries.isBoundaryPeriodic(1), sysBoundaries.isBoundaryPeriodic(2)) From bdaaa123146ab1a486a21e6a5d57315bf564a5a4 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 1 Oct 2018 15:24:19 +0300 Subject: [PATCH 073/602] Refined grid compatibility update for gridGlue.cpp and gridGlue.hpp. Assumes fsgrid is at highest possible refinement level everywhere. When dccrg cells are at less than full refinement, a value is fed to all fsgrid cells that cover a dccrg cell. When fsgrid values are mapped to a dccrg cell, a simple average is used. The behaviour should revert back to the previous version when the maximum refinement level is set to 0. Performance is expected to be somewhat worse. This code compiles but has not been tested yet. 
--- fieldsolver/gridGlue.cpp | 485 +++++++++++++++++++++++---------------- fieldsolver/gridGlue.hpp | 41 ++++ vlasiator.cpp | 80 +++---- 3 files changed, 374 insertions(+), 232 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 7b6337974..13305a8bb 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -10,42 +10,41 @@ void feedMomentsIntoFsGrid(dccrg::Dccrg& const std::vector& cells, FsGrid< std::array, 2>& momentsGrid, bool dt2 /*=false*/) { - momentsGrid.setupForTransferIn(cells.size()); - - // Setup transfer buffers - std::vector< std::array > transferBuffer(cells.size()); + int fsgridSize = cells.size() * pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level()),3); + momentsGrid.setupForTransferIn(fsgridSize); // Fill from cellParams - #pragma omp parallel for - for(int i=0; i< cells.size(); i++) { - auto cellParams = mpiGrid[cells[i]]->get_cell_parameters(); - std::array* thisCellData = &transferBuffer[i]; - - if(!dt2) { - thisCellData->at(fsgrids::moments::RHOM) = cellParams[CellParams::RHOM]; - thisCellData->at(fsgrids::moments::RHOQ) = cellParams[CellParams::RHOQ]; - thisCellData->at(fsgrids::moments::VX) = cellParams[CellParams::VX]; - thisCellData->at(fsgrids::moments::VY) = cellParams[CellParams::VY]; - thisCellData->at(fsgrids::moments::VZ) = cellParams[CellParams::VZ]; - thisCellData->at(fsgrids::moments::P_11) = cellParams[CellParams::P_11]; - thisCellData->at(fsgrids::moments::P_22) = cellParams[CellParams::P_22]; - thisCellData->at(fsgrids::moments::P_33) = cellParams[CellParams::P_33]; - } else { - thisCellData->at(fsgrids::moments::RHOM) = cellParams[CellParams::RHOM_DT2]; - thisCellData->at(fsgrids::moments::RHOQ) = cellParams[CellParams::RHOQ_DT2]; - thisCellData->at(fsgrids::moments::VX) = cellParams[CellParams::VX_DT2]; - thisCellData->at(fsgrids::moments::VY) = cellParams[CellParams::VY_DT2]; - thisCellData->at(fsgrids::moments::VZ) = cellParams[CellParams::VZ_DT2]; - thisCellData->at(fsgrids::moments::P_11) = cellParams[CellParams::P_11_DT2]; - thisCellData->at(fsgrids::moments::P_22) = cellParams[CellParams::P_22_DT2]; - thisCellData->at(fsgrids::moments::P_33) = cellParams[CellParams::P_33_DT2]; + // #pragma omp parallel for + for(auto dccrgId : cells) { + auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); + std::array* thisCellData; + const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, momentsGrid.getLocalSize(), dccrgId); + + for (auto fsgridId : fsgridIds) { + if(!dt2) { + thisCellData->at(fsgrids::moments::RHOM) = cellParams[CellParams::RHOM]; + thisCellData->at(fsgrids::moments::RHOQ) = cellParams[CellParams::RHOQ]; + thisCellData->at(fsgrids::moments::VX) = cellParams[CellParams::VX]; + thisCellData->at(fsgrids::moments::VY) = cellParams[CellParams::VY]; + thisCellData->at(fsgrids::moments::VZ) = cellParams[CellParams::VZ]; + thisCellData->at(fsgrids::moments::P_11) = cellParams[CellParams::P_11]; + thisCellData->at(fsgrids::moments::P_22) = cellParams[CellParams::P_22]; + thisCellData->at(fsgrids::moments::P_33) = cellParams[CellParams::P_33]; + } else { + thisCellData->at(fsgrids::moments::RHOM) = cellParams[CellParams::RHOM_DT2]; + thisCellData->at(fsgrids::moments::RHOQ) = cellParams[CellParams::RHOQ_DT2]; + thisCellData->at(fsgrids::moments::VX) = cellParams[CellParams::VX_DT2]; + thisCellData->at(fsgrids::moments::VY) = cellParams[CellParams::VY_DT2]; + thisCellData->at(fsgrids::moments::VZ) = cellParams[CellParams::VZ_DT2]; + thisCellData->at(fsgrids::moments::P_11) = 
cellParams[CellParams::P_11_DT2]; + thisCellData->at(fsgrids::moments::P_22) = cellParams[CellParams::P_22_DT2]; + thisCellData->at(fsgrids::moments::P_33) = cellParams[CellParams::P_33_DT2]; + } + + momentsGrid.transferDataIn(fsgridId, thisCellData); } } - for(int i=0; i< cells.size(); i++) { - momentsGrid.transferDataIn(cells[i] - 1, &transferBuffer[i]); - } - // Finish the actual transfer momentsGrid.finishTransfersIn(); } @@ -54,43 +53,49 @@ void feedMomentsIntoFsGrid(dccrg::Dccrg& void feedBgFieldsIntoFsGrid(dccrg::Dccrg& mpiGrid, const std::vector& cells, FsGrid< std::array, 2>& bgBGrid) { - bgBGrid.setupForTransferIn(cells.size()); + int fsgridSize = cells.size() * pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level()),3); + bgBGrid.setupForTransferIn(fsgridSize); // Setup transfer buffers - std::vector< std::array > transferBuffer(cells.size()); + std::vector< std::array > transferBuffer(fsgridSize); // Fill from cellParams - #pragma omp parallel for - for(int i=0; i< cells.size(); i++) { - auto cellParams = mpiGrid[cells[i]]->get_cell_parameters(); - auto derivatives = mpiGrid[cells[i]]->derivatives; - auto volumeDerivatives = mpiGrid[cells[i]]->derivativesBVOL; - std::array* thisCellData = &transferBuffer[i]; - - thisCellData->at(fsgrids::bgbfield::BGBX) = cellParams[CellParams::BGBX]; - thisCellData->at(fsgrids::bgbfield::BGBY) = cellParams[CellParams::BGBY]; - thisCellData->at(fsgrids::bgbfield::BGBZ) = cellParams[CellParams::BGBZ]; - thisCellData->at(fsgrids::bgbfield::BGBXVOL) = cellParams[CellParams::BGBXVOL]; - thisCellData->at(fsgrids::bgbfield::BGBYVOL) = cellParams[CellParams::BGBYVOL]; - thisCellData->at(fsgrids::bgbfield::BGBZVOL) = cellParams[CellParams::BGBZVOL]; - - thisCellData->at(fsgrids::bgbfield::dBGBxdy) = derivatives[fieldsolver::dBGBxdy]; - thisCellData->at(fsgrids::bgbfield::dBGBxdz) = derivatives[fieldsolver::dBGBxdz]; - thisCellData->at(fsgrids::bgbfield::dBGBydx) = derivatives[fieldsolver::dBGBydx]; - thisCellData->at(fsgrids::bgbfield::dBGBydz) = derivatives[fieldsolver::dBGBydz]; - thisCellData->at(fsgrids::bgbfield::dBGBzdx) = derivatives[fieldsolver::dBGBzdx]; - thisCellData->at(fsgrids::bgbfield::dBGBzdy) = derivatives[fieldsolver::dBGBzdy]; - - thisCellData->at(fsgrids::bgbfield::dBGBXVOLdy) = volumeDerivatives[bvolderivatives::dBGBXVOLdy]; - thisCellData->at(fsgrids::bgbfield::dBGBXVOLdz) = volumeDerivatives[bvolderivatives::dBGBXVOLdz]; - thisCellData->at(fsgrids::bgbfield::dBGBYVOLdx) = volumeDerivatives[bvolderivatives::dBGBYVOLdx]; - thisCellData->at(fsgrids::bgbfield::dBGBYVOLdz) = volumeDerivatives[bvolderivatives::dBGBYVOLdz]; - thisCellData->at(fsgrids::bgbfield::dBGBZVOLdx) = volumeDerivatives[bvolderivatives::dBGBZVOLdx]; - thisCellData->at(fsgrids::bgbfield::dBGBZVOLdy) = volumeDerivatives[bvolderivatives::dBGBZVOLdy]; - } - - for(int i=0; i< cells.size(); i++) { - bgBGrid.transferDataIn(cells[i] - 1, &transferBuffer[i]); + // TODO: Making this thread-safe requires some tricks. Don't think threading is crucial here. 
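+ // (Each dccrg cell below fans out to several fsgrid targets via transferDataIn,
+ // which presumably shares transfer bookkeeping across iterations, so a naive
+ // parallel for over cells is unsafe without per-thread staging; hence the
+ // pragma below is left commented out.)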
+ // #pragma omp parallel for + for(auto dccrgId : cells) { + auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); + auto derivatives = mpiGrid[dccrgId]->derivatives; + auto volumeDerivatives = mpiGrid[dccrgId]->derivativesBVOL; + + const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, bgBGrid.getLocalSize(), dccrgId); + + for (auto fsgridId : fsgridIds) { + + std::array* thisCellData; + + thisCellData->at(fsgrids::bgbfield::BGBX) = cellParams[CellParams::BGBX]; + thisCellData->at(fsgrids::bgbfield::BGBY) = cellParams[CellParams::BGBY]; + thisCellData->at(fsgrids::bgbfield::BGBZ) = cellParams[CellParams::BGBZ]; + thisCellData->at(fsgrids::bgbfield::BGBXVOL) = cellParams[CellParams::BGBXVOL]; + thisCellData->at(fsgrids::bgbfield::BGBYVOL) = cellParams[CellParams::BGBYVOL]; + thisCellData->at(fsgrids::bgbfield::BGBZVOL) = cellParams[CellParams::BGBZVOL]; + + thisCellData->at(fsgrids::bgbfield::dBGBxdy) = derivatives[fieldsolver::dBGBxdy]; + thisCellData->at(fsgrids::bgbfield::dBGBxdz) = derivatives[fieldsolver::dBGBxdz]; + thisCellData->at(fsgrids::bgbfield::dBGBydx) = derivatives[fieldsolver::dBGBydx]; + thisCellData->at(fsgrids::bgbfield::dBGBydz) = derivatives[fieldsolver::dBGBydz]; + thisCellData->at(fsgrids::bgbfield::dBGBzdx) = derivatives[fieldsolver::dBGBzdx]; + thisCellData->at(fsgrids::bgbfield::dBGBzdy) = derivatives[fieldsolver::dBGBzdy]; + + thisCellData->at(fsgrids::bgbfield::dBGBXVOLdy) = volumeDerivatives[bvolderivatives::dBGBXVOLdy]; + thisCellData->at(fsgrids::bgbfield::dBGBXVOLdz) = volumeDerivatives[bvolderivatives::dBGBXVOLdz]; + thisCellData->at(fsgrids::bgbfield::dBGBYVOLdx) = volumeDerivatives[bvolderivatives::dBGBYVOLdx]; + thisCellData->at(fsgrids::bgbfield::dBGBYVOLdz) = volumeDerivatives[bvolderivatives::dBGBYVOLdz]; + thisCellData->at(fsgrids::bgbfield::dBGBZVOLdx) = volumeDerivatives[bvolderivatives::dBGBZVOLdx]; + thisCellData->at(fsgrids::bgbfield::dBGBZVOLdy) = volumeDerivatives[bvolderivatives::dBGBZVOLdy]; + + bgBGrid.transferDataIn(fsgridId, thisCellData); + } } // Finish the actual transfer @@ -102,38 +107,80 @@ void getVolumeFieldsFromFsGrid(FsGrid< std::array& mpiGrid, const std::vector& cells) { + // Setup transfer buffers - std::vector< std::array > transferBuffer(cells.size()); + int fsgridSize = cells.size() * pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level()),3); + std::vector< std::array > transferBuffer(fsgridSize); + std::vector< std::array*> transferBufferPointer; // Setup transfer pointers - volumeFieldsGrid.setupForTransferOut(cells.size()); - for(int i=0; i< cells.size(); i++) { - std::array* thisCellData = &transferBuffer[i]; - volumeFieldsGrid.transferDataOut(cells[i] - 1, thisCellData); + volumeFieldsGrid.setupForTransferOut(fsgridSize); + int k = 0; + for(auto dccrgId : cells) { + const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, volumeFieldsGrid.getLocalSize(), dccrgId); + // Store a pointer to the first fsgrid cell that maps to each dccrg Id + transferBufferPointer.push_back(&transferBuffer[k]); + for (auto fsgridId : fsgridIds) { + std::array* thisCellData = &transferBuffer[k++]; + volumeFieldsGrid.transferDataOut(fsgridId, thisCellData); + } } // Do the transfer volumeFieldsGrid.finishTransfersOut(); + // Build a list of index pairs to cellparams and fsgrid + std::vector> iCellParams; + iCellParams.reserve(6); + iCellParams.push_back(std::make_pair(CellParams::PERBXVOL, fsgrids::volfields::PERBXVOL)); + iCellParams.push_back(std::make_pair(CellParams::PERBYVOL, fsgrids::volfields::PERBYVOL)); + 
iCellParams.push_back(std::make_pair(CellParams::PERBZVOL, fsgrids::volfields::PERBZVOL)); + iCellParams.push_back(std::make_pair(CellParams::EXVOL, fsgrids::volfields::EXVOL)); + iCellParams.push_back(std::make_pair(CellParams::EYVOL, fsgrids::volfields::EYVOL)); + iCellParams.push_back(std::make_pair(CellParams::EZVOL, fsgrids::volfields::EZVOL)); + + // Build lists of index pairs to dccrg and fsgrid + std::vector> iDerivativesBVOL; + iDerivativesBVOL.reserve(6); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBXVOLdy, fsgrids::volfields::dPERBXVOLdy)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBXVOLdz, fsgrids::volfields::dPERBXVOLdz)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBYVOLdx, fsgrids::volfields::dPERBYVOLdx)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBYVOLdz, fsgrids::volfields::dPERBYVOLdz)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBZVOLdx, fsgrids::volfields::dPERBZVOLdx)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBZVOLdy, fsgrids::volfields::dPERBZVOLdy)); + // Distribute data from the transfer buffer back into the appropriate mpiGrid places #pragma omp parallel for - for(int i=0; i< cells.size(); i++) { - std::array* thisCellData = &transferBuffer[i]; - auto cellParams = mpiGrid[cells[i]]->get_cell_parameters(); - - cellParams[CellParams::PERBXVOL] = thisCellData->at(fsgrids::volfields::PERBXVOL); - cellParams[CellParams::PERBYVOL] = thisCellData->at(fsgrids::volfields::PERBYVOL); - cellParams[CellParams::PERBZVOL] = thisCellData->at(fsgrids::volfields::PERBZVOL); - cellParams[CellParams::EXVOL] = thisCellData->at(fsgrids::volfields::EXVOL); - cellParams[CellParams::EYVOL] = thisCellData->at(fsgrids::volfields::EYVOL); - cellParams[CellParams::EZVOL] = thisCellData->at(fsgrids::volfields::EZVOL); - mpiGrid[cells[i]]->derivativesBVOL[bvolderivatives::dPERBXVOLdy] = thisCellData->at(fsgrids::volfields::dPERBXVOLdy); - mpiGrid[cells[i]]->derivativesBVOL[bvolderivatives::dPERBXVOLdz] = thisCellData->at(fsgrids::volfields::dPERBXVOLdz); - mpiGrid[cells[i]]->derivativesBVOL[bvolderivatives::dPERBYVOLdx] = thisCellData->at(fsgrids::volfields::dPERBYVOLdx); - mpiGrid[cells[i]]->derivativesBVOL[bvolderivatives::dPERBYVOLdz] = thisCellData->at(fsgrids::volfields::dPERBYVOLdz); - mpiGrid[cells[i]]->derivativesBVOL[bvolderivatives::dPERBZVOLdx] = thisCellData->at(fsgrids::volfields::dPERBZVOLdx); - mpiGrid[cells[i]]->derivativesBVOL[bvolderivatives::dPERBZVOLdy] = thisCellData->at(fsgrids::volfields::dPERBZVOLdy); - } + for(uint i = 0; i < cells.size(); ++i) { + + int dccrgId = cells[i]; + auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); + + // Calculate the number of fsgrid cells we need to average into the current dccrg cell + auto refLvl = mpiGrid.mapping.get_refinement_level(dccrgId); + int nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); + + // TODO: Could optimize here by adding a separate branch for nCells == 1 with direct assignment of the value + // Could also do the average in a temporary value and only access grid structure once. + + // Initialize values to 0 + for (auto j : iCellParams) cellParams[j.first] = 0.0; + for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] = 0.0; + + for(int iCell = 0; iCell < nCells; ++iCell) { + // The fsgrid cells that cover the i'th dccrg cell are pointed at by + // transferBufferPointer[i] ... 
transferBufferPointer[i] + nCell. We want to average + // over all of them to get the value for the dccrg cell + std::array* thisCellData = transferBufferPointer[i] + iCell; + + for (auto j : iCellParams) cellParams[j.first] += thisCellData->at(j.second); + for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] += thisCellData->at(j.second); + } + // Divide by the number of cells to get the average + for (auto j : iCellParams) cellParams[j.first] /= nCells; + for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] /= nCells; + + } } @@ -144,119 +191,156 @@ void getDerivativesFromFsGrid(FsGrid< std::array, const std::vector& cells) { // Setup transfer buffers - std::vector< std::array > dperbTransferBuffer(cells.size()); - std::vector< std::array > dmomentsTransferBuffer(cells.size()); - std::vector< std::array > bgbfieldTransferBuffer(cells.size()); - - // Transfer dperbGrid data - dperbGrid.setupForTransferOut(cells.size()); - for(int i=0; i< cells.size(); i++) { - std::array* thisCellData = &dperbTransferBuffer[i]; - dperbGrid.transferDataOut(cells[i] - 1, thisCellData); + int fsgridSize = cells.size() * pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level()),3); + std::vector< std::array > dperbTransferBuffer(fsgridSize); + std::vector< std::array > dmomentsTransferBuffer(fsgridSize); + std::vector< std::array > bgbfieldTransferBuffer(fsgridSize); + + std::vector< std::array*> dperbTransferBufferPointer; + std::vector< std::array*> dmomentsTransferBufferPointer; + std::vector< std::array*> bgbfieldTransferBufferPointer; + + dperbGrid.setupForTransferOut(fsgridSize); + dmomentsGrid.setupForTransferOut(fsgridSize); + bgbfieldGrid.setupForTransferOut(fsgridSize); + + int k = 0; + for (auto dccrgId : cells) { + + // Assuming same local size in all fsgrids + const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, dperbGrid.getLocalSize(), dccrgId); + // Store a pointer to the first fsgrid cell that maps to each dccrg Id + dperbTransferBufferPointer.push_back(&dperbTransferBuffer[k]); + dmomentsTransferBufferPointer.push_back(&dmomentsTransferBuffer[k]); + bgbfieldTransferBufferPointer.push_back(&bgbfieldTransferBuffer[k]); + + for (auto fsgridId : fsgridIds) { + + std::array* dperbCellData = &dperbTransferBuffer[k]; + dperbGrid.transferDataOut(fsgridId, dperbCellData); + std::array* dmomentsCellData = &dmomentsTransferBuffer[k]; + dmomentsGrid.transferDataOut(fsgridId, dmomentsCellData); + std::array* bgbfieldCellData = &bgbfieldTransferBuffer[k++]; + bgbfieldGrid.transferDataOut(fsgridId, bgbfieldCellData); + } } + // Do the transfer dperbGrid.finishTransfersOut(); - - // Transfer dmomentsGrid data - dmomentsGrid.setupForTransferOut(cells.size()); - for(int i=0; i< cells.size(); i++) { - std::array* thisCellData = &dmomentsTransferBuffer[i]; - dmomentsGrid.transferDataOut(cells[i] - 1, thisCellData); - } - // Do the transfer dmomentsGrid.finishTransfersOut(); - - // Transfer bgbfieldGrid data - bgbfieldGrid.setupForTransferOut(cells.size()); - for(int i=0; i< cells.size(); i++) { - std::array* thisCellData = &bgbfieldTransferBuffer[i]; - bgbfieldGrid.transferDataOut(cells[i] - 1, thisCellData); - } - // Do the transfer bgbfieldGrid.finishTransfersOut(); + std::vector> iDmoments; + std::vector> iDperb; + std::vector> iBgbfield; + iDmoments.reserve(24); + iDmoments.push_back(std::make_pair(fieldsolver::drhomdx, fsgrids::dmoments::drhomdx)); + iDmoments.push_back(std::make_pair(fieldsolver::drhomdy, fsgrids::dmoments::drhomdy)); + 
iDmoments.push_back(std::make_pair(fieldsolver::drhomdz, fsgrids::dmoments::drhomdz)); + iDmoments.push_back(std::make_pair(fieldsolver::drhoqdx, fsgrids::dmoments::drhoqdx)); + iDmoments.push_back(std::make_pair(fieldsolver::drhoqdy, fsgrids::dmoments::drhoqdy)); + iDmoments.push_back(std::make_pair(fieldsolver::drhoqdz, fsgrids::dmoments::drhoqdz)); + iDmoments.push_back(std::make_pair(fieldsolver::dp11dx , fsgrids::dmoments::dp11dx )); + iDmoments.push_back(std::make_pair(fieldsolver::dp11dy , fsgrids::dmoments::dp11dy )); + iDmoments.push_back(std::make_pair(fieldsolver::dp11dz , fsgrids::dmoments::dp11dz )); + iDmoments.push_back(std::make_pair(fieldsolver::dp22dx , fsgrids::dmoments::dp22dx )); + iDmoments.push_back(std::make_pair(fieldsolver::dp22dy , fsgrids::dmoments::dp22dy )); + iDmoments.push_back(std::make_pair(fieldsolver::dp22dz , fsgrids::dmoments::dp22dz )); + iDmoments.push_back(std::make_pair(fieldsolver::dp33dx , fsgrids::dmoments::dp33dx )); + iDmoments.push_back(std::make_pair(fieldsolver::dp33dy , fsgrids::dmoments::dp33dy )); + iDmoments.push_back(std::make_pair(fieldsolver::dp33dz , fsgrids::dmoments::dp33dz )); + iDmoments.push_back(std::make_pair(fieldsolver::dVxdx , fsgrids::dmoments::dVxdx )); + iDmoments.push_back(std::make_pair(fieldsolver::dVxdy , fsgrids::dmoments::dVxdy )); + iDmoments.push_back(std::make_pair(fieldsolver::dVxdz , fsgrids::dmoments::dVxdz )); + iDmoments.push_back(std::make_pair(fieldsolver::dVydx , fsgrids::dmoments::dVydx )); + iDmoments.push_back(std::make_pair(fieldsolver::dVydy , fsgrids::dmoments::dVydy )); + iDmoments.push_back(std::make_pair(fieldsolver::dVydz , fsgrids::dmoments::dVydz )); + iDmoments.push_back(std::make_pair(fieldsolver::dVzdx , fsgrids::dmoments::dVzdx )); + iDmoments.push_back(std::make_pair(fieldsolver::dVzdy , fsgrids::dmoments::dVzdy )); + iDmoments.push_back(std::make_pair(fieldsolver::dVzdz , fsgrids::dmoments::dVzdz )); + + iDperb.reserve(15); + iDperb.push_back(std::make_pair(fieldsolver::dPERBxdy , fsgrids::dperb::dPERBxdy )); + iDperb.push_back(std::make_pair(fieldsolver::dPERBxdz , fsgrids::dperb::dPERBxdz )); + iDperb.push_back(std::make_pair(fieldsolver::dPERBydx , fsgrids::dperb::dPERBydx )); + iDperb.push_back(std::make_pair(fieldsolver::dPERBydz , fsgrids::dperb::dPERBydz )); + iDperb.push_back(std::make_pair(fieldsolver::dPERBzdx , fsgrids::dperb::dPERBzdx )); + iDperb.push_back(std::make_pair(fieldsolver::dPERBzdy , fsgrids::dperb::dPERBzdy )); + iDperb.push_back(std::make_pair(fieldsolver::dPERBxdyy, fsgrids::dperb::dPERBxdyy)); + iDperb.push_back(std::make_pair(fieldsolver::dPERBxdzz, fsgrids::dperb::dPERBxdzz)); + iDperb.push_back(std::make_pair(fieldsolver::dPERBydxx, fsgrids::dperb::dPERBydxx)); + iDperb.push_back(std::make_pair(fieldsolver::dPERBydzz, fsgrids::dperb::dPERBydzz)); + iDperb.push_back(std::make_pair(fieldsolver::dPERBzdxx, fsgrids::dperb::dPERBzdxx)); + iDperb.push_back(std::make_pair(fieldsolver::dPERBzdyy, fsgrids::dperb::dPERBzdyy)); + iDperb.push_back(std::make_pair(fieldsolver::dPERBxdyz, fsgrids::dperb::dPERBxdyz)); + iDperb.push_back(std::make_pair(fieldsolver::dPERBydxz, fsgrids::dperb::dPERBydxz)); + iDperb.push_back(std::make_pair(fieldsolver::dPERBzdxy, fsgrids::dperb::dPERBzdxy)); + + iBgbfield.reserve(6); + iBgbfield.push_back(std::make_pair(fieldsolver::dBGBxdy, fsgrids::bgbfield::dBGBxdy)); + iBgbfield.push_back(std::make_pair(fieldsolver::dBGBxdz, fsgrids::bgbfield::dBGBxdz)); + iBgbfield.push_back(std::make_pair(fieldsolver::dBGBydx, 
fsgrids::bgbfield::dBGBydx)); + iBgbfield.push_back(std::make_pair(fieldsolver::dBGBydz, fsgrids::bgbfield::dBGBydz)); + iBgbfield.push_back(std::make_pair(fieldsolver::dBGBzdx, fsgrids::bgbfield::dBGBzdx)); + iBgbfield.push_back(std::make_pair(fieldsolver::dBGBzdy, fsgrids::bgbfield::dBGBzdy)); + // Distribute data from the transfer buffers back into the appropriate mpiGrid places #pragma omp parallel for - for(int i=0; i< cells.size(); i++) { - std::array* dperb = &dperbTransferBuffer[i]; - std::array* dmoments = &dmomentsTransferBuffer[i]; - std::array* bgbfield = &bgbfieldTransferBuffer[i]; - auto cellParams = mpiGrid[cells[i]]->get_cell_parameters(); - - mpiGrid[cells[i]]->derivatives[fieldsolver::drhomdx] = dmoments->at(fsgrids::dmoments::drhomdx); - mpiGrid[cells[i]]->derivatives[fieldsolver::drhomdy] = dmoments->at(fsgrids::dmoments::drhomdy); - mpiGrid[cells[i]]->derivatives[fieldsolver::drhomdz] = dmoments->at(fsgrids::dmoments::drhomdz); - mpiGrid[cells[i]]->derivatives[fieldsolver::drhoqdx] = dmoments->at(fsgrids::dmoments::drhoqdx); - mpiGrid[cells[i]]->derivatives[fieldsolver::drhoqdy] = dmoments->at(fsgrids::dmoments::drhoqdy); - mpiGrid[cells[i]]->derivatives[fieldsolver::drhoqdz] = dmoments->at(fsgrids::dmoments::drhoqdz); - mpiGrid[cells[i]]->derivatives[fieldsolver::dp11dx] = dmoments->at(fsgrids::dmoments::dp11dx); - mpiGrid[cells[i]]->derivatives[fieldsolver::dp11dy] = dmoments->at(fsgrids::dmoments::dp11dy); - mpiGrid[cells[i]]->derivatives[fieldsolver::dp11dz] = dmoments->at(fsgrids::dmoments::dp11dz); - mpiGrid[cells[i]]->derivatives[fieldsolver::dp22dx] = dmoments->at(fsgrids::dmoments::dp22dx); - mpiGrid[cells[i]]->derivatives[fieldsolver::dp22dy] = dmoments->at(fsgrids::dmoments::dp22dy); - mpiGrid[cells[i]]->derivatives[fieldsolver::dp22dz] = dmoments->at(fsgrids::dmoments::dp22dz); - mpiGrid[cells[i]]->derivatives[fieldsolver::dp33dx] = dmoments->at(fsgrids::dmoments::dp33dx); - mpiGrid[cells[i]]->derivatives[fieldsolver::dp33dy] = dmoments->at(fsgrids::dmoments::dp33dy); - mpiGrid[cells[i]]->derivatives[fieldsolver::dp33dz] = dmoments->at(fsgrids::dmoments::dp33dz); - - mpiGrid[cells[i]]->derivatives[fieldsolver::dVxdx] = dmoments->at(fsgrids::dmoments::dVxdx); - mpiGrid[cells[i]]->derivatives[fieldsolver::dVxdy] = dmoments->at(fsgrids::dmoments::dVxdy); - mpiGrid[cells[i]]->derivatives[fieldsolver::dVxdz] = dmoments->at(fsgrids::dmoments::dVxdz); - mpiGrid[cells[i]]->derivatives[fieldsolver::dVydx] = dmoments->at(fsgrids::dmoments::dVydx); - mpiGrid[cells[i]]->derivatives[fieldsolver::dVydy] = dmoments->at(fsgrids::dmoments::dVydy); - mpiGrid[cells[i]]->derivatives[fieldsolver::dVydz] = dmoments->at(fsgrids::dmoments::dVydz); - mpiGrid[cells[i]]->derivatives[fieldsolver::dVzdx] = dmoments->at(fsgrids::dmoments::dVzdx); - mpiGrid[cells[i]]->derivatives[fieldsolver::dVzdy] = dmoments->at(fsgrids::dmoments::dVzdy); - mpiGrid[cells[i]]->derivatives[fieldsolver::dVzdz] = dmoments->at(fsgrids::dmoments::dVzdz); - - mpiGrid[cells[i]]->derivatives[fieldsolver::dPERBxdy] = dperb->at(fsgrids::dperb::dPERBxdy); - mpiGrid[cells[i]]->derivatives[fieldsolver::dPERBxdz] = dperb->at(fsgrids::dperb::dPERBxdz); - mpiGrid[cells[i]]->derivatives[fieldsolver::dPERBydx] = dperb->at(fsgrids::dperb::dPERBydx); - mpiGrid[cells[i]]->derivatives[fieldsolver::dPERBydz] = dperb->at(fsgrids::dperb::dPERBydz); - mpiGrid[cells[i]]->derivatives[fieldsolver::dPERBzdx] = dperb->at(fsgrids::dperb::dPERBzdx); - mpiGrid[cells[i]]->derivatives[fieldsolver::dPERBzdy] = 
dperb->at(fsgrids::dperb::dPERBzdy); - - mpiGrid[cells[i]]->derivatives[fieldsolver::dPERBxdyy] = dperb->at(fsgrids::dperb::dPERBxdyy); - mpiGrid[cells[i]]->derivatives[fieldsolver::dPERBxdzz] = dperb->at(fsgrids::dperb::dPERBxdzz); - mpiGrid[cells[i]]->derivatives[fieldsolver::dPERBydxx] = dperb->at(fsgrids::dperb::dPERBydxx); - mpiGrid[cells[i]]->derivatives[fieldsolver::dPERBydzz] = dperb->at(fsgrids::dperb::dPERBydzz); - mpiGrid[cells[i]]->derivatives[fieldsolver::dPERBzdxx] = dperb->at(fsgrids::dperb::dPERBzdxx); - mpiGrid[cells[i]]->derivatives[fieldsolver::dPERBzdyy] = dperb->at(fsgrids::dperb::dPERBzdyy); - mpiGrid[cells[i]]->derivatives[fieldsolver::dPERBxdyz] = dperb->at(fsgrids::dperb::dPERBxdyz); - mpiGrid[cells[i]]->derivatives[fieldsolver::dPERBydxz] = dperb->at(fsgrids::dperb::dPERBydxz); - mpiGrid[cells[i]]->derivatives[fieldsolver::dPERBzdxy] = dperb->at(fsgrids::dperb::dPERBzdxy); - - mpiGrid[cells[i]]->derivatives[fieldsolver::dBGBxdy] = bgbfield->at(fsgrids::bgbfield::dBGBxdy); - mpiGrid[cells[i]]->derivatives[fieldsolver::dBGBxdz] = bgbfield->at(fsgrids::bgbfield::dBGBxdz); - mpiGrid[cells[i]]->derivatives[fieldsolver::dBGBydx] = bgbfield->at(fsgrids::bgbfield::dBGBydx); - mpiGrid[cells[i]]->derivatives[fieldsolver::dBGBydz] = bgbfield->at(fsgrids::bgbfield::dBGBydz); - mpiGrid[cells[i]]->derivatives[fieldsolver::dBGBzdx] = bgbfield->at(fsgrids::bgbfield::dBGBzdx); - mpiGrid[cells[i]]->derivatives[fieldsolver::dBGBzdy] = bgbfield->at(fsgrids::bgbfield::dBGBzdy); - } + for(uint i = 0; i < cells.size(); ++i) { + + int dccrgId = cells[i]; + + // Calculate the number of fsgrid cells we need to average into the current dccrg cell + auto refLvl = mpiGrid.mapping.get_refinement_level(dccrgId); + int nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); + + for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; + for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; + for (auto j : iBgbfield) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; + + for(int iCell = 0; iCell < nCells; ++iCell) { + // The fsgrid cells that cover the i'th dccrg cell are pointed at by + // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. 
We want to average + // over all of them to get the value for the dccrg cell + + std::array* dperb = dperbTransferBufferPointer[i] + iCell; + std::array* dmoments = dmomentsTransferBufferPointer[i] + iCell; + std::array* bgbfield = bgbfieldTransferBufferPointer[i] + iCell; + + for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] += dmoments->at(j.second); + for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] += dperb ->at(j.second); + for (auto j : iBgbfield) mpiGrid[dccrgId]->derivatives[j.first] += bgbfield->at(j.second); + } + for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] /= nCells; + for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] /= nCells; + for (auto j : iBgbfield) mpiGrid[dccrgId]->derivatives[j.first] /= nCells; + + } } void setupTechnicalFsGrid(dccrg::Dccrg& mpiGrid, const std::vector& cells, FsGrid< fsgrids::technical, 2>& technicalGrid) { - technicalGrid.setupForTransferIn(cells.size()); + int fsgridSize = cells.size() * pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level()),3); + + technicalGrid.setupForTransferIn(fsgridSize); - // Fill the transfer buffers from the spatial cell structs - std::vector transferBuffer(cells.size()); + //#pragma omp parallel for + for(auto dccrgId : cells) { - #pragma omp parallel for - for(int i=0; i< cells.size(); i++) { + fsgrids::technical* thisCellData; + const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, technicalGrid.getLocalSize(), dccrgId); - fsgrids::technical* thisCellData = &transferBuffer[i]; - // Data needs to be collected from some different places for this grid. - thisCellData->sysBoundaryFlag = mpiGrid[cells[i]]->sysBoundaryFlag; - thisCellData->sysBoundaryLayer = mpiGrid[cells[i]]->sysBoundaryLayer; - //thisCellData->maxFsDt = mpiGrid[i]->get_cell_parameters()[CellParams::MAXFDT]; - thisCellData->maxFsDt = std::numeric_limits::max(); - } - for(int i=0; i< cells.size(); i++) { - technicalGrid.transferDataIn(cells[i] - 1,&transferBuffer[i]); + for (auto fsgridId : fsgridIds) { + // Data needs to be collected from some different places for this grid. + thisCellData->sysBoundaryFlag = mpiGrid[dccrgId]->sysBoundaryFlag; + thisCellData->sysBoundaryLayer = mpiGrid[dccrgId]->sysBoundaryLayer; + thisCellData->maxFsDt = std::numeric_limits::max(); + + technicalGrid.transferDataIn(fsgridId,thisCellData); + } } technicalGrid.finishTransfersIn(); @@ -288,7 +372,7 @@ void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid, } /* -Map from fsgrid cell ids to dccrg cell ids when they aren't identical (ie. when dccrg has refinement). +Map from fsgrid cell id to dccrg cell id when they aren't identical (ie. when dccrg has refinement). */ CellID mapFsGridIdToDccrg(FsGrid< fsgrids::technical, 2>& technicalGrid, dccrg::Dccrg& mpiGrid, @@ -309,20 +393,37 @@ CellID mapFsGridIdToDccrg(FsGrid< fsgrids::technical, 2>& technicalGrid, } /* -Map from dccrg cell ids to fsgrid cell ids when they aren't identical (ie. when dccrg has refinement). +Map from dccrg cell id to fsgrid cell ids when they aren't identical (ie. when dccrg has refinement). 
*/ -CellID mapDccrgIdToFsGrid(FsGrid< fsgrids::technical, 2>& technicalGrid, - dccrg::Dccrg& mpiGrid, - CellID dccrgID) { - auto indices = mpiGrid.mapping.get_indices(dccrgID); +std::vector mapDccrgIdToFsGrid(dccrg::Dccrg& mpiGrid, + std::array fsgridDims, CellID dccrgID) { + const auto cellLength = mpiGrid.mapping.get_cell_length_in_indices(dccrgID); + const auto gridLength = mpiGrid.length.get(); + const auto maxRefLvl = mpiGrid.mapping.get_maximum_refinement_level(); + + std::vector> indices; + for (uint i = 0; i < cellLength; ++i) { + for (uint j = 0; j < cellLength; ++j) { + for (uint k = 0; k < cellLength; ++k) { + CellID id = dccrgID + i + + (j * gridLength[0] * pow(2,maxRefLvl)) + + (k * gridLength[1] * pow(2,maxRefLvl) * gridLength[0] * pow(2,maxRefLvl)); + auto ids = mpiGrid.mapping.get_indices(id); + std::array cellCoord; + for (uint m = 0; m < 3; m++) { + cellCoord[m] = static_cast(ids[m]); + } + indices.push_back(cellCoord); + } + } + } + std::vector fsgridIDs; // The indices we get from dccrg are directly coordinates at the finest refinement level. // Therefore, they should match fsgrid coordinates exactly. - std::array cellCoord; - for (uint i = 0;i < 3; ++i) { - cellCoord[i] = indices[i]; + for (auto cellCoord: indices) { + fsgridIDs.push_back(cellCoord[0] + cellCoord[1] * fsgridDims[0] + cellCoord[2] * fsgridDims[1]); } - CellID fsgridID = technicalGrid.cellCoordtoGlobalID(cellCoord); - return fsgridID; + return fsgridIDs; } diff --git a/fieldsolver/gridGlue.hpp b/fieldsolver/gridGlue.hpp index 48e793053..b0ae25378 100644 --- a/fieldsolver/gridGlue.hpp +++ b/fieldsolver/gridGlue.hpp @@ -101,6 +101,40 @@ template< unsigned int numFields > void feedFieldDataIntoFsGrid( targetGrid.finishTransfersIn(); } +/*! Transfer field data from DCCRG cellparams into the appropriate FsGrid structure + * \param mpiGrid The DCCRG grid carrying fieldparam data + * \param cells List of local cells + * \param index Index into the cellparams array from which to copy + * \param targetGrid Fieldsolver grid for these quantities + * + * The cellparams with indices from index to index+numFields are copied over, and + * have to be continuous in memory. + * + * This function assumes that proper grid coupling has been set up. + */ + +template< unsigned int numFields > void feedFieldDataIntoFsGridAmr( + dccrg::Dccrg& mpiGrid, + const std::vector& cells, int cellParamsIndex, + FsGrid< std::array, 2>& targetGrid) { + + targetGrid.setupForTransferIn(cells.size() * pow(2,mpiGrid.mapping.get_maximum_refinement_level())); + + for(CellID dccrgId : cells) { + const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, targetGrid.getLocalSize(), dccrgId); + // TODO: This assumes that the field data are lying continuous in memory. + // Check definition of CellParams in common.h if unsure. + std::array* cellDataPointer = reinterpret_cast*>( + &(mpiGrid[dccrgId]->get_cell_parameters()[cellParamsIndex])); + for (auto fsgridId : fsgridIds) { + targetGrid.transferDataIn(fsgridId, cellDataPointer); + } + } + + targetGrid.finishTransfersIn(); +} + + /*! 
Transfer field data from an FsGrid back into the appropriate CellParams slot in DCCRG * \param sourceGrid Fieldsolver grid for these quantities * \param mpiGrid The DCCRG grid carrying fieldparam data @@ -130,3 +164,10 @@ template< unsigned int numFields > void getFieldDataFromFsGrid( sourceGrid.finishTransfersOut(); } +std::vector mapDccrgIdToFsGrid(dccrg::Dccrg& mpiGrid, + std::array fsgridDims, CellID dccrgID); + +CellID mapFsGridIdToDccrg(FsGrid< fsgrids::technical, 2>& technicalGrid, + dccrg::Dccrg& mpiGrid, + CellID fsgridID); + diff --git a/vlasiator.cpp b/vlasiator.cpp index 36a36a5f2..950980301 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -401,8 +401,8 @@ int main(int argn,char* args[]) { convert(P::zcells_ini) * pow(2,P::amrMaxSpatialRefLevel)}; const int fsGridSize = fsGridDimensions[0] * fsGridDimensions[1] * fsGridDimensions[2]; std::array periodicity{mpiGrid.topology.is_periodic(0), - mpiGrid.topology.is_periodic(1), - mpiGrid.topology.is_periodic(2)}; + mpiGrid.topology.is_periodic(1), + mpiGrid.topology.is_periodic(2)}; FsGrid< std::array, 2> perBGrid(fsGridDimensions, comm, periodicity); FsGrid< std::array, 2> perBDt2Grid(fsGridDimensions, comm, periodicity); FsGrid< std::array, 2> EGrid(fsGridDimensions, comm, periodicity); @@ -434,49 +434,49 @@ int main(int argn,char* args[]) { // Couple FSGrids to mpiGrid // TODO: Do we really need to couple *all* of these fields? - perBGrid.setupForGridCoupling(fsGridSize); - perBDt2Grid.setupForGridCoupling(fsGridSize); - EGrid.setupForGridCoupling(fsGridSize); - EDt2Grid.setupForGridCoupling(fsGridSize); - EHallGrid.setupForGridCoupling(fsGridSize); - EGradPeGrid.setupForGridCoupling(fsGridSize); - momentsGrid.setupForGridCoupling(fsGridSize); + perBGrid. setupForGridCoupling(fsGridSize); + perBDt2Grid. setupForGridCoupling(fsGridSize); + EGrid. setupForGridCoupling(fsGridSize); + EDt2Grid. setupForGridCoupling(fsGridSize); + EHallGrid. setupForGridCoupling(fsGridSize); + EGradPeGrid. setupForGridCoupling(fsGridSize); + momentsGrid. setupForGridCoupling(fsGridSize); momentsDt2Grid.setupForGridCoupling(fsGridSize); - dPerBGrid.setupForGridCoupling(fsGridSize); - dMomentsGrid.setupForGridCoupling(fsGridSize); - BgBGrid.setupForGridCoupling(fsGridSize); - volGrid.setupForGridCoupling(fsGridSize); - technicalGrid.setupForGridCoupling(fsGridSize); + dPerBGrid. setupForGridCoupling(fsGridSize); + dMomentsGrid. setupForGridCoupling(fsGridSize); + BgBGrid. setupForGridCoupling(fsGridSize); + volGrid. setupForGridCoupling(fsGridSize); + technicalGrid. setupForGridCoupling(fsGridSize); // FSGrid cellIds are 0-based, whereas DCCRG cellIds are 1-based, beware - for(auto& cellId : cells) { - perBGrid.setGridCoupling(cellId-1, myRank); - perBDt2Grid.setGridCoupling(cellId-1, myRank); - EGrid.setGridCoupling(cellId-1, myRank); - EDt2Grid.setGridCoupling(cellId-1, myRank); - EHallGrid.setGridCoupling(cellId-1, myRank); - EGradPeGrid.setGridCoupling(cellId-1, myRank); - momentsGrid.setGridCoupling(cellId-1, myRank); - momentsDt2Grid.setGridCoupling(cellId-1, myRank); - dPerBGrid.setGridCoupling(cellId-1, myRank); - dMomentsGrid.setGridCoupling(cellId-1, myRank); - BgBGrid.setGridCoupling(cellId-1, myRank); - volGrid.setGridCoupling(cellId-1, myRank); - technicalGrid.setGridCoupling(cellId-1, myRank); + for(int cellId = 0;cellId < fsGridSize; ++cellId) { + perBGrid. setGridCoupling(cellId, myRank); + perBDt2Grid. setGridCoupling(cellId, myRank); + EGrid. setGridCoupling(cellId, myRank); + EDt2Grid. setGridCoupling(cellId, myRank); + EHallGrid. 
setGridCoupling(cellId, myRank); + EGradPeGrid. setGridCoupling(cellId, myRank); + momentsGrid. setGridCoupling(cellId, myRank); + momentsDt2Grid.setGridCoupling(cellId, myRank); + dPerBGrid. setGridCoupling(cellId, myRank); + dMomentsGrid. setGridCoupling(cellId, myRank); + BgBGrid. setGridCoupling(cellId, myRank); + volGrid. setGridCoupling(cellId, myRank); + technicalGrid. setGridCoupling(cellId, myRank); } - perBGrid.finishGridCoupling(); - perBDt2Grid.finishGridCoupling(); - EGrid.finishGridCoupling(); - EDt2Grid.finishGridCoupling(); - EHallGrid.finishGridCoupling(); - EGradPeGrid.finishGridCoupling(); - momentsGrid.finishGridCoupling(); + perBGrid. finishGridCoupling(); + perBDt2Grid. finishGridCoupling(); + EGrid. finishGridCoupling(); + EDt2Grid. finishGridCoupling(); + EHallGrid. finishGridCoupling(); + EGradPeGrid. finishGridCoupling(); + momentsGrid. finishGridCoupling(); momentsDt2Grid.finishGridCoupling(); - dPerBGrid.finishGridCoupling(); - dMomentsGrid.finishGridCoupling(); - BgBGrid.finishGridCoupling(); - volGrid.finishGridCoupling(); - technicalGrid.finishGridCoupling(); + dPerBGrid. finishGridCoupling(); + dMomentsGrid. finishGridCoupling(); + BgBGrid. finishGridCoupling(); + volGrid. finishGridCoupling(); + technicalGrid. finishGridCoupling(); phiprof::stop("Initial fsgrid coupling"); // Transfer initial field configuration into the FsGrids From 2419b0a462295a11219838833fdb4bd63373e140 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 3 Oct 2018 10:42:38 +0300 Subject: [PATCH 074/602] Added a split() function to setOfPencils that will be used when checking refinement of ghost cells in trans_map_1d_amr. --- fieldsolver/gridGlue.cpp | 34 +++++------ vlasovsolver/cpu_trans_map_amr.cpp | 96 ++++++++++++++++++++++++++---- vlasovsolver/cpu_trans_map_amr.hpp | 15 +++++ 3 files changed, 117 insertions(+), 28 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 7b6337974..ac568e4f3 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -308,21 +308,21 @@ CellID mapFsGridIdToDccrg(FsGrid< fsgrids::technical, 2>& technicalGrid, return dccrgID; } -/* -Map from dccrg cell ids to fsgrid cell ids when they aren't identical (ie. when dccrg has refinement). -*/ - -CellID mapDccrgIdToFsGrid(FsGrid< fsgrids::technical, 2>& technicalGrid, - dccrg::Dccrg& mpiGrid, - CellID dccrgID) { - auto indices = mpiGrid.mapping.get_indices(dccrgID); - // The indices we get from dccrg are directly coordinates at the finest refinement level. - // Therefore, they should match fsgrid coordinates exactly. - std::array cellCoord; - for (uint i = 0;i < 3; ++i) { - cellCoord[i] = indices[i]; - } - CellID fsgridID = technicalGrid.cellCoordtoGlobalID(cellCoord); - return fsgridID; -} +// /* +// Map from dccrg cell ids to fsgrid cell ids when they aren't identical (ie. when dccrg has refinement). +// */ + +// CellID mapDccrgIdToFsGrid(FsGrid< fsgrids::technical, 2>& technicalGrid, +// dccrg::Dccrg& mpiGrid, +// CellID dccrgID) { +// auto indices = mpiGrid.mapping.get_indices(dccrgID); +// // The indices we get from dccrg are directly coordinates at the finest refinement level. +// // Therefore, they should match fsgrid coordinates exactly. 
+// std::array cellCoord; +// for (uint i = 0;i < 3; ++i) { +// cellCoord[i] = indices[i]; +// } +// CellID fsgridID = technicalGrid.cellCoordtoGlobalID(cellCoord); +// return fsgridID; +// } diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index ece287477..70799a379 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -614,6 +614,60 @@ void copy_trans_block_data_amr( } } +/* +Check whether the ghost cells around the pencil contain higher refinement than the pencil does. +If they do, the pencil must be split to match the finest refined ghost cell. This function checks +one neighbor pair, but takes as an argument the offset from the pencil. Call it multiple times for +multiple ghost cells. + */ +void check_ghost_cells(const dccrg::Dccrg& mpiGrid, + vector& pencilSets, + uint dimension, + uint offset) { + + uint neighborhoodId; + switch (dimension) { + case 0: + neighborhoodId = VLASOV_SOLVER_X_NEIGHBORHOOD_ID; + break; + case 1: + neighborhoodId = VLASOV_SOLVER_Y_NEIGHBORHOOD_ID; + break; + case 2: + neighborhoodId = VLASOV_SOLVER_Z_NEIGHBORHOOD_ID; + break; + } + + for (setOfPencils pencils : pencilSets) { + for (uint pencili = 0; pencili < pencils.N; ++pencili) { + auto ids = pencils.getIds(pencili); + CellID maxId = *std::max_element(ids.begin(),ids.end()); + int maxRefLvl = mpiGrid.mapping.get_refinement_level(maxId); + + const auto* frontNeighbors = mpiGrid.get_neighbors_of(ids.front(),neighborhoodId); + int refLvl = 0; + for (pair> nbrPair: *frontNeighbors) { + if(nbrPair.second[dimension] == -offset) { + refLvl = max(refLvl,mpiGrid.mapping.get_refinement_level(nbrPair.first)); + } + } + + const auto* backNeighbors = mpiGrid.get_neighbors_of(ids.back(),neighborhoodId); + for (pair> nbrPair: *backNeighbors) { + if(nbrPair.second[dimension] == offset) { + refLvl = max(refLvl,mpiGrid.mapping.get_refinement_level(nbrPair.first)); + } + } + + if (refLvl > maxRefLvl) { + // TODO: Double-check that this gives you the right dimensions! + Realv dx = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DX]; + Realv dy = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DY]; + pencils.split(pencili,dx,dy); + } + } + } +} bool trans_map_1d_amr(const dccrg::Dccrg& mpiGrid, const vector& localPropagatedCells, @@ -714,7 +768,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& for (const auto seedId : seedIds) { // Construct pencils from the seedIds into a set of pencils.
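 // (Each seed id starts at least one pencil; 'ids' and 'path' accumulate the
 // cell list and the branch choices as the builder walks along 'dimension'.)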
pencils = buildPencilsWithNeighbors(mpiGrid, pencils, seedId, ids, dimension, path); - } + } // Print out ids of pencils (if needed for debugging) if (true) { @@ -733,22 +787,42 @@ bool trans_map_1d_amr(const dccrg::Dccrg& std::cout << std::endl; } - // CellID id = 56; - // const vector* neighbors = mpiGrid.get_neighbors_of(id, VLASOV_SOLVER_X_NEIGHBORHOOD_ID); - // if (neighbors != NULL) { - // std::cout << "Neighbors of cell " << id << std::endl; - // for (auto neighbor : *neighbors) { - // std::cout << neighbor << std::endl; - // } - // } + CellID idX = 55; + const auto* neighborsX = mpiGrid.get_neighbors_of(idX, VLASOV_SOLVER_X_NEIGHBORHOOD_ID); + if (neighborsX != NULL) { + std::cout << "Neighbors of cell " << idX << " in x dimension" << std::endl; + for (auto neighbor : *neighborsX) { + std::cout << neighbor.first << ", "; + for (int n = 0; n < 4; ++n) { + std::cout << neighbor.second[n] << " "; + } + std::cout << std::endl; + } + } + + CellID idY = 46; + const auto* neighborsY = mpiGrid.get_neighbors_of(idY, VLASOV_SOLVER_Y_NEIGHBORHOOD_ID); + if (neighborsY != NULL) { + std::cout << "Neighbors of cell " << idY << " in y dimension" << std::endl; + for (auto neighbor : *neighborsY) { + std::cout << neighbor.first << ", "; + for (int n = 0; n < 4; ++n) { + std::cout << neighbor.second[n] << " "; + } + std::cout << std::endl; + } + } } // Add the final set of pencils to the pencilSets - vector. // Only one set is created for now but we retain support for multiple sets pencilSets.push_back(pencils); - // **************************************************************************** - + + // Check refinement of two ghost cells on each end of each pencil + + // **************************************************************************** + const uint8_t VMESH_REFLEVEL = 0; // Get a pointer to the velocity mesh of the first spatial cell diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index e27e115c1..476e84f91 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -74,6 +74,21 @@ struct setOfPencils { return idsOut; } + // Split one pencil into four pencils covering the same space. + // dx and dy are the dimensions of the original pencil. 
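+ // The original entry is kept as one of the four sub-pencils, shifted by a
+ // quarter of the original width in x and y; three further copies of its
+ // cell list are appended at the remaining quarter-width offsets.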
+ void split(const uint pencilId, const Realv dx, const Realv dy) { + + x[pencilId] += 0.25 * dx; + y[pencilId] += 0.25 * dy; + + auto ids = getIds(pencilId); + + addPencil(ids, x[pencilId] + 0.25 * dx, y[pencilId] , periodic[pencilId]); + addPencil(ids, x[pencilId] , y[pencilId] + 0.25 * dy, periodic[pencilId]); + addPencil(ids, x[pencilId] + 0.25 * dx, y[pencilId] + 0.25 * dy, periodic[pencilId]); + + } + }; void compute_spatial_source_cells_for_pencil(const dccrg::Dccrg& mpiGrid, From 4ce7573d685ee75a307443d4f8db761e9ab7b922 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 3 Oct 2018 11:43:02 +0300 Subject: [PATCH 075/602] Fixed a bug in setOfPencils.getIds that returned one extra id --- vlasovsolver/cpu_trans_map_amr.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index 476e84f91..b20879f2e 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -67,7 +67,7 @@ struct setOfPencils { } CellID iend = ibeg + lengthOfPencils[pencilId]; - for (uint i = ibeg; i <= iend; i++) { + for (uint i = ibeg; i < iend; i++) { idsOut.push_back(ids[i]); } From cd6a92eebc35d0ab783cba7d3da08e57a2fd743a Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 3 Oct 2018 11:52:57 +0300 Subject: [PATCH 076/602] Fixed bugs in check_ghost_cells. Should be ok now. --- vlasovsolver/cpu_trans_map_amr.cpp | 56 +++++++++++++++++++++++++----- 1 file changed, 47 insertions(+), 9 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 70799a379..d3851f1c8 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -202,7 +202,7 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg 0) { @@ -623,7 +623,7 @@ Multiple ghost cells. void check_ghost_cells(const dccrg::Dccrg& mpiGrid, vector& pencilSets, uint dimension, - uint offset) { + int offset) { uint neighborhoodId; switch (dimension) { @@ -636,35 +636,71 @@ void check_ghost_cells(const dccrg::Dccrg case 2: neighborhoodId = VLASOV_SOLVER_Z_NEIGHBORHOOD_ID; break; + default: + neighborhoodId = 0; + break; } + std::vector idsToSplit; + for (setOfPencils pencils : pencilSets) { + cout << "pencils.N = " << pencils.N << endl; for (uint pencili = 0; pencili < pencils.N; ++pencili) { + + if(pencils.periodic[pencili]) continue; + auto ids = pencils.getIds(pencili); CellID maxId = *std::max_element(ids.begin(),ids.end()); int maxRefLvl = mpiGrid.mapping.get_refinement_level(maxId); const auto* frontNeighbors = mpiGrid.get_neighbors_of(ids.front(),neighborhoodId); + const auto* backNeighbors = mpiGrid.get_neighbors_of(ids.back() ,neighborhoodId); int refLvl = 0; + for (pair> nbrPair: *frontNeighbors) { if(nbrPair.second[dimension] == -offset) { refLvl = max(refLvl,mpiGrid.mapping.get_refinement_level(nbrPair.first)); } } - const auto* backNeighbors = mpiGrid.get_neighbors_of(ids.back(),neighborhoodId); for (pair> nbrPair: *backNeighbors) { if(nbrPair.second[dimension] == offset) { refLvl = max(refLvl,mpiGrid.mapping.get_refinement_level(nbrPair.first)); } } - + if (refLvl > maxRefLvl) { - // TODO: Double-check that this gives you the right dimensions! - Realv dx = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DX]; - Realv dy = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DY]; - pencils.split(pencili,dx,dy); + //std::cout << "Found refinement level " << refLvl << " in one of the ghost cells. 
Splitting pencil " << pencili << endl; + // Let's avoid modifying pencils while we are looping over it. Write down the indices of pencils + // that need to be split and split them later. + idsToSplit.push_back(pencili); + } + } + + for (auto pencili: idsToSplit) { + + + Realv dx = 0.0; + Realv dy = 0.0; + // TODO: Double-check that this gives you the right dimensions! + auto ids = pencils.getIds(pencili); + switch(dimension) { + case 0: + dx = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DY]; + dy = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DZ]; + break; + case 1: + dx = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DX]; + dy = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DZ]; + break; + case 2: + dx = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DX]; + dy = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DY]; + break; } + + pencils.split(pencili,dx,dy); + } } } @@ -820,7 +856,9 @@ bool trans_map_1d_amr(const dccrg::Dccrg& pencilSets.push_back(pencils); // Check refinement of two ghost cells on each end of each pencil - + for (int offset = 1; offset <= VLASOV_STENCIL_WIDTH; ++offset) { + check_ghost_cells(mpiGrid,pencilSets,dimension,offset); + } // **************************************************************************** const uint8_t VMESH_REFLEVEL = 0; From 82ff92bac49e0e7272a94ff105d12659f9d59cb0 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 3 Oct 2018 14:56:56 +0300 Subject: [PATCH 077/602] Fixed how the number of sends/receives to/from dccrg cells to fsgrid cells is calculated. Cells have to be counted one by one, checking the refinement level of each cell and adding 2**(maxRefLvl-refLvl)**3 cells for each. --- fieldsolver/gridGlue.cpp | 51 ++++++++++++++++++++++++++-------------- fieldsolver/gridGlue.hpp | 7 ++++-- 2 files changed, 39 insertions(+), 19 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 13305a8bb..75c6dbbdb 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -6,12 +6,29 @@ #include "../common.h" #include "gridGlue.hpp" +/* +Calculate the number of cells on the maximum refinement level overlapping the list of dccrg cells in cells. 
+*/ +int getNumberOfCellsOnMaxRefLvl(dccrg::Dccrg& mpiGrid, const std::vector& cells) { + + int nCells = 0; + auto maxRefLvl = mpiGrid.mapping.get_maximum_refinement_level(); + + for (auto cellid : cells) { + auto refLvl = mpiGrid.get_refinement_level(cellid); + nCells += pow(pow(2,maxRefLvl-refLvl),3); + } + + return nCells; + +} + void feedMomentsIntoFsGrid(dccrg::Dccrg& mpiGrid, const std::vector& cells, FsGrid< std::array, 2>& momentsGrid, bool dt2 /*=false*/) { - int fsgridSize = cells.size() * pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level()),3); - momentsGrid.setupForTransferIn(fsgridSize); + int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); + momentsGrid.setupForTransferIn(nCells); // Fill from cellParams // #pragma omp parallel for @@ -53,11 +70,11 @@ void feedMomentsIntoFsGrid(dccrg::Dccrg& void feedBgFieldsIntoFsGrid(dccrg::Dccrg& mpiGrid, const std::vector& cells, FsGrid< std::array, 2>& bgBGrid) { - int fsgridSize = cells.size() * pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level()),3); - bgBGrid.setupForTransferIn(fsgridSize); + int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); + bgBGrid.setupForTransferIn(nCells); // Setup transfer buffers - std::vector< std::array > transferBuffer(fsgridSize); + std::vector< std::array > transferBuffer(nCells); // Fill from cellParams // TODO: Making this thread-safe requires some tricks. Don't think threading is crucial here. @@ -109,12 +126,12 @@ void getVolumeFieldsFromFsGrid(FsGrid< std::array > transferBuffer(fsgridSize); + int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); + std::vector< std::array > transferBuffer(nCells); std::vector< std::array*> transferBufferPointer; // Setup transfer pointers - volumeFieldsGrid.setupForTransferOut(fsgridSize); + volumeFieldsGrid.setupForTransferOut(nCells); int k = 0; for(auto dccrgId : cells) { const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, volumeFieldsGrid.getLocalSize(), dccrgId); @@ -191,18 +208,18 @@ void getDerivativesFromFsGrid(FsGrid< std::array, const std::vector& cells) { // Setup transfer buffers - int fsgridSize = cells.size() * pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level()),3); - std::vector< std::array > dperbTransferBuffer(fsgridSize); - std::vector< std::array > dmomentsTransferBuffer(fsgridSize); - std::vector< std::array > bgbfieldTransferBuffer(fsgridSize); + int nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); + std::vector< std::array > dperbTransferBuffer(nCellsOnMaxRefLvl); + std::vector< std::array > dmomentsTransferBuffer(nCellsOnMaxRefLvl); + std::vector< std::array > bgbfieldTransferBuffer(nCellsOnMaxRefLvl); std::vector< std::array*> dperbTransferBufferPointer; std::vector< std::array*> dmomentsTransferBufferPointer; std::vector< std::array*> bgbfieldTransferBufferPointer; - dperbGrid.setupForTransferOut(fsgridSize); - dmomentsGrid.setupForTransferOut(fsgridSize); - bgbfieldGrid.setupForTransferOut(fsgridSize); + dperbGrid.setupForTransferOut(nCellsOnMaxRefLvl); + dmomentsGrid.setupForTransferOut(nCellsOnMaxRefLvl); + bgbfieldGrid.setupForTransferOut(nCellsOnMaxRefLvl); int k = 0; for (auto dccrgId : cells) { @@ -323,9 +340,9 @@ void getDerivativesFromFsGrid(FsGrid< std::array, void setupTechnicalFsGrid(dccrg::Dccrg& mpiGrid, const std::vector& cells, FsGrid< fsgrids::technical, 2>& technicalGrid) { - int fsgridSize = cells.size() * pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level()),3); + int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); - technicalGrid.setupForTransferIn(fsgridSize); + 
technicalGrid.setupForTransferIn(nCells); //#pragma omp parallel for for(auto dccrgId : cells) { diff --git a/fieldsolver/gridGlue.hpp b/fieldsolver/gridGlue.hpp index b0ae25378..32f3e7c01 100644 --- a/fieldsolver/gridGlue.hpp +++ b/fieldsolver/gridGlue.hpp @@ -72,6 +72,9 @@ void feedBgFieldsIntoFsGrid(dccrg::Dccrg& const std::vector& cells, FsGrid< std::array, 2>& BgBGrid); +int getNumberOfCellsOnMaxRefLvl(dccrg::Dccrg& mpiGrid, + const std::vector& cells); + /*! Transfer field data from DCCRG cellparams into the appropriate FsGrid structure * \param mpiGrid The DCCRG grid carrying fieldparam data * \param cells List of local cells @@ -118,7 +121,8 @@ template< unsigned int numFields > void feedFieldDataIntoFsGridAmr( const std::vector& cells, int cellParamsIndex, FsGrid< std::array, 2>& targetGrid) { - targetGrid.setupForTransferIn(cells.size() * pow(2,mpiGrid.mapping.get_maximum_refinement_level())); + int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); + targetGrid.setupForTransferIn(nCells); for(CellID dccrgId : cells) { const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, targetGrid.getLocalSize(), dccrgId); @@ -170,4 +174,3 @@ std::vector mapDccrgIdToFsGrid(dccrg::Dccrg& technicalGrid, dccrg::Dccrg& mpiGrid, CellID fsgridID); - From 9cfb9e34f6765a983ce148c090319db2369dfdd1 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 4 Oct 2018 12:29:47 +0300 Subject: [PATCH 078/602] Modified feed-functions to avoid unnecessary data transfers. --- fieldsolver/gridGlue.cpp | 126 ++++++++++++++++++++++----------------- 1 file changed, 70 insertions(+), 56 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 75c6dbbdb..b053c01e4 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -30,35 +30,40 @@ void feedMomentsIntoFsGrid(dccrg::Dccrg& int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); momentsGrid.setupForTransferIn(nCells); + std::vector< std::array > transferBuffer(cells.size()); + // Fill from cellParams - // #pragma omp parallel for - for(auto dccrgId : cells) { +#pragma omp parallel for + for(uint i = 0; i < cells.size(); ++i) { + CellID dccrgId = cells[i]; auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); - std::array* thisCellData; - const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, momentsGrid.getLocalSize(), dccrgId); + std::array* thisCellData = &transferBuffer[i]; + if(dt2) { + thisCellData->at(fsgrids::moments::RHOM) = cellParams[CellParams::RHOM_DT2]; + thisCellData->at(fsgrids::moments::RHOQ) = cellParams[CellParams::RHOQ_DT2]; + thisCellData->at(fsgrids::moments::VX) = cellParams[CellParams::VX_DT2]; + thisCellData->at(fsgrids::moments::VY) = cellParams[CellParams::VY_DT2]; + thisCellData->at(fsgrids::moments::VZ) = cellParams[CellParams::VZ_DT2]; + thisCellData->at(fsgrids::moments::P_11) = cellParams[CellParams::P_11_DT2]; + thisCellData->at(fsgrids::moments::P_22) = cellParams[CellParams::P_22_DT2]; + thisCellData->at(fsgrids::moments::P_33) = cellParams[CellParams::P_33_DT2]; + } else { + thisCellData->at(fsgrids::moments::RHOM) = cellParams[CellParams::RHOM]; + thisCellData->at(fsgrids::moments::RHOQ) = cellParams[CellParams::RHOQ]; + thisCellData->at(fsgrids::moments::VX) = cellParams[CellParams::VX]; + thisCellData->at(fsgrids::moments::VY) = cellParams[CellParams::VY]; + thisCellData->at(fsgrids::moments::VZ) = cellParams[CellParams::VZ]; + thisCellData->at(fsgrids::moments::P_11) = cellParams[CellParams::P_11]; + thisCellData->at(fsgrids::moments::P_22) = cellParams[CellParams::P_22]; + 
thisCellData->at(fsgrids::moments::P_33) = cellParams[CellParams::P_33]; + } + } + for (uint i = 0;i < cells.size(); ++i) { + CellID dccrgId = cells[i]; + const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, momentsGrid.getLocalSize(), dccrgId); for (auto fsgridId : fsgridIds) { - if(!dt2) { - thisCellData->at(fsgrids::moments::RHOM) = cellParams[CellParams::RHOM]; - thisCellData->at(fsgrids::moments::RHOQ) = cellParams[CellParams::RHOQ]; - thisCellData->at(fsgrids::moments::VX) = cellParams[CellParams::VX]; - thisCellData->at(fsgrids::moments::VY) = cellParams[CellParams::VY]; - thisCellData->at(fsgrids::moments::VZ) = cellParams[CellParams::VZ]; - thisCellData->at(fsgrids::moments::P_11) = cellParams[CellParams::P_11]; - thisCellData->at(fsgrids::moments::P_22) = cellParams[CellParams::P_22]; - thisCellData->at(fsgrids::moments::P_33) = cellParams[CellParams::P_33]; - } else { - thisCellData->at(fsgrids::moments::RHOM) = cellParams[CellParams::RHOM_DT2]; - thisCellData->at(fsgrids::moments::RHOQ) = cellParams[CellParams::RHOQ_DT2]; - thisCellData->at(fsgrids::moments::VX) = cellParams[CellParams::VX_DT2]; - thisCellData->at(fsgrids::moments::VY) = cellParams[CellParams::VY_DT2]; - thisCellData->at(fsgrids::moments::VZ) = cellParams[CellParams::VZ_DT2]; - thisCellData->at(fsgrids::moments::P_11) = cellParams[CellParams::P_11_DT2]; - thisCellData->at(fsgrids::moments::P_22) = cellParams[CellParams::P_22_DT2]; - thisCellData->at(fsgrids::moments::P_33) = cellParams[CellParams::P_33_DT2]; - } - - momentsGrid.transferDataIn(fsgridId, thisCellData); + momentsGrid.transferDataIn(fsgridId, &transferBuffer[i]); } } @@ -74,47 +79,56 @@ void feedBgFieldsIntoFsGrid(dccrg::Dccrg& bgBGrid.setupForTransferIn(nCells); // Setup transfer buffers - std::vector< std::array > transferBuffer(nCells); + std::vector< std::array > transferBuffer(cells.size()); + std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + // Fill from cellParams - // TODO: Making this thread-safe requires some tricks. Don't think threading is crucial here. 
- // #pragma omp parallel for - for(auto dccrgId : cells) { + // We only need to read data once per dccrg cell here +#pragma omp parallel for + for(uint i = 0; i < cells.size(); ++i) { + CellID dccrgId = cells[i]; auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); auto derivatives = mpiGrid[dccrgId]->derivatives; auto volumeDerivatives = mpiGrid[dccrgId]->derivativesBVOL; - const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, bgBGrid.getLocalSize(), dccrgId); + std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + + std::array* thisCellData = &transferBuffer[i]; + + thisCellData->at(fsgrids::bgbfield::BGBX) = cellParams[CellParams::BGBX]; + thisCellData->at(fsgrids::bgbfield::BGBY) = cellParams[CellParams::BGBY]; + thisCellData->at(fsgrids::bgbfield::BGBZ) = cellParams[CellParams::BGBZ]; + thisCellData->at(fsgrids::bgbfield::BGBXVOL) = cellParams[CellParams::BGBXVOL]; + thisCellData->at(fsgrids::bgbfield::BGBYVOL) = cellParams[CellParams::BGBYVOL]; + thisCellData->at(fsgrids::bgbfield::BGBZVOL) = cellParams[CellParams::BGBZVOL]; + + thisCellData->at(fsgrids::bgbfield::dBGBxdy) = derivatives[fieldsolver::dBGBxdy]; + thisCellData->at(fsgrids::bgbfield::dBGBxdz) = derivatives[fieldsolver::dBGBxdz]; + thisCellData->at(fsgrids::bgbfield::dBGBydx) = derivatives[fieldsolver::dBGBydx]; + thisCellData->at(fsgrids::bgbfield::dBGBydz) = derivatives[fieldsolver::dBGBydz]; + thisCellData->at(fsgrids::bgbfield::dBGBzdx) = derivatives[fieldsolver::dBGBzdx]; + thisCellData->at(fsgrids::bgbfield::dBGBzdy) = derivatives[fieldsolver::dBGBzdy]; + + thisCellData->at(fsgrids::bgbfield::dBGBXVOLdy) = volumeDerivatives[bvolderivatives::dBGBXVOLdy]; + thisCellData->at(fsgrids::bgbfield::dBGBXVOLdz) = volumeDerivatives[bvolderivatives::dBGBXVOLdz]; + thisCellData->at(fsgrids::bgbfield::dBGBYVOLdx) = volumeDerivatives[bvolderivatives::dBGBYVOLdx]; + thisCellData->at(fsgrids::bgbfield::dBGBYVOLdz) = volumeDerivatives[bvolderivatives::dBGBYVOLdz]; + thisCellData->at(fsgrids::bgbfield::dBGBZVOLdx) = volumeDerivatives[bvolderivatives::dBGBZVOLdx]; + thisCellData->at(fsgrids::bgbfield::dBGBZVOLdy) = volumeDerivatives[bvolderivatives::dBGBZVOLdy]; + + std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + } + // Copy data into each fsgrid cell overlapping the dccrg cell + for (uint i = 0; i < cells.size(); ++i) { + CellID dccrgId = cells[i]; + const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, bgBGrid.getLocalSize(), dccrgId); for (auto fsgridId : fsgridIds) { - - std::array* thisCellData; - - thisCellData->at(fsgrids::bgbfield::BGBX) = cellParams[CellParams::BGBX]; - thisCellData->at(fsgrids::bgbfield::BGBY) = cellParams[CellParams::BGBY]; - thisCellData->at(fsgrids::bgbfield::BGBZ) = cellParams[CellParams::BGBZ]; - thisCellData->at(fsgrids::bgbfield::BGBXVOL) = cellParams[CellParams::BGBXVOL]; - thisCellData->at(fsgrids::bgbfield::BGBYVOL) = cellParams[CellParams::BGBYVOL]; - thisCellData->at(fsgrids::bgbfield::BGBZVOL) = cellParams[CellParams::BGBZVOL]; - - thisCellData->at(fsgrids::bgbfield::dBGBxdy) = derivatives[fieldsolver::dBGBxdy]; - thisCellData->at(fsgrids::bgbfield::dBGBxdz) = derivatives[fieldsolver::dBGBxdz]; - thisCellData->at(fsgrids::bgbfield::dBGBydx) = derivatives[fieldsolver::dBGBydx]; - thisCellData->at(fsgrids::bgbfield::dBGBydz) = derivatives[fieldsolver::dBGBydz]; - thisCellData->at(fsgrids::bgbfield::dBGBzdx) = derivatives[fieldsolver::dBGBzdx]; - thisCellData->at(fsgrids::bgbfield::dBGBzdy) = derivatives[fieldsolver::dBGBzdy]; - - 
thisCellData->at(fsgrids::bgbfield::dBGBXVOLdy) = volumeDerivatives[bvolderivatives::dBGBXVOLdy]; - thisCellData->at(fsgrids::bgbfield::dBGBXVOLdz) = volumeDerivatives[bvolderivatives::dBGBXVOLdz]; - thisCellData->at(fsgrids::bgbfield::dBGBYVOLdx) = volumeDerivatives[bvolderivatives::dBGBYVOLdx]; - thisCellData->at(fsgrids::bgbfield::dBGBYVOLdz) = volumeDerivatives[bvolderivatives::dBGBYVOLdz]; - thisCellData->at(fsgrids::bgbfield::dBGBZVOLdx) = volumeDerivatives[bvolderivatives::dBGBZVOLdx]; - thisCellData->at(fsgrids::bgbfield::dBGBZVOLdy) = volumeDerivatives[bvolderivatives::dBGBZVOLdy]; - - bgBGrid.transferDataIn(fsgridId, thisCellData); + bgBGrid.transferDataIn(fsgridId, &transferBuffer[i]); } } - + // Finish the actual transfer bgBGrid.finishTransfersIn(); From 8165fe1a57d886a598e77978cc5dcf615c8457d6 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 4 Oct 2018 12:56:23 +0300 Subject: [PATCH 079/602] Similar modification as previous commit to setupTechnicalFsGrid --- fieldsolver/gridGlue.cpp | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index b053c01e4..e94c3da3a 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -80,8 +80,6 @@ void feedBgFieldsIntoFsGrid(dccrg::Dccrg& // Setup transfer buffers std::vector< std::array > transferBuffer(cells.size()); - - std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; // Fill from cellParams // We only need to read data once per dccrg cell here @@ -116,8 +114,6 @@ void feedBgFieldsIntoFsGrid(dccrg::Dccrg& thisCellData->at(fsgrids::bgbfield::dBGBYVOLdz) = volumeDerivatives[bvolderivatives::dBGBYVOLdz]; thisCellData->at(fsgrids::bgbfield::dBGBZVOLdx) = volumeDerivatives[bvolderivatives::dBGBZVOLdx]; thisCellData->at(fsgrids::bgbfield::dBGBZVOLdy) = volumeDerivatives[bvolderivatives::dBGBZVOLdy]; - - std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; } // Copy data into each fsgrid cell overlapping the dccrg cell @@ -354,26 +350,32 @@ void getDerivativesFromFsGrid(FsGrid< std::array, void setupTechnicalFsGrid(dccrg::Dccrg& mpiGrid, const std::vector& cells, FsGrid< fsgrids::technical, 2>& technicalGrid) { - int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); - + int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); technicalGrid.setupForTransferIn(nCells); - //#pragma omp parallel for - for(auto dccrgId : cells) { + // Setup transfer buffers + std::vector< fsgrids::technical > transferBuffer(cells.size()); + +#pragma omp parallel for + for(uint i = 0; i < cells.size(); ++i) { - fsgrids::technical* thisCellData; - const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, technicalGrid.getLocalSize(), dccrgId); + fsgrids::technical* thisCellData = &transferBuffer[i]; + // Data needs to be collected from some different places for this grid. + thisCellData->sysBoundaryFlag = mpiGrid[cells[i]]->sysBoundaryFlag; + thisCellData->sysBoundaryLayer = mpiGrid[cells[i]]->sysBoundaryLayer; + thisCellData->maxFsDt = std::numeric_limits::max(); + } + for(uint i = 0; i < cells.size(); ++i) { + + const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, technicalGrid.getLocalSize(), cells[i]); + for (auto fsgridId : fsgridIds) { - // Data needs to be collected from some different places for this grid. 
- thisCellData->sysBoundaryFlag = mpiGrid[dccrgId]->sysBoundaryFlag; - thisCellData->sysBoundaryLayer = mpiGrid[dccrgId]->sysBoundaryLayer; - thisCellData->maxFsDt = std::numeric_limits::max(); - - technicalGrid.transferDataIn(fsgridId,thisCellData); + technicalGrid.transferDataIn(fsgridId,&transferBuffer[i]); } } + technicalGrid.finishTransfersIn(); } From dcf2cef381808a26e70f9804f0f3ca102a5ddf0d Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 4 Oct 2018 14:44:55 +0300 Subject: [PATCH 080/602] Removed debugging output --- fieldsolver/gridGlue.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index e94c3da3a..b2778150d 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -90,7 +90,7 @@ void feedBgFieldsIntoFsGrid(dccrg::Dccrg& auto derivatives = mpiGrid[dccrgId]->derivatives; auto volumeDerivatives = mpiGrid[dccrgId]->derivativesBVOL; - std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; std::array* thisCellData = &transferBuffer[i]; From 0109790927e29236205f24ed8aab311c1174ee82 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 4 Oct 2018 14:45:20 +0300 Subject: [PATCH 081/602] Modified fsgrid coupling calls. --- vlasiator.cpp | 101 +++++++++++++++++++++++++++----------------------- 1 file changed, 54 insertions(+), 47 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 950980301..4a5eddce2 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -392,7 +392,7 @@ int main(int argn,char* args[]) { phiprof::start("Init DROs"); DataReducer outputReducer, diagnosticReducer; initializeDataReducers(&outputReducer, &diagnosticReducer); - phiprof::stop("Init DROs"); + phiprof::stop("Init DROs"); // Initialize simplified Fieldsolver grids. phiprof::start("Init fieldsolver grids"); @@ -403,6 +403,7 @@ int main(int argn,char* args[]) { std::array periodicity{mpiGrid.topology.is_periodic(0), mpiGrid.topology.is_periodic(1), mpiGrid.topology.is_periodic(2)}; + FsGrid< std::array, 2> perBGrid(fsGridDimensions, comm, periodicity); FsGrid< std::array, 2> perBDt2Grid(fsGridDimensions, comm, periodicity); FsGrid< std::array, 2> EGrid(fsGridDimensions, comm, periodicity); @@ -431,39 +432,44 @@ int main(int argn,char* args[]) { phiprof::stop("Init fieldsolver grids"); phiprof::start("Initial fsgrid coupling"); const std::vector& cells = getLocalCells(); - + // Couple FSGrids to mpiGrid // TODO: Do we really need to couple *all* of these fields? - perBGrid. setupForGridCoupling(fsGridSize); - perBDt2Grid. setupForGridCoupling(fsGridSize); - EGrid. setupForGridCoupling(fsGridSize); - EDt2Grid. setupForGridCoupling(fsGridSize); - EHallGrid. setupForGridCoupling(fsGridSize); - EGradPeGrid. setupForGridCoupling(fsGridSize); - momentsGrid. setupForGridCoupling(fsGridSize); - momentsDt2Grid.setupForGridCoupling(fsGridSize); - dPerBGrid. setupForGridCoupling(fsGridSize); - dMomentsGrid. setupForGridCoupling(fsGridSize); - BgBGrid. setupForGridCoupling(fsGridSize); - volGrid. setupForGridCoupling(fsGridSize); - technicalGrid. setupForGridCoupling(fsGridSize); - - // FSGrid cellIds are 0-based, whereas DCCRG cellIds are 1-based, beware - for(int cellId = 0;cellId < fsGridSize; ++cellId) { - perBGrid. setGridCoupling(cellId, myRank); - perBDt2Grid. setGridCoupling(cellId, myRank); - EGrid. setGridCoupling(cellId, myRank); - EDt2Grid. setGridCoupling(cellId, myRank); - EHallGrid. 
setGridCoupling(cellId, myRank); - EGradPeGrid. setGridCoupling(cellId, myRank); - momentsGrid. setGridCoupling(cellId, myRank); - momentsDt2Grid.setGridCoupling(cellId, myRank); - dPerBGrid. setGridCoupling(cellId, myRank); - dMomentsGrid. setGridCoupling(cellId, myRank); - BgBGrid. setGridCoupling(cellId, myRank); - volGrid. setGridCoupling(cellId, myRank); - technicalGrid. setGridCoupling(cellId, myRank); + perBGrid. setupForGridCoupling(); + perBDt2Grid. setupForGridCoupling(); + EGrid. setupForGridCoupling(); + EDt2Grid. setupForGridCoupling(); + EHallGrid. setupForGridCoupling(); + EGradPeGrid. setupForGridCoupling(); + momentsGrid. setupForGridCoupling(); + momentsDt2Grid.setupForGridCoupling(); + dPerBGrid. setupForGridCoupling(); + dMomentsGrid. setupForGridCoupling(); + BgBGrid. setupForGridCoupling(); + volGrid. setupForGridCoupling(); + technicalGrid. setupForGridCoupling(); + + // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. + // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. + for(auto& dccrgId : cells) { + const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, fsGridDimensions, dccrgId); + for (auto fsgridId : fsgridIds) { + perBGrid. setGridCoupling(fsgridId, myRank); + perBDt2Grid. setGridCoupling(fsgridId, myRank); + EGrid. setGridCoupling(fsgridId, myRank); + EDt2Grid. setGridCoupling(fsgridId, myRank); + EHallGrid. setGridCoupling(fsgridId, myRank); + EGradPeGrid. setGridCoupling(fsgridId, myRank); + momentsGrid. setGridCoupling(fsgridId, myRank); + momentsDt2Grid.setGridCoupling(fsgridId, myRank); + dPerBGrid. setGridCoupling(fsgridId, myRank); + dMomentsGrid. setGridCoupling(fsgridId, myRank); + BgBGrid. setGridCoupling(fsgridId, myRank); + volGrid. setGridCoupling(fsgridId, myRank); + technicalGrid. setGridCoupling(fsgridId, myRank); + } } + perBGrid. finishGridCoupling(); perBDt2Grid. finishGridCoupling(); EGrid. finishGridCoupling(); @@ -481,16 +487,17 @@ int main(int argn,char* args[]) { // Transfer initial field configuration into the FsGrids feedFieldDataIntoFsGrid(mpiGrid,cells,CellParams::PERBX,perBGrid); + feedBgFieldsIntoFsGrid(mpiGrid,cells,BgBGrid); BgBGrid.updateGhostCells(); - + setupTechnicalFsGrid(mpiGrid, cells, technicalGrid); technicalGrid.updateGhostCells(); - + // WARNING this means moments and dt2 moments are the same here. 
feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,false); feedMomentsIntoFsGrid(mpiGrid, cells, momentsDt2Grid,false); - + phiprof::start("Init field propagator"); if ( initializeFieldPropagator( @@ -879,19 +886,19 @@ int main(int argn,char* args[]) { // Re-couple fsgrids to updated grid situation phiprof::start("fsgrid-recouple-after-lb"); const vector& cells = getLocalCells(); - perBGrid.setupForGridCoupling(cells.size()); - perBDt2Grid.setupForGridCoupling(cells.size()); - EGrid.setupForGridCoupling(cells.size()); - EDt2Grid.setupForGridCoupling(cells.size()); - EHallGrid.setupForGridCoupling(cells.size()); - EGradPeGrid.setupForGridCoupling(cells.size()); - momentsGrid.setupForGridCoupling(cells.size()); - momentsDt2Grid.setupForGridCoupling(cells.size()); - dPerBGrid.setupForGridCoupling(cells.size()); - dMomentsGrid.setupForGridCoupling(cells.size()); - BgBGrid.setupForGridCoupling(cells.size()); - volGrid.setupForGridCoupling(cells.size()); - technicalGrid.setupForGridCoupling(cells.size()); + perBGrid.setupForGridCoupling(); + perBDt2Grid.setupForGridCoupling(); + EGrid.setupForGridCoupling(); + EDt2Grid.setupForGridCoupling(); + EHallGrid.setupForGridCoupling(); + EGradPeGrid.setupForGridCoupling(); + momentsGrid.setupForGridCoupling(); + momentsDt2Grid.setupForGridCoupling(); + dPerBGrid.setupForGridCoupling(); + dMomentsGrid.setupForGridCoupling(); + BgBGrid.setupForGridCoupling(); + volGrid.setupForGridCoupling(); + technicalGrid.setupForGridCoupling(); // FSGrid cellIds are 0-based, whereas DCCRG cellIds are 1-based, beware for(auto& i : cells) { From cf01fdfc29a41923c77735cef5ef089821ee8843 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 8 Oct 2018 12:32:37 +0300 Subject: [PATCH 082/602] Bug fixes, mostly to mapDccrgIdToFsGrid. --- fieldsolver/gridGlue.cpp | 39 +++++++++++++++++++++------------------ fieldsolver/gridGlue.hpp | 15 ++++++++------- 2 files changed, 29 insertions(+), 25 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index b2778150d..f1d7e30f4 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -23,6 +23,8 @@ int getNumberOfCellsOnMaxRefLvl(dccrg::Dccrg& mpiGrid, const std::vector& cells, FsGrid< std::array, 2>& momentsGrid, bool dt2 /*=false*/) { @@ -37,22 +39,24 @@ void feedMomentsIntoFsGrid(dccrg::Dccrg& for(uint i = 0; i < cells.size(); ++i) { CellID dccrgId = cells[i]; auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); + std::array* thisCellData = &transferBuffer[i]; + if(dt2) { thisCellData->at(fsgrids::moments::RHOM) = cellParams[CellParams::RHOM_DT2]; thisCellData->at(fsgrids::moments::RHOQ) = cellParams[CellParams::RHOQ_DT2]; - thisCellData->at(fsgrids::moments::VX) = cellParams[CellParams::VX_DT2]; - thisCellData->at(fsgrids::moments::VY) = cellParams[CellParams::VY_DT2]; - thisCellData->at(fsgrids::moments::VZ) = cellParams[CellParams::VZ_DT2]; + thisCellData->at(fsgrids::moments::VX) = cellParams[CellParams::VX_DT2]; + thisCellData->at(fsgrids::moments::VY) = cellParams[CellParams::VY_DT2]; + thisCellData->at(fsgrids::moments::VZ) = cellParams[CellParams::VZ_DT2]; thisCellData->at(fsgrids::moments::P_11) = cellParams[CellParams::P_11_DT2]; thisCellData->at(fsgrids::moments::P_22) = cellParams[CellParams::P_22_DT2]; thisCellData->at(fsgrids::moments::P_33) = cellParams[CellParams::P_33_DT2]; } else { thisCellData->at(fsgrids::moments::RHOM) = cellParams[CellParams::RHOM]; thisCellData->at(fsgrids::moments::RHOQ) = cellParams[CellParams::RHOQ]; - thisCellData->at(fsgrids::moments::VX) = 
cellParams[CellParams::VX]; - thisCellData->at(fsgrids::moments::VY) = cellParams[CellParams::VY]; - thisCellData->at(fsgrids::moments::VZ) = cellParams[CellParams::VZ]; + thisCellData->at(fsgrids::moments::VX) = cellParams[CellParams::VX]; + thisCellData->at(fsgrids::moments::VY) = cellParams[CellParams::VY]; + thisCellData->at(fsgrids::moments::VZ) = cellParams[CellParams::VZ]; thisCellData->at(fsgrids::moments::P_11) = cellParams[CellParams::P_11]; thisCellData->at(fsgrids::moments::P_22) = cellParams[CellParams::P_22]; thisCellData->at(fsgrids::moments::P_33) = cellParams[CellParams::P_33]; @@ -434,29 +438,28 @@ std::vector mapDccrgIdToFsGrid(dccrg::Dccrg indices; + std::vector> allIndices; - std::vector> indices; for (uint i = 0; i < cellLength; ++i) { for (uint j = 0; j < cellLength; ++j) { for (uint k = 0; k < cellLength; ++k) { - CellID id = dccrgID + i - + (j * gridLength[0] * pow(2,maxRefLvl)) - + (k * gridLength[1] * pow(2,maxRefLvl) * gridLength[0] * pow(2,maxRefLvl)); - auto ids = mpiGrid.mapping.get_indices(id); - std::array cellCoord; - for (uint m = 0; m < 3; m++) { - cellCoord[m] = static_cast(ids[m]); - } - indices.push_back(cellCoord); + indices[0] = topLeftIndices[0] + i + 1; + indices[1] = topLeftIndices[1] + j + 1; + indices[2] = topLeftIndices[2] + k + 1; + allIndices.push_back(indices); } } } + std::vector fsgridIDs; // The indices we get from dccrg are directly coordinates at the finest refinement level. // Therefore, they should match fsgrid coordinates exactly. - for (auto cellCoord: indices) { - fsgridIDs.push_back(cellCoord[0] + cellCoord[1] * fsgridDims[0] + cellCoord[2] * fsgridDims[1]); + for (auto cellCoord: allIndices) { + fsgridIDs.push_back(cellCoord[0] + cellCoord[1] * fsgridDims[0] + cellCoord[2] * fsgridDims[1] * fsgridDims[0]); } + return fsgridIDs; } diff --git a/fieldsolver/gridGlue.hpp b/fieldsolver/gridGlue.hpp index 32f3e7c01..9308db888 100644 --- a/fieldsolver/gridGlue.hpp +++ b/fieldsolver/gridGlue.hpp @@ -3,6 +3,14 @@ #include #include +std::vector mapDccrgIdToFsGrid(dccrg::Dccrg& mpiGrid, + std::array fsgridDims, CellID dccrgID); + +CellID mapFsGridIdToDccrg(FsGrid< fsgrids::technical, 2>& technicalGrid, + dccrg::Dccrg& mpiGrid, + CellID fsgridID); + + /*! Take input moments from DCCRG grid and put them into the Fieldsolver grid * \param mpiGrid The DCCRG grid carrying rho, rhoV and P * \param cells List of local cells @@ -167,10 +175,3 @@ template< unsigned int numFields > void getFieldDataFromFsGrid( sourceGrid.finishTransfersOut(); } - -std::vector mapDccrgIdToFsGrid(dccrg::Dccrg& mpiGrid, - std::array fsgridDims, CellID dccrgID); - -CellID mapFsGridIdToDccrg(FsGrid< fsgrids::technical, 2>& technicalGrid, - dccrg::Dccrg& mpiGrid, - CellID fsgridID); From 961cbc5e66a85e0b535643f04f042076fb444ff5 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 8 Oct 2018 14:46:23 +0300 Subject: [PATCH 083/602] Changed call to feedFieldDataIntoFsGrid to use the amr version. --- vlasiator.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 4a5eddce2..b58f31468 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -383,8 +383,8 @@ int main(int argn,char* args[]) { cout << endl; mpiGrid.write_vtk_file("mpiGrid.vtk"); } - recalculateLocalCellsCache(); + phiprof::stop("Init grid"); // Initialize data reduction operators. 
This should be done elsewhere in order to initialize @@ -486,7 +486,7 @@ int main(int argn,char* args[]) { phiprof::stop("Initial fsgrid coupling"); // Transfer initial field configuration into the FsGrids - feedFieldDataIntoFsGrid(mpiGrid,cells,CellParams::PERBX,perBGrid); + feedFieldDataIntoFsGridAmr(mpiGrid,cells,CellParams::PERBX,perBGrid); feedBgFieldsIntoFsGrid(mpiGrid,cells,BgBGrid); BgBGrid.updateGhostCells(); From 5bf0632709792b7f0fd5378f4f1e35bb5adf8837 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 11 Oct 2018 14:32:08 +0300 Subject: [PATCH 084/602] Made amr version the default of feedFieldDataIntoFsGrid --- fieldsolver/gridGlue.hpp | 68 ++++++++++++++++++++++------------------ 1 file changed, 38 insertions(+), 30 deletions(-) diff --git a/fieldsolver/gridGlue.hpp b/fieldsolver/gridGlue.hpp index 9308db888..86a896d52 100644 --- a/fieldsolver/gridGlue.hpp +++ b/fieldsolver/gridGlue.hpp @@ -94,37 +94,25 @@ int getNumberOfCellsOnMaxRefLvl(dccrg::Dccrg void feedFieldDataIntoFsGrid( - dccrg::Dccrg& mpiGrid, - const std::vector& cells, int index, - FsGrid< std::array, 2>& targetGrid) { +// template< unsigned int numFields > void feedFieldDataIntoFsGrid( +// dccrg::Dccrg& mpiGrid, +// const std::vector& cells, int index, +// FsGrid< std::array, 2>& targetGrid) { - targetGrid.setupForTransferIn(cells.size()); +// targetGrid.setupForTransferIn(cells.size()); - for(CellID i : cells) { - // TODO: This assumes that the field data are lying continuous in memory. - // Check definition of CellParams in common.h if unsure. - std::array* cellDataPointer = reinterpret_cast*>( - &(mpiGrid[i]->get_cell_parameters()[index])); - targetGrid.transferDataIn(i - 1, cellDataPointer); - } +// for(CellID i : cells) { +// // TODO: This assumes that the field data are lying continuous in memory. +// // Check definition of CellParams in common.h if unsure. +// std::array* cellDataPointer = reinterpret_cast*>( +// &(mpiGrid[i]->get_cell_parameters()[index])); +// targetGrid.transferDataIn(i - 1, cellDataPointer); +// } - targetGrid.finishTransfersIn(); -} +// targetGrid.finishTransfersIn(); +// } -/*! Transfer field data from DCCRG cellparams into the appropriate FsGrid structure - * \param mpiGrid The DCCRG grid carrying fieldparam data - * \param cells List of local cells - * \param index Index into the cellparams array from which to copy - * \param targetGrid Fieldsolver grid for these quantities - * - * The cellparams with indices from index to index+numFields are copied over, and - * have to be continuous in memory. - * - * This function assumes that proper grid coupling has been set up. - */ - -template< unsigned int numFields > void feedFieldDataIntoFsGridAmr( +template< unsigned int numFields > void feedFieldDataIntoFsGrid( dccrg::Dccrg& mpiGrid, const std::vector& cells, int cellParamsIndex, FsGrid< std::array, 2>& targetGrid) { @@ -158,6 +146,23 @@ template< unsigned int numFields > void feedFieldDataIntoFsGridAmr( * * This function assumes that proper grid coupling has been set up. */ +// template< unsigned int numFields > void getFieldDataFromFsGrid( +// FsGrid< std::array, 2>& sourceGrid, +// dccrg::Dccrg& mpiGrid, +// const std::vector& cells, int index) { + +// sourceGrid.setupForTransferOut(cells.size()); + +// for(CellID i : cells) { +// // TODO: This assumes that the field data are lying continuous in memory. +// // Check definition of CellParams in common.h if unsure. 
+// std::array* cellDataPointer = reinterpret_cast*>( +// &(mpiGrid[i]->get_cell_parameters()[index])); +// sourceGrid.transferDataOut(i - 1, cellDataPointer); +// } + +// sourceGrid.finishTransfersOut(); +// } template< unsigned int numFields > void getFieldDataFromFsGrid( FsGrid< std::array, 2>& sourceGrid, dccrg::Dccrg& mpiGrid, @@ -165,12 +170,15 @@ template< unsigned int numFields > void getFieldDataFromFsGrid( sourceGrid.setupForTransferOut(cells.size()); - for(CellID i : cells) { + for(CellID dccrgId : cells) { // TODO: This assumes that the field data are lying continuous in memory. // Check definition of CellParams in common.h if unsure. std::array* cellDataPointer = reinterpret_cast*>( - &(mpiGrid[i]->get_cell_parameters()[index])); - sourceGrid.transferDataOut(i - 1, cellDataPointer); + &(mpiGrid[dccrgId]->get_cell_parameters()[index])); + const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, sourceGrid.getLocalSize(), dccrgId); + for (auto fsgridId : fsgridIds) { + sourceGrid.transferDataIn(fsgridId, cellDataPointer); + } } sourceGrid.finishTransfersOut(); From 36d5219e14f77bcaaf1c3c62dd97ac41a001618d Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 11 Oct 2018 14:32:37 +0300 Subject: [PATCH 085/602] Modified getFsGridMaxDt to work with refinement --- fieldsolver/gridGlue.cpp | 51 ++++++++++++++++++++++++++++++---------- 1 file changed, 39 insertions(+), 12 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index f1d7e30f4..178151f8d 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -187,7 +187,6 @@ void getVolumeFieldsFromFsGrid(FsGrid< std::arrayget_cell_parameters(); // Calculate the number of fsgrid cells we need to average into the current dccrg cell - auto refLvl = mpiGrid.mapping.get_refinement_level(dccrgId); int nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); // TODO: Could optimize here by adding a separate branch for nCells == 1 with direct assignment of the value @@ -375,6 +374,10 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, technicalGrid.getLocalSize(), cells[i]); for (auto fsgridId : fsgridIds) { + // std::cout << "fsgridId: " << fsgridId << ", fsgrid Cell Coordinates:"; + // auto coords = technicalGrid.globalIDtoCellCoord(fsgridId); + // for (auto coord : coords) std::cout << " " << coord; + // std::cout << std::endl; technicalGrid.transferDataIn(fsgridId,&transferBuffer[i]); } } @@ -387,14 +390,22 @@ void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid, dccrg::Dccrg& mpiGrid, const std::vector& cells) { - technicalGrid.setupForTransferOut(cells.size()); + int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); + technicalGrid.setupForTransferOut(nCells); // Buffer to store contents of the grid std::vector transferBuffer(cells.size()); + std::vector transferBufferPointer; + int k = 0; for(int i=0; i< cells.size(); i++) { - fsgrids::technical* thisCellData = &transferBuffer[i]; - technicalGrid.transferDataOut(cells[i] - 1, thisCellData); + + transferBufferPointer.push_back(&transferBuffer[k]); + const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, technicalGrid.getLocalSize(), cells[i]); + for (auto fsgridId : fsgridIds) { + fsgrids::technical* thisCellData = &transferBuffer[k++]; + technicalGrid.transferDataOut(fsgridId, thisCellData); + } } technicalGrid.finishTransfersOut(); @@ -402,9 +413,27 @@ void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid, // After the 
transfer is completed, stuff the received maxFDT into the cells.
#pragma omp parallel for
   for(int i=0; i< cells.size(); i++) {
-      mpiGrid[cells[i]]->get_cell_parameters()[CellParams::MAXFDT] = transferBuffer[i].maxFsDt;
-      mpiGrid[cells[i]]->get_cell_parameters()[CellParams::FSGRID_RANK] = transferBuffer[i].fsGridRank;
-      mpiGrid[cells[i]]->get_cell_parameters()[CellParams::FSGRID_BOUNDARYTYPE] = transferBuffer[i].sysBoundaryFlag;
+
+      int dccrgId = cells[i];
+      auto cellParams = mpiGrid[dccrgId]->get_cell_parameters();
+
+      // Calculate the number of fsgrid cells we need to average into the current dccrg cell
+      int nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3);
+
+      cellParams[CellParams::MAXFDT] = std::numeric_limits<Real>::max();
+      //cellParams[CellParams::FSGRID_RANK] = 0;
+      //cellParams[CellParams::FSGRID_BOUNDARYTYPE] = 0;
+
+      for (int iCell = 0; iCell < nCells; ++iCell) {
+
+         fsgrids::technical* thisCellData = transferBufferPointer[i] + iCell;
+
+         cellParams[CellParams::MAXFDT] = std::min(cellParams[CellParams::MAXFDT],thisCellData->maxFsDt);
+
+         //TODO: Implement something for FSGRID_RANK and FSGRID_BOUNDARYTYPE
+         //cellParams[CellParams::FSGRID_RANK] = thisCellData->fsGridRank;
+         //cellParams[CellParams::FSGRID_BOUNDARYTYPE] = thisCellData->sysBoundaryFlag;
+      }
   }
}

@@ -445,17 +474,15 @@ std::vector mapDccrgIdToFsGrid(dccrg::Dccrg fsgridIDs;
-   // The indices we get from dccrg are directly coordinates at the finest refinement level.
-   // Therefore, they should match fsgrid coordinates exactly.
    for (auto cellCoord: allIndices) {
      fsgridIDs.push_back(cellCoord[0] + cellCoord[1] * fsgridDims[0] + cellCoord[2] * fsgridDims[1] * fsgridDims[0]);
    }

From 0be370042253a3d4547da1a467407c4eaf98734b Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Thu, 11 Oct 2018 14:37:44 +0300
Subject: [PATCH 086/602] Moved grid refinement to initializeGrid function to
 initialize refined cells. Added lots of debugging.
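
Refinement has to happen before the velocity meshes and the initial
state are set up, so that cells created by the refinement also get
initialized; doing it in main() after initializeGrid() was too late.
A minimal sketch of the dccrg call order used here (the midpoint
coordinates are just the example this patch refines at):

    // Refine the grid at one point, then rebalance before using it.
    std::array<double, 3> coords = {(P::xmax - P::xmin) / 2.0,
                                    (P::ymax - P::ymin) / 2.0,
                                    (P::zmax - P::zmin) / 2.0};
    mpiGrid.refine_completely_at(coords);                  // flag cell(s) for refinement
    std::vector<CellID> refined = mpiGrid.stop_refining(); // execute, returns the new cells
    mpiGrid.balance_load();                                // redistribute the refined cells
    recalculateLocalCellsCache();                          // the local cell list is stale now
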
--- grid.cpp | 24 +++++++++++ parameters.cpp | 2 +- vlasiator.cpp | 66 +++++++++++++++++++----------- vlasovsolver/cpu_trans_map_amr.cpp | 2 +- vlasovsolver/vlasovmover.cpp | 17 ++++++-- 5 files changed, 80 insertions(+), 31 deletions(-) diff --git a/grid.cpp b/grid.cpp index 5fdf76e5f..cd4a73348 100644 --- a/grid.cpp +++ b/grid.cpp @@ -122,6 +122,30 @@ void initializeGrid( sysBoundaries.isBoundaryPeriodic(2)) .initialize(comm) .set_geometry(geom_params); + + if(true) { + std::array coords; + coords[0] = (P::xmax - P::xmin) / 2.0; + coords[1] = (P::ymax - P::ymin) / 2.0; + coords[2] = (P::zmax - P::zmin) / 2.0; + cout << "Trying to refine at " << coords[0] << ", " << coords[1] << ", " << coords[2] << endl; + CellID myCell = mpiGrid.get_existing_cell(coords); + cout << "Got cell ID " << myCell << endl; + cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << endl; + bool refineSuccess = mpiGrid.refine_completely_at(coords); + std::vector refinedCells = mpiGrid.stop_refining(); + std::cout << std::boolalpha <<"Refine result: " << refineSuccess << endl; + mpiGrid.balance_load(); + if(refineSuccess) { + cout << "Refined Cells are: "; + for (auto cellid : refinedCells) { + cout << cellid << " "; + } + cout << endl; + mpiGrid.write_vtk_file("mpiGrid.vtk"); + } + recalculateLocalCellsCache(); + } // Init velocity mesh on all cells initVelocityGridGeometry(mpiGrid); diff --git a/parameters.cpp b/parameters.cpp index fb1f40ae5..e9eb53095 100644 --- a/parameters.cpp +++ b/parameters.cpp @@ -137,7 +137,7 @@ Realf P::amrRefineLimit = 1.0; Realf P::amrCoarsenLimit = 0.5; string P::amrVelRefCriterion = ""; -int P::amrMaxSpatialRefLevel = 0; +int P::amrMaxSpatialRefLevel = 1; bool Parameters::addParameters(){ //the other default parameters we read through the add/get interface diff --git a/vlasiator.cpp b/vlasiator.cpp index b58f31468..e10c85575 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -94,6 +94,8 @@ void addTimedBarrier(string name){ bool computeNewTimeStep(dccrg::Dccrg& mpiGrid,Real &newDt, bool &isChanged) { + cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + phiprof::start("compute-timestep"); //compute maximum time-step, this cannot be done at the first //step as the solvers compute the limits for each cell @@ -133,7 +135,7 @@ bool computeNewTimeStep(dccrg::Dccrg& mpi = blockParams[blockLID*BlockParams::N_VELOCITY_BLOCK_PARAMS+BlockParams::VYCRD] + (i+HALF)*blockParams[blockLID*BlockParams::N_VELOCITY_BLOCK_PARAMS+BlockParams::DVY] + EPS; - const Real Vz + const Real Vz = blockParams[blockLID*BlockParams::N_VELOCITY_BLOCK_PARAMS+BlockParams::VZCRD] + (i+HALF)*blockParams[blockLID*BlockParams::N_VELOCITY_BLOCK_PARAMS+BlockParams::DVZ] + EPS; @@ -171,6 +173,24 @@ bool computeNewTimeStep(dccrg::Dccrg& mpi creal meanVlasovCFL = 0.5*(P::vlasovSolverMaxCFL+ P::vlasovSolverMinCFL); creal meanFieldsCFL = 0.5*(P::fieldSolverMaxCFL+ P::fieldSolverMinCFL); Real subcycleDt; + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + + cout << dtMaxGlobal[0]; + cout << " "; + cout << dtMaxGlobal[1]; + cout << " "; + cout << dtMaxGlobal[2]; + cout << " "; + cout << P::vlasovSolverMaxCFL; + cout << " "; + cout << P::vlasovSolverMinCFL; + cout << " "; + cout << P::fieldSolverMaxCFL; + cout << " "; + cout << P::fieldSolverMinCFL; + cout << endl; + //reduce dt if it is too high for any of the three propagators, or too low for all propagators if(( P::dt > dtMaxGlobal[0] * P::vlasovSolverMaxCFL || @@ 
-180,6 +200,8 @@ bool computeNewTimeStep(dccrg::Dccrg& mpi P::dt < dtMaxGlobal[1] * P::vlasovSolverMinCFL * P::maxSlAccelerationSubcycles && P::dt < dtMaxGlobal[2] * P::fieldSolverMinCFL * P::maxFieldSolverSubcycles ) ) { + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; //new dt computed isChanged=true; @@ -362,28 +384,6 @@ int main(int argn,char* args[]) { //dccrg::Dccrg mpiGrid; initializeGrid(argn,args,mpiGrid,sysBoundaries,*project); isSysBoundaryCondDynamic = sysBoundaries.isDynamic(); - - std::array coords; - coords[0] = (P::xmax - P::xmin) / 2.0; - coords[1] = (P::ymax - P::ymin) / 2.0; - coords[2] = (P::zmax - P::zmin) / 2.0; - cout << "Trying to refine at " << coords[0] << ", " << coords[1] << ", " << coords[2] << endl; - CellID myCell = mpiGrid.get_existing_cell(coords); - cout << "Got cell ID " << myCell << endl; - cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << endl; - bool refineSuccess = mpiGrid.refine_completely_at(coords); - std::vector refinedCells = mpiGrid.stop_refining(); - cout << "Result: " << refineSuccess << endl; - mpiGrid.balance_load(); - if(refineSuccess) { - cout << "Refined Cells are: "; - for (auto cellid : refinedCells) { - cout << cellid << " "; - } - cout << endl; - mpiGrid.write_vtk_file("mpiGrid.vtk"); - } - recalculateLocalCellsCache(); phiprof::stop("Init grid"); @@ -486,7 +486,7 @@ int main(int argn,char* args[]) { phiprof::stop("Initial fsgrid coupling"); // Transfer initial field configuration into the FsGrids - feedFieldDataIntoFsGridAmr(mpiGrid,cells,CellParams::PERBX,perBGrid); + feedFieldDataIntoFsGrid(mpiGrid,cells,CellParams::PERBX,perBGrid); feedBgFieldsIntoFsGrid(mpiGrid,cells,BgBGrid); BgBGrid.updateGhostCells(); @@ -498,6 +498,8 @@ int main(int argn,char* args[]) { feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,false); feedMomentsIntoFsGrid(mpiGrid, cells, momentsDt2Grid,false); + cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + phiprof::start("Init field propagator"); if ( initializeFieldPropagator( @@ -521,6 +523,8 @@ int main(int argn,char* args[]) { exit(1); } phiprof::stop("Init field propagator"); + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; // Initialize Poisson solver (if used) if (P::propagatePotential == true) { @@ -535,6 +539,8 @@ int main(int argn,char* args[]) { // Free up memory: readparameters.finalize(); + cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if (P::isRestart == false) { // Run Vlasov solver once with zero dt to initialize //per-cell dt limits. In restarts, we read the dt from file. @@ -569,6 +575,8 @@ int main(int argn,char* args[]) { // These should be done by initializeFieldPropagator() if the propagation is turned off. 
getVolumeFieldsFromFsGrid(volGrid, mpiGrid, cells); phiprof::stop("getVolumeFieldsFromFsGrid"); + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; // Save restart data if (P::writeInitialState) { @@ -608,6 +616,8 @@ int main(int argn,char* args[]) { phiprof::stop("write-initial-state"); } + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; if (P::isRestart == false) { //compute new dt @@ -621,6 +631,8 @@ int main(int argn,char* args[]) { phiprof::stop("compute-dt"); } + cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if (!P::isRestart) { //go forward by dt/2 in V, initializes leapfrog split. In restarts the //the distribution function is already propagated forward in time by dt/2 @@ -997,11 +1009,13 @@ int main(int argn,char* args[]) { } phiprof::start("Spatial-space"); + cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; if( P::propagateVlasovTranslation) { calculateSpatialTranslation(mpiGrid,P::dt); } else { calculateSpatialTranslation(mpiGrid,0.0); } + cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; phiprof::stop("Spatial-space",computedCells,"Cells"); phiprof::start("Compute interp moments"); @@ -1018,7 +1032,7 @@ int main(int argn,char* args[]) { ); phiprof::stop("Compute interp moments"); - // Apply boundary conditions + // Apply boundary conditions if (P::propagateVlasovTranslation || P::propagateVlasovAcceleration ) { phiprof::start("Update system boundaries (Vlasov post-translation)"); sysBoundaries.applySysBoundaryVlasovConditions(mpiGrid, P::t+0.5*P::dt); @@ -1031,6 +1045,8 @@ int main(int argn,char* args[]) { // moments for t + dt are computed (field uses t and t+0.5dt) if (P::propagateField) { phiprof::start("Propagate Fields"); + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::start("fsgrid-coupling-in"); // Copy moments over into the fsgrid. diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index ece287477..841498b84 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -721,7 +721,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& uint ibeg = 0; uint iend = 0; std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; - std::cout << "(x, y): indices " << std::endl; + std::cout << "mpirank (x, y): indices " << std::endl; std::cout << "-----------------------------------------------------------------" << std::endl; for (uint i = 0; i < pencils.N; i++) { iend += pencils.lengthOfPencils[i]; diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index 09269886a..53f9efdfc 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -136,6 +136,7 @@ void calculateSpatialTranslation( update_remote_mapping_contribution(mpiGrid, 1,-1,popID); phiprof::stop("update_remote-y"); } + bailout(true, "", __FILE__, __LINE__); } /*! @@ -165,12 +166,16 @@ void calculateSpatialTranslation( // If dt=0 we are either initializing or distribution functions are not translated. // In both cases go to the end of this function and calculate the moments. 
if (dt == 0.0) goto momentCalculation; + + std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; - phiprof::start("compute_cell_lists"); - remoteTargetCellsx = mpiGrid.get_remote_cells_on_process_boundary(VLASOV_SOLVER_TARGET_X_NEIGHBORHOOD_ID); - remoteTargetCellsy = mpiGrid.get_remote_cells_on_process_boundary(VLASOV_SOLVER_TARGET_Y_NEIGHBORHOOD_ID); - remoteTargetCellsz = mpiGrid.get_remote_cells_on_process_boundary(VLASOV_SOLVER_TARGET_Z_NEIGHBORHOOD_ID); + phiprof::start("compute_cell_lists"); + remoteTargetCellsx = mpiGrid.get_remote_cells_on_process_boundary(VLASOV_SOLVER_TARGET_X_NEIGHBORHOOD_ID); + remoteTargetCellsy = mpiGrid.get_remote_cells_on_process_boundary(VLASOV_SOLVER_TARGET_Y_NEIGHBORHOOD_ID); + remoteTargetCellsz = mpiGrid.get_remote_cells_on_process_boundary(VLASOV_SOLVER_TARGET_Z_NEIGHBORHOOD_ID); + std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + // Figure out which spatial cells are translated, // result independent of particle species. for (size_t c=0; c Date: Thu, 11 Oct 2018 15:56:22 +0300 Subject: [PATCH 087/602] Fixed bug in the pencil builder where the 4th pencil of a refined cell would exit prematurely. --- vlasovsolver/cpu_trans_map_amr.cpp | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 841498b84..623aaab2f 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -256,7 +256,7 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg& cout << "Returning because of no cells" << endl; return false; } - + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + // Vector with all cell ids vector allCells(localPropagatedCells); allCells.insert(allCells.end(), remoteTargetCells.begin(), remoteTargetCells.end()); @@ -688,6 +693,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } // **************************************************************************** + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + // compute pencils => set of pencils (shared datastructure) // std::cout << "LocalPropagatedCells: "; @@ -748,7 +755,9 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Only one set is created for now but we retain support for multiple sets pencilSets.push_back(pencils); // **************************************************************************** - + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + const uint8_t VMESH_REFLEVEL = 0; // Get a pointer to the velocity mesh of the first spatial cell @@ -778,6 +787,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& unionOfBlocks.push_back(blockGID); } // **************************************************************************** + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; int t1 = phiprof::initializeTimer("mappingAndStore"); From 7a7ec09d48e7deaca2db63ae2bd5cffcd0ee9dc7 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 11 Oct 2018 16:02:36 +0300 Subject: [PATCH 088/602] Changed cpu_trans_map call in z-dimension to cpu_trans_map_amr. Added bunch of debugging statements to be removed later. 
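
With this change every dimension goes through the AMR translation
path. Roughly, the per-dimension sequence stays the same, only the
mapping call differs (sketch, z as the example; the neighborhood and
cell lists are the ones already used in this file):

    // Fetch remote velocity-block data for the z stencil, map along z,
    // then send contributions that landed in remote cells back out.
    SpatialCell::set_mpi_transfer_type(Transfer::VEL_BLOCK_DATA);
    mpiGrid.update_copies_of_remote_neighbors(VLASOV_SOLVER_Z_NEIGHBORHOOD_ID);
    trans_map_1d_amr(mpiGrid, local_propagated_cells, remoteTargetCellsz, 2, dt, popID);
    update_remote_mapping_contribution(mpiGrid, 2,  1, popID);
    update_remote_mapping_contribution(mpiGrid, 2, -1, popID);

The "I am at line ..." prints are temporary debugging aids.
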
--- vlasovsolver/vlasovmover.cpp | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index 53f9efdfc..0b938bea0 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -77,6 +77,8 @@ void calculateSpatialTranslation( int trans_timer; bool localTargetGridGenerated = false; + std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + // ------------- SLICE - map dist function in Z --------------- // if(P::zcells_ini > 1 ){ trans_timer=phiprof::initializeTimer("transfer-stencil-data-z","MPI"); @@ -86,7 +88,7 @@ void calculateSpatialTranslation( phiprof::stop(trans_timer); phiprof::start("compute-mapping-z"); - trans_map_1d(mpiGrid,local_propagated_cells, remoteTargetCellsz, 2, dt,popID); // map along z// + trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsz, 2, dt,popID); // map along z// phiprof::stop("compute-mapping-z"); trans_timer=phiprof::initializeTimer("update_remote-z","MPI"); @@ -100,16 +102,23 @@ void calculateSpatialTranslation( // ------------- SLICE - map dist function in X --------------- // if(P::xcells_ini > 1 ){ + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + trans_timer=phiprof::initializeTimer("transfer-stencil-data-x","MPI"); phiprof::start(trans_timer); - SpatialCell::set_mpi_transfer_type(Transfer::VEL_BLOCK_DATA); + SpatialCell::set_mpi_transfer_type(Transfer::VEL_BLOCK_DATA); mpiGrid.update_copies_of_remote_neighbors(VLASOV_SOLVER_X_NEIGHBORHOOD_ID); phiprof::stop(trans_timer); + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + phiprof::start("compute-mapping-x"); bool foo; foo = trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsx, 0,dt,popID); // map along x// phiprof::stop("compute-mapping-x"); + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; trans_timer=phiprof::initializeTimer("update_remote-x","MPI"); phiprof::start("update_remote-x"); @@ -137,6 +146,7 @@ void calculateSpatialTranslation( phiprof::stop("update_remote-y"); } bailout(true, "", __FILE__, __LINE__); + throw; } /*! 
@@ -202,22 +212,29 @@ void calculateSpatialTranslation( string profName = "translate "+getObjectWrapper().particleSpecies[popID].name; phiprof::start(profName); SpatialCell::setCommunicatedSpecies(popID); + std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; calculateSpatialTranslation(mpiGrid,localCells,local_propagated_cells, local_target_cells,remoteTargetCellsx,remoteTargetCellsy, remoteTargetCellsz,dt,popID); phiprof::stop(profName); } + std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + // Mapping complete, update moments and maximum dt limits // momentCalculation: calculateMoments_R_maxdt(mpiGrid,localCells,true); + std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + Real minDT = 1e300; for (size_t c=0; cparameters[CellParams::MAXRDT] < minDT) minDT = mpiGrid[localCells[c]]->parameters[CellParams::MAXRDT]; } - phiprof::stop("semilag-trans"); + + std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + phiprof::stop("semilag-trans"); } /* From 7007c912938490bf72675fe9b965fd8b318e1377 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 12 Oct 2018 14:15:47 +0300 Subject: [PATCH 089/602] Added debugging info from neighbors --- vlasovsolver/cpu_trans_map_amr.cpp | 40 ++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 623aaab2f..93dfd3812 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -740,6 +740,46 @@ bool trans_map_1d_amr(const dccrg::Dccrg& std::cout << std::endl; } + CellID idX = 114; + const auto* neighborsX = mpiGrid.get_neighbors_of(idX, VLASOV_SOLVER_X_NEIGHBORHOOD_ID); + if (neighborsX != NULL) { + std::cout << "Neighbors of cell " << idX << " in x dimension" << std::endl; + for (auto neighbor : *neighborsX) { + std::cout << neighbor.first << ", "; + for (int n = 0; n < 4; ++n) { + std::cout << neighbor.second[n] << " "; + } + std::cout << std::endl; + } + } + + CellID idY = 114; + const auto* neighborsY = mpiGrid.get_neighbors_of(idY, VLASOV_SOLVER_Y_NEIGHBORHOOD_ID); + if (neighborsY != NULL) { + std::cout << "Neighbors of cell " << idY << " in y dimension" << std::endl; + for (auto neighbor : *neighborsY) { + std::cout << neighbor.first << ", "; + for (int n = 0; n < 4; ++n) { + std::cout << neighbor.second[n] << " "; + } + std::cout << std::endl; + } + } + + CellID idZ = 114; + const auto* neighborsZ = mpiGrid.get_neighbors_of(idZ, VLASOV_SOLVER_Z_NEIGHBORHOOD_ID); + if (neighborsZ != NULL) { + std::cout << "Neighbors of cell " << idZ << " in z dimension" << std::endl; + for (auto neighbor : *neighborsZ) { + std::cout << neighbor.first << ", "; + for (int n = 0; n < 4; ++n) { + std::cout << neighbor.second[n] << " "; + } + std::cout << std::endl; + } + } + + // CellID id = 56; // const vector* neighbors = mpiGrid.get_neighbors_of(id, VLASOV_SOLVER_X_NEIGHBORHOOD_ID); // if (neighbors != NULL) { From 10362b6c187b33bfe8a88fe8ec787f25808aa5e4 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 12 Oct 2018 15:13:03 +0300 Subject: [PATCH 090/602] New implementations for copySpatialSource/TargetCells functions that use the get_neighbors_of grid function. 
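
The old helpers probed single fixed offsets with
get_spatial_neighbor_pointer; the new versions build the cell-pointer
arrays of a pencil directly from dccrg's neighbor lists, which also
covers refined neighbors. For a pencil of length L the source array is
laid out as follows (sketch, with W = VLASOV_STENCIL_WIDTH and a
neighborhood that lists neighbors from - to + along the pencil
dimension):

    // sourceCells holds L + 2*W pointers:
    // [ W front neighbors | L pencil cells | W back neighbors ]
    std::vector<CellID> ids = pencils.getIds(iPencil);
    for (int i = 0; i < L; ++i) {
       sourceCells[i + W] = mpiGrid[ids[i]];    // pencil interior
    }
    auto* frontNbrPairs = mpiGrid.get_neighbors_of(ids.front(), neighborhood);
    auto* backNbrPairs  = mpiGrid.get_neighbors_of(ids.back(),  neighborhood);
    // ...padding taken from the two neighbor-pair lists...

Target cells work the same way with a padding of one ghost cell at
each end, taken from the front and back neighbor lists.
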
--- vlasovsolver/cpu_trans_map_amr.cpp | 139 ++++++++++++++++------------- 1 file changed, 79 insertions(+), 60 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 93dfd3812..238f541e0 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -26,7 +26,7 @@ using namespace spatial_cell; #define i_trans_ps_blockv_pencil(planeVectorIndex, planeIndex, blockIndex, lengthOfPencil) ( (blockIndex) + VLASOV_STENCIL_WIDTH + ( (planeVectorIndex) + (planeIndex) * VEC_PER_PLANE ) * ( lengthOfPencil + 2 * VLASOV_STENCIL_WIDTH) ) -void compute_spatial_source_cells_for_pencil(const dccrg::Dccrg& mpiGrid, +void computeSpatialSourceCellsForPencil(const dccrg::Dccrg& mpiGrid, setOfPencils pencils, const uint iPencil, const uint dimension, @@ -36,26 +36,36 @@ void compute_spatial_source_cells_for_pencil(const dccrg::Dccrg ids = pencils.getIds(iPencil); - for (int iCell = -VLASOV_STENCIL_WIDTH; iCell < L + VLASOV_STENCIL_WIDTH; iCell++) { - CellID cellID = ids[min(max(iCell, 0), L - 1)]; - - int i = 0; - if(iCell <= 0) i = iCell; - if(iCell >= L) i = iCell - (L - 1); - - switch (dimension) { - case 0: - sourceCells[iCell + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, i, 0, 0); - break; - case 1: - sourceCells[iCell + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, i, 0); - break; - case 2: - sourceCells[iCell + VLASOV_STENCIL_WIDTH] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, 0, i); - break; - } + int neighborhood = 0; + switch (dimension) { + case 0: + neighborhood = VLASOV_SOLVER_TARGET_X_NEIGHBORHOOD_ID; + break; + case 1: + neighborhood = VLASOV_SOLVER_TARGET_Y_NEIGHBORHOOD_ID; + break; + case 2: + neighborhood = VLASOV_SOLVER_TARGET_Z_NEIGHBORHOOD_ID; + break; + } + + // Get pointers for each cell id of the pencil + for (int i = VLASOV_STENCIL_WIDTH; i < L + VLASOV_STENCIL_WIDTH; ++i) { + sourceCells[i] = mpiGrid[ids[i]]; } + // Insert pointers for neighbors of ids.front() and ids.back() + auto* frontNbrPairs = mpiGrid.get_neighbors_of(ids.front(), neighborhood); + auto* backNbrPairs = mpiGrid.get_neighbors_of(ids.back(), neighborhood); + + for (int i = 0; i < VLASOV_STENCIL_WIDTH; ++i) { + // Add VLASOV_STENCIL_WIDTH neighbors from the front of the pencil to the beginning of the array + sourceCells[i] = mpiGrid[(*frontNbrPairs)[i].first]; + + // Add VLASOV_STENCIL_WIDTH neighbors from the back of the pencil to the end of the array + sourceCells[L + i] = mpiGrid[(*backNbrPairs)[VLASOV_STENCIL_WIDTH + 1 + i].first]; + } + SpatialCell* last_good_cell = mpiGrid[ids.front()]; /*loop to neative side and replace all invalid cells with the closest good cell*/ for(int i = -1;i>=-VLASOV_STENCIL_WIDTH;i--){ @@ -76,36 +86,45 @@ void compute_spatial_source_cells_for_pencil(const dccrg::Dccrg& mpiGrid, - setOfPencils& pencils, - const uint dimension, - SpatialCell **targetCells){ - +void computeSpatialTargetCellsForPencils(const dccrg::Dccrg& mpiGrid, + setOfPencils& pencils, + const uint dimension, + SpatialCell **targetCells){ + uint GID = 0; + // Loop over pencils for(uint iPencil = 0; iPencil < pencils.N; iPencil++){ + // L = length of the pencil iPencil int L = pencils.lengthOfPencils[iPencil]; - vector ids = pencils.getIds(iPencil); - for (int iCell = -1; iCell <= L; iCell++) { - CellID cellID = ids[min(max(iCell,0),L - 1)]; - - int i = 0; - if(iCell == -1) i = -1; - if(iCell == L) i = 1; - - switch (dimension) { - case 0: - targetCells[GID + iCell + 
1] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, i, 0, 0); - break; - case 1: - targetCells[GID + iCell + 1] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, i, 0); - break; - case 2: - targetCells[GID + iCell + 1] = get_spatial_neighbor_pointer(mpiGrid, cellID, false, 0, 0, i); - break; - } + + int neighborhood = 0; + switch (dimension) { + case 0: + neighborhood = VLASOV_SOLVER_TARGET_X_NEIGHBORHOOD_ID; + break; + case 1: + neighborhood = VLASOV_SOLVER_TARGET_Y_NEIGHBORHOOD_ID; + break; + case 2: + neighborhood = VLASOV_SOLVER_TARGET_Z_NEIGHBORHOOD_ID; + break; + } + + // Get pointers for each cell id of the pencil + for (int i = 1; i < L + 1; ++i) { + targetCells[GID + i] = mpiGrid[ids[i]]; } + + // Insert pointers for neighbors of ids.front() and ids.back() + auto frontNbrPairs = mpiGrid.get_neighbors_of(ids.front(), neighborhood); + auto backNbrPairs = mpiGrid.get_neighbors_of(ids.back(), neighborhood); + + targetCells[GID] = mpiGrid[frontNbrPairs->back().first]; + targetCells[GID + L + 1] = mpiGrid[backNbrPairs->front().first]; + + // Incerment global id by L + 2 ghost cells. GID += (L + 2); } } @@ -778,17 +797,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& std::cout << std::endl; } } - - - // CellID id = 56; - // const vector* neighbors = mpiGrid.get_neighbors_of(id, VLASOV_SOLVER_X_NEIGHBORHOOD_ID); - // if (neighbors != NULL) { - // std::cout << "Neighbors of cell " << id << std::endl; - // for (auto neighbor : *neighbors) { - // std::cout << neighbor << std::endl; - // } - // } - } // Add the final set of pencils to the pencilSets - vector. @@ -854,7 +862,9 @@ bool trans_map_1d_amr(const dccrg::Dccrg& uint8_t vRefLevel; vmesh.getIndices(blockGID,vRefLevel, block_indices[0], block_indices[1], block_indices[2]); - + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + // Loop over sets of pencils // This loop only has one iteration for now for ( auto pencils: pencilSets ) { @@ -864,11 +874,15 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Add padding by 2 for each pencil Vec targetVecData[(pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL]; + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + // Initialize targetvecdata to 0 for( uint i = 0; i < (pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL; i++ ) { targetVecData[i] = Vec(0.0); } + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + // TODO: There's probably a smarter way to keep track of where we are writing // in the target data array. uint targetDataIndex = 0; @@ -876,8 +890,12 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Compute spatial neighbors for target cells. // For targets we need the local cells, plus a padding of 1 cell at both ends std::vector targetCells(pencils.sumOfLengths + pencils.N * 2 ); + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - compute_spatial_target_cells_for_pencils(mpiGrid, pencils, dimension, targetCells.data()); + computeSpatialTargetCellsForPencils(mpiGrid, pencils, dimension, targetCells.data()); + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Loop over pencils uint totalTargetLength = 0; @@ -891,9 +909,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Compute spatial neighbors for the source cells of the pencil. In // source cells we have a wider stencil and take into account boundaries. 
std::vector sourceCells(sourceLength); - compute_spatial_source_cells_for_pencil(mpiGrid, pencils, pencili, dimension, sourceCells.data()); - - + computeSpatialSourceCellsForPencil(mpiGrid, pencils, pencili, dimension, sourceCells.data()); + Vec dz[sourceCells.size()]; uint i = 0; for(auto neighbor: sourceCells) { @@ -956,6 +973,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } } } + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // store_data(target_data => targetCells) :Aggregate data for blockid to original location // Loop over pencils again @@ -1032,6 +1051,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } } - //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; return true; } From 4fcecbe21d7f359ccfe1edfb7f3866ad9a8365d9 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 16 Oct 2018 15:38:55 +0300 Subject: [PATCH 091/602] Fixed bug in getIds that I swear I fixed before. It returned one index too many. --- vlasovsolver/cpu_trans_map_amr.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index e27e115c1..a3933b6a4 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -67,7 +67,7 @@ struct setOfPencils { } CellID iend = ibeg + lengthOfPencils[pencilId]; - for (uint i = ibeg; i <= iend; i++) { + for (uint i = ibeg; i < iend; i++) { idsOut.push_back(ids[i]); } From 29af3aea8ae481b2fffd96722616d21cc1ba2d1d Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 16 Oct 2018 15:39:27 +0300 Subject: [PATCH 092/602] Updated update_remote_mapping_contribution to use mpiGrid.get_neighbors_of instead of Sebastian's local function. 
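
Assuming a neighborhood that spans one cell in the mapped dimension
and lists neighbors from - to +, the source and target neighbors of a
local cell reduce to the two ends of the neighbor-pair vector (sketch):

    const auto* NbrPairVector = mpiGrid.get_neighbors_of(local_cells[c], neighborhood);
    CellID m_ngbr = NbrPairVector->front().first; // source side
    CellID p_ngbr = NbrPairVector->back().first;  // target side

The old per-dimension switch over get_spatial_neighbor calls is left
commented out for reference until the new path is verified.
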
--- vlasovsolver/cpu_trans_map.cpp | 66 ++++++++++++++++++++++++++--------
 1 file changed, 52 insertions(+), 14 deletions(-)

diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp
index 22134d78a..73ac6ce2c 100644
--- a/vlasovsolver/cpu_trans_map.cpp
+++ b/vlasovsolver/cpu_trans_map.cpp
@@ -648,6 +648,8 @@ void update_remote_mapping_contribution(
    vector receive_cells;
    vector send_cells;
    vector receiveBuffers;
+
+   // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl;

    //normalize
    if(direction > 0) direction = 1;
@@ -659,31 +661,61 @@ void update_remote_mapping_contribution(
       ccell->neighbor_number_of_blocks = 0;
    }

+   //std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl;
+
    //TODO: prepare arrays, make parallel by avoiding push_back and by checking also for other stuff
-   for (size_t c=0; cneighbor_block_data = ccell->get_data(popID);
       ccell->neighbor_number_of_blocks = 0;
       CellID p_ngbr,m_ngbr;
+      // switch (dimension) {
+      // case 0:
+      //    //p_ngbr is target, if in boundaries then it is not updated
+      //    p_ngbr=get_spatial_neighbor(mpiGrid, local_cells[c], false, direction, 0, 0);
+      //    //m_ngbr is source, first boundary layer is propagated so that it flows into system
+      //    m_ngbr=get_spatial_neighbor(mpiGrid, local_cells[c], true, -direction, 0, 0);
+      //    break;
+      // case 1:
+      //    //p_ngbr is target, if in boundaries then it is not update
+      //    p_ngbr=get_spatial_neighbor(mpiGrid, local_cells[c], false, 0, direction, 0);
+      //    //m_ngbr is source, first boundary layer is propagated so that it flows into system
+      //    m_ngbr=get_spatial_neighbor(mpiGrid, local_cells[c], true, 0, -direction, 0);
+      //    break;
+      // case 2:
+      //    //p_ngbr is target, if in boundaries then it is not update
+      //    p_ngbr=get_spatial_neighbor(mpiGrid, local_cells[c], false, 0, 0, direction);
+      //    //m_ngbr is source, first boundary layer is propagated so that it flows into system
+      //    m_ngbr=get_spatial_neighbor(mpiGrid, local_cells[c], true, 0, 0, -direction);
+      //    break;
+      // default:
+      //    cerr << "Dimension wrong at (impossible!) "<< __FILE__ <<":" << __LINE__<front().first;
+      p_ngbr = NbrPairVector->back().first;
+
+      //std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl;
+
       //internal cell, not much to do
       if (mpiGrid.is_local(p_ngbr) && mpiGrid.is_local(m_ngbr)) continue;
@@ -715,7 +747,9 @@ void update_remote_mapping_contribution(
          receiveBuffers.push_back(mcell->neighbor_block_data);
       }
    }
-
+
+   //std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl;
+
    // Do communication
    SpatialCell::setCommunicatedSpecies(popID);
    SpatialCell::set_mpi_transfer_type(Transfer::NEIGHBOR_VEL_BLOCK_DATA);
@@ -733,6 +767,8 @@ void update_remote_mapping_contribution(
       if(direction < 0) mpiGrid.update_copies_of_remote_neighbors(SHIFT_M_Z_NEIGHBORHOOD_ID);
       break;
    }
+
+   //std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl;

 #pragma omp parallel
    {
@@ -762,7 +798,9 @@ void update_remote_mapping_contribution(
          }
       }
    }
-
+
+   //std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl;
+
    //and finally free temporary receive buffer
    for (size_t c=0; c < receiveBuffers.size(); ++c) {
       aligned_free(receiveBuffers[c]);

From b1437828962693e84843ebde44204326eb353abb Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Tue, 16 Oct 2018 15:40:40 +0300
Subject: [PATCH 093/602] Fixed neighborhood ids in calculateSourceCells.
 Cleanups elsewhere.
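
The source-cell routine was querying the one-cell-wide TARGET
neighborhoods, so the second ghost cell on each end of a pencil was
never reached. It now uses the full solver neighborhoods
(VLASOV_SOLVER_X/Y/Z_NEIGHBORHOOD_ID), whose stencil matches
VLASOV_STENCIL_WIDTH, and indexes the back padding accordingly
(sketch, with W = VLASOV_STENCIL_WIDTH and neighbors listed - to +):

    for (int i = 0; i < W; ++i) {
       // back pad: the + side entries start at index W of the list
       sourceCells[L + W + i] = mpiGrid[(*backNbrPairs)[W + i].first];
    }

The target-cell routine keeps the one-cell TARGET neighborhoods but
now takes the front pad from frontNbrPairs->front() and the back pad
from backNbrPairs->back(), which were swapped before.
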
--- vlasovsolver/cpu_trans_map_amr.cpp | 113 ++++++++++++++++++----------- 1 file changed, 70 insertions(+), 43 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 238f541e0..d53ed3797 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -39,50 +39,58 @@ void computeSpatialSourceCellsForPencil(const dccrg::Dccrg=-VLASOV_STENCIL_WIDTH;i--){ - if(sourceCells[i + VLASOV_STENCIL_WIDTH] == NULL) - sourceCells[i + VLASOV_STENCIL_WIDTH] = last_good_cell; - else - last_good_cell = sourceCells[i + VLASOV_STENCIL_WIDTH]; + sourceCells[L + VLASOV_STENCIL_WIDTH + i] = mpiGrid[(*backNbrPairs)[VLASOV_STENCIL_WIDTH + i].first]; } - last_good_cell = mpiGrid[ids.back()]; - /*loop to positive side and replace all invalid cells with the closest good cell*/ - for(int i = L + 1; i <= L + VLASOV_STENCIL_WIDTH; i++){ - if(sourceCells[i + VLASOV_STENCIL_WIDTH] == NULL) - sourceCells[i + VLASOV_STENCIL_WIDTH] = last_good_cell; - else - last_good_cell = sourceCells[i + VLASOV_STENCIL_WIDTH]; - } + // std::cout << std::endl; + + // SpatialCell* last_good_cell = mpiGrid[ids.front()]; + // /*loop to neative side and replace all invalid cells with the closest good cell*/ + // for(int i = -1;i>=-VLASOV_STENCIL_WIDTH;i--){ + // if(sourceCells[i + VLASOV_STENCIL_WIDTH] == NULL) + // sourceCells[i + VLASOV_STENCIL_WIDTH] = last_good_cell; + // else + // last_good_cell = sourceCells[i + VLASOV_STENCIL_WIDTH]; + // } + // last_good_cell = mpiGrid[ids.back()]; + // /*loop to positive side and replace all invalid cells with the closest good cell*/ + // for(int i = L + 1; i <= L + VLASOV_STENCIL_WIDTH; i++){ + // if(sourceCells[i + VLASOV_STENCIL_WIDTH] == NULL) + // sourceCells[i + VLASOV_STENCIL_WIDTH] = last_good_cell; + // else + // last_good_cell = sourceCells[i + VLASOV_STENCIL_WIDTH]; + // } } /*compute spatial target neighbors for pencils of size N. No boundary cells are included*/ @@ -111,19 +119,27 @@ void computeSpatialTargetCellsForPencils(const dccrg::Dccrgback().first]; - targetCells[GID + L + 1] = mpiGrid[backNbrPairs->front().first]; - + // std::cout << "Ghost cells: "; + // std::cout << frontNbrPairs->front().first << " "; + // std::cout << backNbrPairs->back().first << std::endl; + targetCells[GID] = mpiGrid[frontNbrPairs->front().first]; + targetCells[GID + L + 1] = mpiGrid[backNbrPairs->back().first]; + // Incerment global id by L + 2 ghost cells. 
GID += (L + 2); } @@ -643,7 +659,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const uint dimension, const Realv dt, const uint popID) { - + + const bool printLines = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ unsigned char cellid_transpose[WID3]; /*< defines the transpose for the solver internal (transposed) id: i + j*WID + k*WID2 to actual one*/ @@ -654,7 +671,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& return false; } - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Vector with all cell ids vector allCells(localPropagatedCells); @@ -712,7 +729,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } // **************************************************************************** - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // compute pencils => set of pencils (shared datastructure) @@ -743,7 +760,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } // Print out ids of pencils (if needed for debugging) - if (true) { + if (false) { uint ibeg = 0; uint iend = 0; std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; @@ -804,7 +821,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& pencilSets.push_back(pencils); // **************************************************************************** - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; const uint8_t VMESH_REFLEVEL = 0; @@ -836,7 +853,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } // **************************************************************************** - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; int t1 = phiprof::initializeTimer("mappingAndStore"); @@ -863,7 +880,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& vmesh.getIndices(blockGID,vRefLevel, block_indices[0], block_indices[1], block_indices[2]); - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Loop over sets of pencils // This loop only has one iteration for now @@ -874,14 +891,14 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Add padding by 2 for each pencil Vec targetVecData[(pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL]; - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Initialize targetvecdata to 0 for( uint i = 0; i < (pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL; i++ ) { targetVecData[i] = Vec(0.0); } - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // TODO: There's probably a smarter way to keep track of where we are writing // in the target data array. 
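
The buffer sizes used in these hunks all follow one bookkeeping rule, spelled out here as a sketch (names are the ones used in this file; treat the sourceLength line as an assumption, since its definition sits outside the quoted hunks):

   // Targets: one slot per pencil cell, plus one ghost cell at each end of
   // every pencil, hence 2 * pencils.N extra entries.
   const uint nTargetCells = pencils.sumOfLengths + 2 * pencils.N;
   // The vectorized scratch buffer holds WID3 values per cell, VECL per Vec.
   const uint nTargetVecs = nTargetCells * WID3 / VECL;
   // Sources for one pencil are instead padded by the full solver stencil.
   const uint sourceLength = pencils.lengthOfPencils[pencili] + 2 * VLASOV_STENCIL_WIDTH;
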
@@ -891,11 +908,11 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // For targets we need the local cells, plus a padding of 1 cell at both ends std::vector targetCells(pencils.sumOfLengths + pencils.N * 2 ); - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; computeSpatialTargetCellsForPencils(mpiGrid, pencils, dimension, targetCells.data()); - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Loop over pencils uint totalTargetLength = 0; @@ -910,6 +927,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // source cells we have a wider stencil and take into account boundaries. std::vector sourceCells(sourceLength); computeSpatialSourceCellsForPencil(mpiGrid, pencils, pencili, dimension, sourceCells.data()); + + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; Vec dz[sourceCells.size()]; uint i = 0; @@ -927,19 +946,27 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } i++; } + + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Allocate source data: sourcedata sourcedata) / (proper xy reconstruction in future) copy_trans_block_data_amr(sourceCells.data(), blockGID, L, sourceVecData, - cellid_transpose, popID); + cellid_transpose, popID); + + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Dz and sourceVecData are both padded by VLASOV_STENCIL_WIDTH // Dz has 1 value/cell, sourceVecData has WID3 values/cell propagatePencil(dz, sourceVecData, dimension, blockGID, dt, vmesh, L, debugflag); + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + // sourcedata => targetdata[this pencil]) for (uint i = 0; i < targetLength; i++) { for (uint k=0; k& } } - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // store_data(target_data => targetCells) :Aggregate data for blockid to original location // Loop over pencils again @@ -1051,6 +1078,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } } - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; return true; } From 255b7640552a9a2fc686691311a76b2f073cdc20 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 16 Oct 2018 15:41:37 +0300 Subject: [PATCH 094/602] Cleaned up writes --- vlasiator.cpp | 61 +++++++++++++++++++++++++++++++-------------------- 1 file changed, 37 insertions(+), 24 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index e10c85575..807984b57 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -94,7 +94,7 @@ void addTimedBarrier(string name){ bool computeNewTimeStep(dccrg::Dccrg& mpiGrid,Real &newDt, bool &isChanged) { - cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; phiprof::start("compute-timestep"); //compute maximum time-step, this cannot be done at the first @@ -174,22 +174,22 @@ bool computeNewTimeStep(dccrg::Dccrg& mpi creal meanFieldsCFL = 0.5*(P::fieldSolverMaxCFL+ P::fieldSolverMinCFL); Real subcycleDt; - cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; - - cout << dtMaxGlobal[0]; - cout << " "; - cout << dtMaxGlobal[1]; - cout << " 
"; - cout << dtMaxGlobal[2]; - cout << " "; - cout << P::vlasovSolverMaxCFL; - cout << " "; - cout << P::vlasovSolverMinCFL; - cout << " "; - cout << P::fieldSolverMaxCFL; - cout << " "; - cout << P::fieldSolverMinCFL; - cout << endl; + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + + // cout << dtMaxGlobal[0]; + // cout << " "; + // cout << dtMaxGlobal[1]; + // cout << " "; + // cout << dtMaxGlobal[2]; + // cout << " "; + // cout << P::vlasovSolverMaxCFL; + // cout << " "; + // cout << P::vlasovSolverMinCFL; + // cout << " "; + // cout << P::fieldSolverMaxCFL; + // cout << " "; + // cout << P::fieldSolverMinCFL; + // cout << endl; //reduce dt if it is too high for any of the three propagators, or too low for all propagators @@ -201,7 +201,7 @@ bool computeNewTimeStep(dccrg::Dccrg& mpi P::dt < dtMaxGlobal[2] * P::fieldSolverMinCFL * P::maxFieldSolverSubcycles ) ) { - cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; //new dt computed isChanged=true; @@ -524,7 +524,7 @@ int main(int argn,char* args[]) { } phiprof::stop("Init field propagator"); - cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; // Initialize Poisson solver (if used) if (P::propagatePotential == true) { @@ -539,7 +539,7 @@ int main(int argn,char* args[]) { // Free up memory: readparameters.finalize(); - cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; if (P::isRestart == false) { // Run Vlasov solver once with zero dt to initialize @@ -576,7 +576,7 @@ int main(int argn,char* args[]) { getVolumeFieldsFromFsGrid(volGrid, mpiGrid, cells); phiprof::stop("getVolumeFieldsFromFsGrid"); - cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; // Save restart data if (P::writeInitialState) { @@ -617,7 +617,7 @@ int main(int argn,char* args[]) { phiprof::stop("write-initial-state"); } - cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; if (P::isRestart == false) { //compute new dt @@ -631,7 +631,7 @@ int main(int argn,char* args[]) { phiprof::stop("compute-dt"); } - cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; if (!P::isRestart) { //go forward by dt/2 in V, initializes leapfrog split. In restarts the @@ -1031,6 +1031,8 @@ int main(int argn,char* args[]) { CellParams::P_33_DT2 ); phiprof::stop("Compute interp moments"); + + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Apply boundary conditions if (P::propagateVlasovTranslation || P::propagateVlasovAcceleration ) { @@ -1040,13 +1042,14 @@ int main(int argn,char* args[]) { addTimedBarrier("barrier-boundary-conditions"); } + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Propagate fields forward in time by dt. 
This needs to be done before the // moments for t + dt are computed (field uses t and t+0.5dt) if (P::propagateField) { phiprof::start("Propagate Fields"); - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::start("fsgrid-coupling-in"); // Copy moments over into the fsgrid. @@ -1095,9 +1098,13 @@ int main(int argn,char* args[]) { calculateAcceleration(mpiGrid, 0.0); } +// cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + phiprof::stop("Velocity-space",computedCells,"Cells"); addTimedBarrier("barrier-after-acceleration"); +// cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + phiprof::start("Compute interp moments"); // *here we compute rho and rho_v for timestep t + dt, so next // timestep * // @@ -1114,11 +1121,15 @@ int main(int argn,char* args[]) { ); phiprof::stop("Compute interp moments"); +// cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + phiprof::stop("Propagate",computedCells,"Cells"); phiprof::start("Project endTimeStep"); project->hook(hook::END_OF_TIME_STEP, mpiGrid); phiprof::stop("Project endTimeStep"); + +// cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Check timestep if (P::dt < P::bailout_min_dt) { @@ -1133,6 +1144,8 @@ int main(int argn,char* args[]) { } double after = MPI_Wtime(); + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::stop("Simulation"); phiprof::start("Finalization"); From 45447832443f017bb04663a3cc0d798413ee1d3f Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 16 Oct 2018 15:42:01 +0300 Subject: [PATCH 095/602] cleaned up writes --- vlasovsolver/vlasovmover.cpp | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index 0b938bea0..3be29b28c 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -77,7 +77,7 @@ void calculateSpatialTranslation( int trans_timer; bool localTargetGridGenerated = false; - std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; // ------------- SLICE - map dist function in Z --------------- // if(P::zcells_ini > 1 ){ @@ -86,7 +86,7 @@ void calculateSpatialTranslation( SpatialCell::set_mpi_transfer_type(Transfer::VEL_BLOCK_DATA); mpiGrid.update_copies_of_remote_neighbors(VLASOV_SOLVER_Z_NEIGHBORHOOD_ID); phiprof::stop(trans_timer); - + phiprof::start("compute-mapping-z"); trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsz, 2, dt,popID); // map along z// phiprof::stop("compute-mapping-z"); @@ -100,10 +100,10 @@ void calculateSpatialTranslation( } + // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + // ------------- SLICE - map dist function in X --------------- // - if(P::xcells_ini > 1 ){ - - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(P::xcells_ini > 1 ){ trans_timer=phiprof::initializeTimer("transfer-stencil-data-x","MPI"); phiprof::start(trans_timer); @@ -111,21 +111,19 @@ void calculateSpatialTranslation( mpiGrid.update_copies_of_remote_neighbors(VLASOV_SOLVER_X_NEIGHBORHOOD_ID); phiprof::stop(trans_timer); - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - phiprof::start("compute-mapping-x"); bool foo; foo = trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsx, 0,dt,popID); 
// map along x// phiprof::stop("compute-mapping-x"); - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - trans_timer=phiprof::initializeTimer("update_remote-x","MPI"); phiprof::start("update_remote-x"); update_remote_mapping_contribution(mpiGrid, 0,+1,popID); update_remote_mapping_contribution(mpiGrid, 0,-1,popID); phiprof::stop("update_remote-x"); } + + // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; // ------------- SLICE - map dist function in Y --------------- // if(P::ycells_ini > 1 ){ @@ -145,8 +143,8 @@ void calculateSpatialTranslation( update_remote_mapping_contribution(mpiGrid, 1,-1,popID); phiprof::stop("update_remote-y"); } - bailout(true, "", __FILE__, __LINE__); - throw; + // bailout(true, "", __FILE__, __LINE__); + // throw; } /*! @@ -177,14 +175,14 @@ void calculateSpatialTranslation( // In both cases go to the end of this function and calculate the moments. if (dt == 0.0) goto momentCalculation; - std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; phiprof::start("compute_cell_lists"); remoteTargetCellsx = mpiGrid.get_remote_cells_on_process_boundary(VLASOV_SOLVER_TARGET_X_NEIGHBORHOOD_ID); remoteTargetCellsy = mpiGrid.get_remote_cells_on_process_boundary(VLASOV_SOLVER_TARGET_Y_NEIGHBORHOOD_ID); remoteTargetCellsz = mpiGrid.get_remote_cells_on_process_boundary(VLASOV_SOLVER_TARGET_Z_NEIGHBORHOOD_ID); - std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; // Figure out which spatial cells are translated, // result independent of particle species. @@ -194,7 +192,7 @@ void calculateSpatialTranslation( } } - std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; // Figure out target spatial cells, result // independent of particle species. @@ -205,27 +203,27 @@ void calculateSpatialTranslation( } phiprof::stop("compute_cell_lists"); - std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; // Translate all particle species for (uint popID=0; popIDparameters[CellParams::MAXRDT]; } - std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; phiprof::stop("semilag-trans"); } From 68e309f3043202cf3587d2e17f6e5dc922b68355 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 16 Oct 2018 15:47:36 +0300 Subject: [PATCH 096/602] Fixed setGridCoupling calls in re-loadbalance during simulation. --- vlasiator.cpp | 58 +++++++++++++++++++++++++++++++++++---------------- 1 file changed, 40 insertions(+), 18 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 807984b57..81232522d 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -631,8 +631,6 @@ int main(int argn,char* args[]) { phiprof::stop("compute-dt"); } - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; - if (!P::isRestart) { //go forward by dt/2 in V, initializes leapfrog split. 
In restarts the //the distribution function is already propagated forward in time by dt/2 @@ -652,6 +650,8 @@ int main(int argn,char* args[]) { // *********************************** // ***** INITIALIZATION COMPLETE ***** // *********************************** + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; // Main simulation loop: if (myRank == MASTER_RANK) logFile << "(MAIN): Starting main simulation loop." << endl << writeVerbose; @@ -911,23 +911,45 @@ int main(int argn,char* args[]) { BgBGrid.setupForGridCoupling(); volGrid.setupForGridCoupling(); technicalGrid.setupForGridCoupling(); - - // FSGrid cellIds are 0-based, whereas DCCRG cellIds are 1-based, beware - for(auto& i : cells) { - perBGrid.setGridCoupling(i-1, myRank); - perBDt2Grid.setGridCoupling(i-1, myRank); - EGrid.setGridCoupling(i-1, myRank); - EDt2Grid.setGridCoupling(i-1, myRank); - EHallGrid.setGridCoupling(i-1, myRank); - EGradPeGrid.setGridCoupling(i-1, myRank); - momentsGrid.setGridCoupling(i-1, myRank); - momentsDt2Grid.setGridCoupling(i-1, myRank); - dPerBGrid.setGridCoupling(i-1, myRank); - dMomentsGrid.setGridCoupling(i-1, myRank); - BgBGrid.setGridCoupling(i-1, myRank); - volGrid.setGridCoupling(i-1, myRank); - technicalGrid.setGridCoupling(i-1, myRank); + + // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. + // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. + for(auto& dccrgId : cells) { + const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, fsGridDimensions, dccrgId); + for (auto fsgridId : fsgridIds) { + perBGrid. setGridCoupling(fsgridId, myRank); + perBDt2Grid. setGridCoupling(fsgridId, myRank); + EGrid. setGridCoupling(fsgridId, myRank); + EDt2Grid. setGridCoupling(fsgridId, myRank); + EHallGrid. setGridCoupling(fsgridId, myRank); + EGradPeGrid. setGridCoupling(fsgridId, myRank); + momentsGrid. setGridCoupling(fsgridId, myRank); + momentsDt2Grid.setGridCoupling(fsgridId, myRank); + dPerBGrid. setGridCoupling(fsgridId, myRank); + dMomentsGrid. setGridCoupling(fsgridId, myRank); + BgBGrid. setGridCoupling(fsgridId, myRank); + volGrid. setGridCoupling(fsgridId, myRank); + technicalGrid. 
setGridCoupling(fsgridId, myRank); + } } + + // // FSGrid cellIds are 0-based, whereas DCCRG cellIds are 1-based, beware + // for(auto& i : cells) { + // perBGrid.setGridCoupling(i-1, myRank); + // perBDt2Grid.setGridCoupling(i-1, myRank); + // EGrid.setGridCoupling(i-1, myRank); + // EDt2Grid.setGridCoupling(i-1, myRank); + // EHallGrid.setGridCoupling(i-1, myRank); + // EGradPeGrid.setGridCoupling(i-1, myRank); + // momentsGrid.setGridCoupling(i-1, myRank); + // momentsDt2Grid.setGridCoupling(i-1, myRank); + // dPerBGrid.setGridCoupling(i-1, myRank); + // dMomentsGrid.setGridCoupling(i-1, myRank); + // BgBGrid.setGridCoupling(i-1, myRank); + // volGrid.setGridCoupling(i-1, myRank); + // technicalGrid.setGridCoupling(i-1, myRank); + // } + perBGrid.finishGridCoupling(); perBDt2Grid.finishGridCoupling(); EGrid.finishGridCoupling(); From 25de7a42cd08fa9edaa8ab38f9d5bf8fde26bdf2 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 16 Oct 2018 16:39:04 +0300 Subject: [PATCH 097/602] Cleanup --- vlasiator.cpp | 55 +++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 47 insertions(+), 8 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 81232522d..cc3529f87 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -651,7 +651,7 @@ int main(int argn,char* args[]) { // ***** INITIALIZATION COMPLETE ***** // *********************************** - cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; // Main simulation loop: if (myRank == MASTER_RANK) logFile << "(MAIN): Starting main simulation loop." << endl << writeVerbose; @@ -695,6 +695,8 @@ int main(int argn,char* args[]) { while(P::tstep <= P::tstep_max && P::t-P::dt <= P::t_max+DT_EPSILON && wallTimeRestartCounter <= P::exitAfterRestarts) { + + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; addTimedBarrier("barrier-loop-start"); @@ -706,6 +708,8 @@ int main(int argn,char* args[]) { checkExternalCommands(); } phiprof::stop("checkExternalCommands"); + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; //write out phiprof profiles and logs with a lower interval than normal //diagnostic (every 10 diagnostic intervals). 
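
Contrasting the retired coupling with the new one may help when reading the recoupling code above (a sketch; someGrid stands for any one of the FsGrid instances coupled there, and the id conventions are the ones stated in the old comment):

   // Old scheme, one-to-one: fsgrid cell ids are 0-based, dccrg ids 1-based.
   someGrid.setGridCoupling(dccrgId - 1, myRank);

   // New scheme, one-to-many: a dccrg cell below the maximum refinement level
   // covers several fsgrid cells, so every mapped fsgrid id must be coupled.
   for (auto fsgridId : mapDccrgIdToFsGrid(mpiGrid, fsGridDimensions, dccrgId)) {
      someGrid.setGridCoupling(fsgridId, myRank);
   }
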
@@ -735,6 +739,8 @@ int main(int argn,char* args[]) { } logFile << writeVerbose; phiprof::stop("logfile-io"); + + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; // Check whether diagnostic output has to be produced if (P::diagnosticInterval != 0 && P::tstep % P::diagnosticInterval == 0) { @@ -761,6 +767,9 @@ int main(int argn,char* args[]) { } phiprof::stop("diagnostic-io"); } + + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + bool extractFsGridFields = true; // write system, loop through write classes for (uint i = 0; i < P::systemWriteTimeInterval.size(); i++) { @@ -813,11 +822,15 @@ int main(int argn,char* args[]) { phiprof::stop("write-system"); } } + + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; // Reduce globalflags::bailingOut from all processes phiprof::start("Bailout-allreduce"); MPI_Allreduce(&(globalflags::bailingOut), &(doBailout), 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); phiprof::stop("Bailout-allreduce"); + + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; // Write restart data if needed // Combined with checking of additional load balancing to have only one collective call. @@ -844,6 +857,8 @@ int main(int argn,char* args[]) { globalflags::balanceLoad = false; } } + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + MPI_Bcast( &doNow, 2 , MPI_INT , MASTER_RANK ,MPI_COMM_WORLD); writeRestartNow = doNow[0]; doNow[0] = 0; @@ -853,6 +868,8 @@ int main(int argn,char* args[]) { } phiprof::stop("compute-is-restart-written-and-extra-LB"); + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if (writeRestartNow >= 1){ phiprof::start("write-restart"); if (writeRestartNow == 1) { @@ -873,6 +890,8 @@ int main(int argn,char* args[]) { phiprof::stop("IO"); addTimedBarrier("barrier-end-io"); + +// cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; //no need to propagate if we are on the final step, we just //wanted to make sure all IO is done even for final step @@ -885,6 +904,7 @@ int main(int argn,char* args[]) { //Re-loadbalance if needed //TODO - add LB measure and do LB if it exceeds threshold if((P::tstep % P::rebalanceInterval == 0 && P::tstep > P::tstep_min) || overrideRebalanceNow == true) { + // if(false) { logFile << "(LB): Start load balance, tstep = " << P::tstep << " t = " << P::t << endl << writeVerbose; balanceLoad(mpiGrid, sysBoundaries); addTimedBarrier("barrier-end-load-balance"); @@ -895,9 +915,17 @@ int main(int argn,char* args[]) { logFile << "(LB): ... done!" << endl << writeVerbose; P::prepareForRebalance = false; + //cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + // Re-couple fsgrids to updated grid situation phiprof::start("fsgrid-recouple-after-lb"); + const vector& cells = getLocalCells(); + + cout << "Cells are: "; + for(auto id : cells) cout << id << " "; + cout << endl; + perBGrid.setupForGridCoupling(); perBDt2Grid.setupForGridCoupling(); EGrid.setupForGridCoupling(); @@ -912,11 +940,15 @@ int main(int argn,char* args[]) { volGrid.setupForGridCoupling(); technicalGrid.setupForGridCoupling(); + //cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. 
// Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. for(auto& dccrgId : cells) { const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, fsGridDimensions, dccrgId); + cout << "Fsgrid ids for cell " << dccrgId << " are: "; for (auto fsgridId : fsgridIds) { + cout << fsgridId << " "; perBGrid. setGridCoupling(fsgridId, myRank); perBDt2Grid. setGridCoupling(fsgridId, myRank); EGrid. setGridCoupling(fsgridId, myRank); @@ -931,7 +963,10 @@ int main(int argn,char* args[]) { volGrid. setGridCoupling(fsgridId, myRank); technicalGrid. setGridCoupling(fsgridId, myRank); } + cout << endl; } + + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; // // FSGrid cellIds are 0-based, whereas DCCRG cellIds are 1-based, beware // for(auto& i : cells) { @@ -968,6 +1003,8 @@ int main(int argn,char* args[]) { overrideRebalanceNow = false; } + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + //get local cells const vector& cells = getLocalCells(); @@ -1031,13 +1068,13 @@ int main(int argn,char* args[]) { } phiprof::start("Spatial-space"); - cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; if( P::propagateVlasovTranslation) { calculateSpatialTranslation(mpiGrid,P::dt); } else { calculateSpatialTranslation(mpiGrid,0.0); } - cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; phiprof::stop("Spatial-space",computedCells,"Cells"); phiprof::start("Compute interp moments"); @@ -1120,12 +1157,12 @@ int main(int argn,char* args[]) { calculateAcceleration(mpiGrid, 0.0); } -// cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::stop("Velocity-space",computedCells,"Cells"); addTimedBarrier("barrier-after-acceleration"); -// cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::start("Compute interp moments"); // *here we compute rho and rho_v for timestep t + dt, so next @@ -1143,7 +1180,7 @@ int main(int argn,char* args[]) { ); phiprof::stop("Compute interp moments"); -// cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::stop("Propagate",computedCells,"Cells"); @@ -1151,8 +1188,8 @@ int main(int argn,char* args[]) { project->hook(hook::END_OF_TIME_STEP, mpiGrid); phiprof::stop("Project endTimeStep"); -// cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + // Check timestep if (P::dt < P::bailout_min_dt) { stringstream s; @@ -1163,6 +1200,8 @@ int main(int argn,char* args[]) { P::meshRepartitioned = false; ++P::tstep; P::t += P::dt; + + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; } double after = MPI_Wtime(); From a771257d95bd03c0efef5e10da4a3bfba631e8b2 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 17 Oct 2018 14:46:48 +0300 Subject: [PATCH 098/602] Cleaned up debugging code --- vlasovsolver/cpu_trans_map_amr.cpp | 100 +++++++++++++---------------- 1 file changed, 44 insertions(+), 56 deletions(-) diff --git 
a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index d53ed3797..942961970 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -380,7 +380,7 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg &vmesh, const uint lengthOfPencil, bool debugflag) { + const vmesh::VelocityMesh &vmesh, const uint lengthOfPencil) { // Get velocity data from vmesh that we need later to calculate the translation velocity_block_indices_t block_indices; @@ -760,7 +760,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } // Print out ids of pencils (if needed for debugging) - if (false) { + if (true) { uint ibeg = 0; uint iend = 0; std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; @@ -776,45 +776,45 @@ bool trans_map_1d_amr(const dccrg::Dccrg& std::cout << std::endl; } - CellID idX = 114; - const auto* neighborsX = mpiGrid.get_neighbors_of(idX, VLASOV_SOLVER_X_NEIGHBORHOOD_ID); - if (neighborsX != NULL) { - std::cout << "Neighbors of cell " << idX << " in x dimension" << std::endl; - for (auto neighbor : *neighborsX) { - std::cout << neighbor.first << ", "; - for (int n = 0; n < 4; ++n) { - std::cout << neighbor.second[n] << " "; - } - std::cout << std::endl; - } - } - - CellID idY = 114; - const auto* neighborsY = mpiGrid.get_neighbors_of(idY, VLASOV_SOLVER_Y_NEIGHBORHOOD_ID); - if (neighborsY != NULL) { - std::cout << "Neighbors of cell " << idY << " in y dimension" << std::endl; - for (auto neighbor : *neighborsY) { - std::cout << neighbor.first << ", "; - for (int n = 0; n < 4; ++n) { - std::cout << neighbor.second[n] << " "; - } - std::cout << std::endl; - } - } - - CellID idZ = 114; - const auto* neighborsZ = mpiGrid.get_neighbors_of(idZ, VLASOV_SOLVER_Z_NEIGHBORHOOD_ID); - if (neighborsZ != NULL) { - std::cout << "Neighbors of cell " << idZ << " in z dimension" << std::endl; - for (auto neighbor : *neighborsZ) { - std::cout << neighbor.first << ", "; - for (int n = 0; n < 4; ++n) { - std::cout << neighbor.second[n] << " "; - } - std::cout << std::endl; - } - } - } + // CellID idX = 114; + // const auto* neighborsX = mpiGrid.get_neighbors_of(idX, VLASOV_SOLVER_X_NEIGHBORHOOD_ID); + // if (neighborsX != NULL) { + // std::cout << "Neighbors of cell " << idX << " in x dimension" << std::endl; + // for (auto neighbor : *neighborsX) { + // std::cout << neighbor.first << ", "; + // for (int n = 0; n < 4; ++n) { + // std::cout << neighbor.second[n] << " "; + // } + // std::cout << std::endl; + // } + // } + + // CellID idY = 114; + // const auto* neighborsY = mpiGrid.get_neighbors_of(idY, VLASOV_SOLVER_Y_NEIGHBORHOOD_ID); + // if (neighborsY != NULL) { + // std::cout << "Neighbors of cell " << idY << " in y dimension" << std::endl; + // for (auto neighbor : *neighborsY) { + // std::cout << neighbor.first << ", "; + // for (int n = 0; n < 4; ++n) { + // std::cout << neighbor.second[n] << " "; + // } + // std::cout << std::endl; + // } + // } + + // CellID idZ = 114; + // const auto* neighborsZ = mpiGrid.get_neighbors_of(idZ, VLASOV_SOLVER_Z_NEIGHBORHOOD_ID); + // if (neighborsZ != NULL) { + // std::cout << "Neighbors of cell " << idZ << " in z dimension" << std::endl; + // for (auto neighbor : *neighborsZ) { + // std::cout << neighbor.first << ", "; + // for (int n = 0; n < 4; ++n) { + // std::cout << neighbor.second[n] << " "; + // } + // std::cout << std::endl; + // } + // } + } // Add the final set of pencils to the pencilSets - vector. 
// Only one set is created for now but we retain support for multiple sets @@ -868,13 +868,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Get global id of the velocity block vmesh::GlobalID blockGID = unionOfBlocks[blocki]; - bool debugflag = false; - CellID debugcell; - uint allCellsPointerIndex = 16; - - const vmesh::LocalID debugLID = allCellsPointer[allCellsPointerIndex]->get_velocity_block_local_id(blockGID, popID); - Realf* data = allCellsPointer[allCellsPointerIndex]->get_data(debugLID,popID); - //for (uint i = 0; i < WID3; i++) if (data[i] != 0) debugflag = true; velocity_block_indices_t block_indices; uint8_t vRefLevel; vmesh.getIndices(blockGID,vRefLevel, block_indices[0], @@ -963,7 +956,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Dz and sourceVecData are both padded by VLASOV_STENCIL_WIDTH // Dz has 1 value/cell, sourceVecData has WID3 values/cell - propagatePencil(dz, sourceVecData, dimension, blockGID, dt, vmesh, L, debugflag); + propagatePencil(dz, sourceVecData, dimension, blockGID, dt, vmesh, L); if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; @@ -979,7 +972,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } } totalTargetLength += targetLength; - // dealloc source data -- Should be automatic since it's declared in this iteration? + // dealloc source data -- Should be automatic since it's declared in this loop iteration? } // reset blocks in all non-sysboundary neighbor spatial cells for this block id @@ -1023,9 +1016,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // // They are copies of cells that are already in the pencil // // - It seems that doing this was wrong. Investigate! // if(pencils.periodic[pencili] && (celli == 0 || celli == targetLength - 1)) - // continue; - - if(celli > 0 && pencilIds[celli - 1] == debugcell) debugPencilFlag = true; + // continue; Realv vector[VECL]; // Loop over 1st vspace dimension @@ -1067,14 +1058,11 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } } - if(debugflag && debugPencilFlag) cout << "TotalTargetLength = " << totalTargetLength << endl; totalTargetLength += targetLength; // dealloc target data -- Should be automatic again? 
} } - - data = allCellsPointer[allCellsPointerIndex]->get_data(debugLID,popID); } } From 04ffe319dd86780f9383a8dce82c2f4602daad14 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 17 Oct 2018 15:19:57 +0300 Subject: [PATCH 099/602] Bug fix: size of transferBuffer was wrong in getFsGridMaxDt --- fieldsolver/gridGlue.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 178151f8d..4e4dc971d 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -394,7 +394,7 @@ void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid, technicalGrid.setupForTransferOut(nCells); // Buffer to store contents of the grid - std::vector transferBuffer(cells.size()); + std::vector transferBuffer(nCells); std::vector transferBufferPointer; int k = 0; From 1f4a42fc01678fe2f7a35b058b170e472225ed1a Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 18 Oct 2018 11:51:41 +0300 Subject: [PATCH 100/602] Debugging - for merge --- vlasiator.cpp | 197 +++++++++++++++++------------ vlasovsolver/cpu_trans_map_amr.cpp | 73 ++++++++--- vlasovsolver/vlasovmover.cpp | 7 +- 3 files changed, 175 insertions(+), 102 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index cc3529f87..9baac0a6f 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -93,8 +93,6 @@ void addTimedBarrier(string name){ } bool computeNewTimeStep(dccrg::Dccrg& mpiGrid,Real &newDt, bool &isChanged) { - - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; phiprof::start("compute-timestep"); //compute maximum time-step, this cannot be done at the first @@ -174,8 +172,6 @@ bool computeNewTimeStep(dccrg::Dccrg& mpi creal meanFieldsCFL = 0.5*(P::fieldSolverMaxCFL+ P::fieldSolverMinCFL); Real subcycleDt; - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; - // cout << dtMaxGlobal[0]; // cout << " "; // cout << dtMaxGlobal[1]; @@ -201,7 +197,6 @@ bool computeNewTimeStep(dccrg::Dccrg& mpi P::dt < dtMaxGlobal[2] * P::fieldSolverMinCFL * P::maxFieldSolverSubcycles ) ) { - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; //new dt computed isChanged=true; @@ -273,6 +268,7 @@ int main(int argn,char* args[]) { typedef Parameters P; Real newDt; bool dtIsChanged; + const bool printLines = false; // Init MPI: int required=MPI_THREAD_FUNNELED; @@ -386,7 +382,7 @@ int main(int argn,char* args[]) { isSysBoundaryCondDynamic = sysBoundaries.isDynamic(); phiprof::stop("Init grid"); - + // Initialize data reduction operators. This should be done elsewhere in order to initialize // user-defined operators: phiprof::start("Init DROs"); @@ -432,6 +428,14 @@ int main(int argn,char* args[]) { phiprof::stop("Init fieldsolver grids"); phiprof::start("Initial fsgrid coupling"); const std::vector& cells = getLocalCells(); + + // for (auto cell: cells) { + // creal dx = mpiGrid[cell]->parameters[CellParams::DX]; + // creal dy = mpiGrid[cell]->parameters[CellParams::DY]; + // creal dz = mpiGrid[cell]->parameters[CellParams::DZ]; + + // std::cout << "cell " << cell << " dx,dy,dz = " << dx << ", " << dy << ", " << dz << std::endl; + // } // Couple FSGrids to mpiGrid // TODO: Do we really need to couple *all* of these fields? 
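
The one-line gridGlue.cpp fix above is easy to miss: the transfer buffer must be sized by the number of fsgrid cells, which with refinement exceeds the number of dccrg cells. A sketch of the intent (the nCells accumulation shown here is an assumption, modeled on the mapping helper used elsewhere in these patches):

   // Count every fsgrid cell covered by the local dccrg cells...
   size_t nCells = 0;
   for (auto dccrgId : cells) {
      nCells += mapDccrgIdToFsGrid(mpiGrid, fsGridDimensions, dccrgId).size();
   }
   // ...and size the buffer by that count; sizing by cells.size() undercounts
   // whenever a dccrg cell maps to more than one fsgrid cell.
   std::vector<fsgrids::technical> transferBuffer(nCells);
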
@@ -498,7 +502,7 @@ int main(int argn,char* args[]) { feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,false); feedMomentsIntoFsGrid(mpiGrid, cells, momentsDt2Grid,false); - cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; phiprof::start("Init field propagator"); if ( @@ -524,7 +528,7 @@ int main(int argn,char* args[]) { } phiprof::stop("Init field propagator"); - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; // Initialize Poisson solver (if used) if (P::propagatePotential == true) { @@ -539,7 +543,7 @@ int main(int argn,char* args[]) { // Free up memory: readparameters.finalize(); - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; if (P::isRestart == false) { // Run Vlasov solver once with zero dt to initialize @@ -576,7 +580,7 @@ int main(int argn,char* args[]) { getVolumeFieldsFromFsGrid(volGrid, mpiGrid, cells); phiprof::stop("getVolumeFieldsFromFsGrid"); - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; // Save restart data if (P::writeInitialState) { @@ -617,12 +621,15 @@ int main(int argn,char* args[]) { phiprof::stop("write-initial-state"); } - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; if (P::isRestart == false) { //compute new dt phiprof::start("compute-dt"); getFsGridMaxDt(technicalGrid, mpiGrid, cells); + + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + computeNewTimeStep(mpiGrid,newDt,dtIsChanged); if (P::dynamicTimestep == true && dtIsChanged == true) { // Only actually update the timestep if dynamicTimestep is on @@ -631,6 +638,8 @@ int main(int argn,char* args[]) { phiprof::stop("compute-dt"); } + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if (!P::isRestart) { //go forward by dt/2 in V, initializes leapfrog split. In restarts the //the distribution function is already propagated forward in time by dt/2 @@ -651,7 +660,7 @@ int main(int argn,char* args[]) { // ***** INITIALIZATION COMPLETE ***** // *********************************** - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; // Main simulation loop: if (myRank == MASTER_RANK) logFile << "(MAIN): Starting main simulation loop." 
<< endl << writeVerbose; @@ -696,7 +705,7 @@ int main(int argn,char* args[]) { P::t-P::dt <= P::t_max+DT_EPSILON && wallTimeRestartCounter <= P::exitAfterRestarts) { - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; addTimedBarrier("barrier-loop-start"); @@ -709,7 +718,7 @@ int main(int argn,char* args[]) { } phiprof::stop("checkExternalCommands"); - cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; //write out phiprof profiles and logs with a lower interval than normal //diagnostic (every 10 diagnostic intervals). @@ -740,7 +749,7 @@ int main(int argn,char* args[]) { logFile << writeVerbose; phiprof::stop("logfile-io"); - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; // Check whether diagnostic output has to be produced if (P::diagnosticInterval != 0 && P::tstep % P::diagnosticInterval == 0) { @@ -768,7 +777,7 @@ int main(int argn,char* args[]) { phiprof::stop("diagnostic-io"); } - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; bool extractFsGridFields = true; // write system, loop through write classes @@ -823,14 +832,14 @@ int main(int argn,char* args[]) { } } - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; // Reduce globalflags::bailingOut from all processes phiprof::start("Bailout-allreduce"); MPI_Allreduce(&(globalflags::bailingOut), &(doBailout), 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); phiprof::stop("Bailout-allreduce"); - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; // Write restart data if needed // Combined with checking of additional load balancing to have only one collective call. 
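
The "only one collective call" mentioned above works roughly as follows (a sketch; the actual restart criterion is elided, and the two-int doNow layout follows the code in this file):

   int doNow[2] = {0, 0};   // doNow[0]: write restart, doNow[1]: extra load balance
   if (myRank == MASTER_RANK) {
      if (/* restart criterion */ false) doNow[0] = 1;
      if (globalflags::balanceLoad) {
         doNow[1] = 1;
         globalflags::balanceLoad = false;
      }
   }
   // A single broadcast delivers both decisions to every rank.
   MPI_Bcast(doNow, 2, MPI_INT, MASTER_RANK, MPI_COMM_WORLD);
   int writeRestartNow = doNow[0];
   bool overrideRebalanceNow = (doNow[1] == 1);
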
@@ -857,7 +866,7 @@ int main(int argn,char* args[]) { globalflags::balanceLoad = false; } } - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; MPI_Bcast( &doNow, 2 , MPI_INT , MASTER_RANK ,MPI_COMM_WORLD); writeRestartNow = doNow[0]; @@ -868,7 +877,7 @@ int main(int argn,char* args[]) { } phiprof::stop("compute-is-restart-written-and-extra-LB"); - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; if (writeRestartNow >= 1){ phiprof::start("write-restart"); @@ -891,7 +900,7 @@ int main(int argn,char* args[]) { phiprof::stop("IO"); addTimedBarrier("barrier-end-io"); -// cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; //no need to propagate if we are on the final step, we just //wanted to make sure all IO is done even for final step @@ -903,8 +912,8 @@ int main(int argn,char* args[]) { //Re-loadbalance if needed //TODO - add LB measure and do LB if it exceeds threshold - if((P::tstep % P::rebalanceInterval == 0 && P::tstep > P::tstep_min) || overrideRebalanceNow == true) { - // if(false) { + #warning Re-loadbalance has been disabled temporarily for amr debugging + if(((P::tstep % P::rebalanceInterval == 0 && P::tstep > P::tstep_min) || overrideRebalanceNow) && false) { logFile << "(LB): Start load balance, tstep = " << P::tstep << " t = " << P::t << endl << writeVerbose; balanceLoad(mpiGrid, sysBoundaries); addTimedBarrier("barrier-end-load-balance"); @@ -915,32 +924,32 @@ int main(int argn,char* args[]) { logFile << "(LB): ... done!" << endl << writeVerbose; P::prepareForRebalance = false; - //cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; // Re-couple fsgrids to updated grid situation phiprof::start("fsgrid-recouple-after-lb"); const vector& cells = getLocalCells(); - cout << "Cells are: "; + cout << "Local cells are: "; for(auto id : cells) cout << id << " "; cout << endl; - perBGrid.setupForGridCoupling(); - perBDt2Grid.setupForGridCoupling(); - EGrid.setupForGridCoupling(); - EDt2Grid.setupForGridCoupling(); - EHallGrid.setupForGridCoupling(); - EGradPeGrid.setupForGridCoupling(); - momentsGrid.setupForGridCoupling(); + perBGrid. setupForGridCoupling(); + perBDt2Grid. setupForGridCoupling(); + EGrid. setupForGridCoupling(); + EDt2Grid. setupForGridCoupling(); + EHallGrid. setupForGridCoupling(); + EGradPeGrid. setupForGridCoupling(); + momentsGrid. setupForGridCoupling(); momentsDt2Grid.setupForGridCoupling(); - dPerBGrid.setupForGridCoupling(); - dMomentsGrid.setupForGridCoupling(); - BgBGrid.setupForGridCoupling(); - volGrid.setupForGridCoupling(); - technicalGrid.setupForGridCoupling(); + dPerBGrid. setupForGridCoupling(); + dMomentsGrid. setupForGridCoupling(); + BgBGrid. setupForGridCoupling(); + volGrid. setupForGridCoupling(); + technicalGrid. 
setupForGridCoupling(); - //cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. @@ -959,51 +968,34 @@ int main(int argn,char* args[]) { momentsDt2Grid.setGridCoupling(fsgridId, myRank); dPerBGrid. setGridCoupling(fsgridId, myRank); dMomentsGrid. setGridCoupling(fsgridId, myRank); - BgBGrid. setGridCoupling(fsgridId, myRank); + BgBGrid. setGridCoupling(fsgridId, myRank); volGrid. setGridCoupling(fsgridId, myRank); technicalGrid. setGridCoupling(fsgridId, myRank); } cout << endl; } - - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; - - // // FSGrid cellIds are 0-based, whereas DCCRG cellIds are 1-based, beware - // for(auto& i : cells) { - // perBGrid.setGridCoupling(i-1, myRank); - // perBDt2Grid.setGridCoupling(i-1, myRank); - // EGrid.setGridCoupling(i-1, myRank); - // EDt2Grid.setGridCoupling(i-1, myRank); - // EHallGrid.setGridCoupling(i-1, myRank); - // EGradPeGrid.setGridCoupling(i-1, myRank); - // momentsGrid.setGridCoupling(i-1, myRank); - // momentsDt2Grid.setGridCoupling(i-1, myRank); - // dPerBGrid.setGridCoupling(i-1, myRank); - // dMomentsGrid.setGridCoupling(i-1, myRank); - // BgBGrid.setGridCoupling(i-1, myRank); - // volGrid.setGridCoupling(i-1, myRank); - // technicalGrid.setGridCoupling(i-1, myRank); - // } + //cout << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; - perBGrid.finishGridCoupling(); - perBDt2Grid.finishGridCoupling(); - EGrid.finishGridCoupling(); - EDt2Grid.finishGridCoupling(); - EHallGrid.finishGridCoupling(); - EGradPeGrid.finishGridCoupling(); - momentsGrid.finishGridCoupling(); + perBGrid. finishGridCoupling(); + perBDt2Grid. finishGridCoupling(); + EGrid. finishGridCoupling(); + EDt2Grid. finishGridCoupling(); + EHallGrid. finishGridCoupling(); + EGradPeGrid. finishGridCoupling(); + momentsGrid. finishGridCoupling(); momentsDt2Grid.finishGridCoupling(); - dPerBGrid.finishGridCoupling(); - dMomentsGrid.finishGridCoupling(); - BgBGrid.finishGridCoupling(); - volGrid.finishGridCoupling(); - technicalGrid.finishGridCoupling(); + dPerBGrid. finishGridCoupling(); + dMomentsGrid. finishGridCoupling(); + BgBGrid. finishGridCoupling(); + volGrid. finishGridCoupling(); + technicalGrid. 
finishGridCoupling(); phiprof::stop("fsgrid-recouple-after-lb"); overrideRebalanceNow = false; } - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; //get local cells const vector& cells = getLocalCells(); @@ -1068,13 +1060,56 @@ int main(int argn,char* args[]) { } phiprof::start("Spatial-space"); - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + + cout << "Before spatialTranslation, "; + Real nSum = 0.0; + Real nSumRef = 0.0; + + for(auto cell: cells) { + + creal rho = mpiGrid[cell]->parameters[CellParams::RHOM_R]; + creal dx = mpiGrid[cell]->parameters[CellParams::DX]; + creal dy = mpiGrid[cell]->parameters[CellParams::DY]; + creal dz = mpiGrid[cell]->parameters[CellParams::DZ]; + if(cell >= 114) { + nSumRef += rho*dx*dy*dz; + cout << "Cell " << cell << " n = " << rho*dx*dy*dz << endl; + } else { + nSum += rho*dx*dy*dz; + cout << "Cell " << cell << " n = " << rho*dx*dy*dz << endl; + } + } + cout << "nSum = " << nSum << ", nSumRef = " << nSumRef << endl; + if( P::propagateVlasovTranslation) { calculateSpatialTranslation(mpiGrid,P::dt); } else { calculateSpatialTranslation(mpiGrid,0.0); } - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + + cout << "After spatialTranslation, "; + nSum = 0.0; + nSumRef = 0.0; + for(auto cell: cells) { + creal rho = mpiGrid[cell]->parameters[CellParams::RHOM_R]; + creal dx = mpiGrid[cell]->parameters[CellParams::DX]; + creal dy = mpiGrid[cell]->parameters[CellParams::DY]; + creal dz = mpiGrid[cell]->parameters[CellParams::DZ]; + + if(cell >= 114) { + nSumRef += rho*dx*dy*dz; + cout << "Cell " << cell << " n = " << rho*dx*dy*dz << endl; + } else { + nSum += rho*dx*dy*dz; + cout << "Cell " << cell << " n = " << rho*dx*dy*dz << endl; + } + } + cout << "nSum = " << nSum << ", nSumRef = " << nSumRef << endl; + + bailout(true, "", __FILE__, __LINE__); + + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; phiprof::stop("Spatial-space",computedCells,"Cells"); phiprof::start("Compute interp moments"); @@ -1091,7 +1126,7 @@ int main(int argn,char* args[]) { ); phiprof::stop("Compute interp moments"); - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Apply boundary conditions if (P::propagateVlasovTranslation || P::propagateVlasovAcceleration ) { @@ -1101,14 +1136,14 @@ int main(int argn,char* args[]) { addTimedBarrier("barrier-boundary-conditions"); } - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Propagate fields forward in time by dt. This needs to be done before the // moments for t + dt are computed (field uses t and t+0.5dt) if (P::propagateField) { phiprof::start("Propagate Fields"); - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::start("fsgrid-coupling-in"); // Copy moments over into the fsgrid. 
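
The nSum printouts bracketing calculateSpatialTranslation above implement a plain conservation check: translation only moves phase-space density between cells, so the total mass summed over the cells should stay constant. The same check as a small helper (hypothetical repackaging of the loop above; note it sums this rank's local cells only):

   auto totalMass = [&](const std::vector<CellID>& cells) {
      Real nSum = 0.0;
      for (auto cell : cells) {
         creal rho = mpiGrid[cell]->parameters[CellParams::RHOM_R];
         creal dx  = mpiGrid[cell]->parameters[CellParams::DX];
         creal dy  = mpiGrid[cell]->parameters[CellParams::DY];
         creal dz  = mpiGrid[cell]->parameters[CellParams::DZ];
         nSum += rho * dx * dy * dz;   // mass density times cell volume
      }
      return nSum;   // should agree before and after translation, up to rounding
   };
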
@@ -1157,12 +1192,12 @@ int main(int argn,char* args[]) { calculateAcceleration(mpiGrid, 0.0); } - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::stop("Velocity-space",computedCells,"Cells"); addTimedBarrier("barrier-after-acceleration"); - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::start("Compute interp moments"); // *here we compute rho and rho_v for timestep t + dt, so next @@ -1180,7 +1215,7 @@ int main(int argn,char* args[]) { ); phiprof::stop("Compute interp moments"); - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::stop("Propagate",computedCells,"Cells"); @@ -1188,7 +1223,7 @@ int main(int argn,char* args[]) { project->hook(hook::END_OF_TIME_STEP, mpiGrid); phiprof::stop("Project endTimeStep"); - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Check timestep if (P::dt < P::bailout_min_dt) { @@ -1201,7 +1236,7 @@ int main(int argn,char* args[]) { ++P::tstep; P::t += P::dt; - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; } double after = MPI_Wtime(); diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 942961970..ddfa48ed3 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -27,10 +27,10 @@ using namespace spatial_cell; #define i_trans_ps_blockv_pencil(planeVectorIndex, planeIndex, blockIndex, lengthOfPencil) ( (blockIndex) + VLASOV_STENCIL_WIDTH + ( (planeVectorIndex) + (planeIndex) * VEC_PER_PLANE ) * ( lengthOfPencil + 2 * VLASOV_STENCIL_WIDTH) ) void computeSpatialSourceCellsForPencil(const dccrg::Dccrg& mpiGrid, - setOfPencils pencils, - const uint iPencil, - const uint dimension, - SpatialCell **sourceCells){ + setOfPencils pencils, + const uint iPencil, + const uint dimension, + SpatialCell **sourceCells){ // L = length of the pencil iPencil int L = pencils.lengthOfPencils[iPencil]; @@ -49,22 +49,33 @@ void computeSpatialSourceCellsForPencil(const dccrg::Dccrg& Vec dz[sourceCells.size()]; uint i = 0; - for(auto neighbor: sourceCells) { + for(auto cell: sourceCells) { switch (dimension) { case(0): - dz[i] = neighbor->SpatialCell::parameters[CellParams::DX]; + dz[i] = cell->SpatialCell::parameters[CellParams::DX]; break; case(1): - dz[i] = neighbor->SpatialCell::parameters[CellParams::DY]; + dz[i] = cell->SpatialCell::parameters[CellParams::DY]; break; case(2): - dz[i] = neighbor->SpatialCell::parameters[CellParams::DZ]; + dz[i] = cell->SpatialCell::parameters[CellParams::DZ]; break; } i++; @@ -953,7 +964,35 @@ bool trans_map_1d_amr(const dccrg::Dccrg& cellid_transpose, popID); if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - + + bool printMoreStuff = false; + for (auto id: pencilIds) { + if (id == 13 || id == 15) { + printMoreStuff = true; + } + } + + if(printMoreStuff) { + cout << "Ids are: "; + for(auto id: pencilIds) cout << id << " "; + cout << endl; + + cout << "dz's are: "; + for(uint i = 0; i < sourceCells.size(); ++i) { + cout << (int)(dz[i][0]*1e-4)/100.0 << " "; + } + cout << endl; + + // cout << 
"X,Y,Z are: " << endl; + // for(auto cell: sourceCells) { + // cout << (int)(cell->SpatialCell::parameters[CellParams::XCRD]*1e-4)/100.0 << ","; + // cout << (int)(cell->SpatialCell::parameters[CellParams::YCRD]*1e-4)/100.0 << ","; + // cout << (int)(cell->SpatialCell::parameters[CellParams::ZCRD]*1e-4)/100.0 << endl; + // } + + cout << endl; + } + // Dz and sourceVecData are both padded by VLASOV_STENCIL_WIDTH // Dz has 1 value/cell, sourceVecData has WID3 values/cell propagatePencil(dz, sourceVecData, dimension, blockGID, dt, vmesh, L); diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index 3be29b28c..44e9d7e8b 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -80,7 +80,7 @@ void calculateSpatialTranslation( // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; // ------------- SLICE - map dist function in Z --------------- // - if(P::zcells_ini > 1 ){ + if(P::zcells_ini > 1 && false){ trans_timer=phiprof::initializeTimer("transfer-stencil-data-z","MPI"); phiprof::start(trans_timer); SpatialCell::set_mpi_transfer_type(Transfer::VEL_BLOCK_DATA); @@ -112,8 +112,7 @@ void calculateSpatialTranslation( phiprof::stop(trans_timer); phiprof::start("compute-mapping-x"); - bool foo; - foo = trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsx, 0,dt,popID); // map along x// + trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsx, 0,dt,popID); // map along x// phiprof::stop("compute-mapping-x"); trans_timer=phiprof::initializeTimer("update_remote-x","MPI"); @@ -126,7 +125,7 @@ void calculateSpatialTranslation( // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; // ------------- SLICE - map dist function in Y --------------- // - if(P::ycells_ini > 1 ){ + if(P::ycells_ini > 1 && false){ trans_timer=phiprof::initializeTimer("transfer-stencil-data-y","MPI"); phiprof::start(trans_timer); SpatialCell::set_mpi_transfer_type(Transfer::VEL_BLOCK_DATA); From a9ac0db494b19e9feee70b8968a197b2fd1385f6 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 19 Oct 2018 17:13:39 +0300 Subject: [PATCH 101/602] Removed some debug statements, just printing total mass once per time step. 
--- vlasiator.cpp | 50 ++++++++++++++++---------------------------------- 1 file changed, 16 insertions(+), 34 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 9baac0a6f..bde05a40f 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -700,6 +700,17 @@ int main(int argn,char* args[]) { double beforeTime = MPI_Wtime(); double beforeSimulationTime=P::t_min; double beforeStep=P::tstep_min; + + Real nSum = 0.0; + for(auto cell: cells) { + creal rho = mpiGrid[cell]->parameters[CellParams::RHOM_R]; + creal dx = mpiGrid[cell]->parameters[CellParams::DX]; + creal dy = mpiGrid[cell]->parameters[CellParams::DY]; + creal dz = mpiGrid[cell]->parameters[CellParams::DZ]; + + nSum += rho*dx*dy*dz; + } + cout << "nSum = " << nSum << endl; while(P::tstep <= P::tstep_max && P::t-P::dt <= P::t_max+DT_EPSILON && @@ -1061,26 +1072,6 @@ int main(int argn,char* args[]) { phiprof::start("Spatial-space"); if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; - - cout << "Before spatialTranslation, "; - Real nSum = 0.0; - Real nSumRef = 0.0; - - for(auto cell: cells) { - - creal rho = mpiGrid[cell]->parameters[CellParams::RHOM_R]; - creal dx = mpiGrid[cell]->parameters[CellParams::DX]; - creal dy = mpiGrid[cell]->parameters[CellParams::DY]; - creal dz = mpiGrid[cell]->parameters[CellParams::DZ]; - if(cell >= 114) { - nSumRef += rho*dx*dy*dz; - cout << "Cell " << cell << " n = " << rho*dx*dy*dz << endl; - } else { - nSum += rho*dx*dy*dz; - cout << "Cell " << cell << " n = " << rho*dx*dy*dz << endl; - } - } - cout << "nSum = " << nSum << ", nSumRef = " << nSumRef << endl; if( P::propagateVlasovTranslation) { calculateSpatialTranslation(mpiGrid,P::dt); @@ -1088,26 +1079,17 @@ int main(int argn,char* args[]) { calculateSpatialTranslation(mpiGrid,0.0); } - cout << "After spatialTranslation, "; - nSum = 0.0; - nSumRef = 0.0; + Real nSum = 0.0; for(auto cell: cells) { creal rho = mpiGrid[cell]->parameters[CellParams::RHOM_R]; creal dx = mpiGrid[cell]->parameters[CellParams::DX]; creal dy = mpiGrid[cell]->parameters[CellParams::DY]; creal dz = mpiGrid[cell]->parameters[CellParams::DZ]; - - if(cell >= 114) { - nSumRef += rho*dx*dy*dz; - cout << "Cell " << cell << " n = " << rho*dx*dy*dz << endl; - } else { - nSum += rho*dx*dy*dz; - cout << "Cell " << cell << " n = " << rho*dx*dy*dz << endl; - } + + nSum += rho*dx*dy*dz; + //cout << "Cell " << cell << " rho = " << rho << endl; } - cout << "nSum = " << nSum << ", nSumRef = " << nSumRef << endl; - - bailout(true, "", __FILE__, __LINE__); + cout << "nSum = " << nSum << endl; if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; phiprof::stop("Spatial-space",computedCells,"Cells"); From faebe97fdeb2c37c2342d5e2f56d38d326836967 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 19 Oct 2018 17:15:11 +0300 Subject: [PATCH 102/602] Runs succesfully with refinement! Added path to the pencil data structure. Using stored path in ghost cell calculation. Taking the average of pencils that transit mass to a coarser cell. 
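When a pencil ends in a cell coarser than the pencil itself, 4^dLvl sibling pencils map into the same target cell, so each pencil's contribution is weighted by the fraction of the coarse cell's cross-sectional area that one fine pencil covers. That is what the areaRatio factor in the diff below computes; a self-contained restatement of the weight, algebraically identical to the patch's 1.0 / pow(pow(2, dLvl), 2):

    #include <cmath>

    // Weight for one fine pencil's contribution to a coarser target cell.
    // Each refinement level splits the pencil cross-section 2 x 2, so
    // 4^dLvl pencils share the coarse cell and each carries 1/4^dLvl of
    // the area: dLvl = 1 -> 0.25, dLvl = 2 -> 0.0625.
    double pencilAreaRatio(int dLvl) {
       return 1.0 / std::pow(4.0, dLvl);
    }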
--- vlasovsolver/cpu_trans_map_amr.cpp | 286 +++++++++++++++++++++-------- vlasovsolver/cpu_trans_map_amr.hpp | 35 ++-- 2 files changed, 232 insertions(+), 89 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 14ff69b1c..8908c3e29 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -49,13 +49,13 @@ void computeSpatialSourceCellsForPencil(const dccrg::Dccrg distances; + for (auto nbrPair : *frontNbrPairs) { + if(nbrPair.second[dimension] < 0) { + int distanceInRefinedCells = nbrPair.second[dimension] * (maxRefLvl - nbrPair.second.back() + 2); + distances.insert(distanceInRefinedCells); } - std::cout << std::endl;; } - std::cout << endl; - - for (int i = 0; i < VLASOV_STENCIL_WIDTH; ++i) { - - // Add VLASOV_STENCIL_WIDTH neighbors from the front of the pencil to the beginning of the array - sourceCells[i] = mpiGrid[(*frontNbrPairs)[i].first]; - // Add VLASOV_STENCIL_WIDTH neighbors from the back of the pencil to the end of the array - sourceCells[L + VLASOV_STENCIL_WIDTH + i] = mpiGrid[(*backNbrPairs)[VLASOV_STENCIL_WIDTH + i].first]; + // Iterate through distances for VLASOV_STENCIL_WIDTH elements starting from the largest distance. + // Distances are negative here so the largest distance has the smallest value. + auto ibeg = distances.begin(); + std::advance(ibeg, distances.size() - VLASOV_STENCIL_WIDTH); + for (auto it = ibeg; it != distances.end(); ++it) { + // Collect all neighbors at distance *it to a vector + std::vector< CellID > neighbors; + for (auto nbrPair : *frontNbrPairs) { + int distanceInRefinedCells = nbrPair.second[dimension] * (maxRefLvl - nbrPair.second.back() + 2); + if(distanceInRefinedCells == *it) neighbors.push_back(nbrPair.first); + } + + switch (neighbors.size()) { + case 1: + { + // If there's only one cell in the neighbors vector, there's no refinement and we can just add it + sourceCells[iSrc++] = mpiGrid[neighbors.at(0)]; + if(printDebug) std::cout << neighbors.at(0) << " "; + break; + } + + case 4: + { + // If there's four cells in the neighbors vector, select one according to the path of the pencil. + int refLvl = mpiGrid.get_refinement_level(ids.front()); + sourceCells[iSrc++] = mpiGrid[neighbors.at(pencils.path[iPencil][refLvl])]; + if(printDebug) std::cout << neighbors.at(pencils.path[iPencil][refLvl]) << " "; + break; + } + + // In principle, 8,12,16 are also possibilities. Or are they? TODO: Investigate + default: + bailout(true, "Unexpected number of neighbors",__FILE__,__LINE__); + break; + } } - std::cout << std::endl; - - // SpatialCell* last_good_cell = mpiGrid[ids.front()]; - // /*loop to neative side and replace all invalid cells with the closest good cell*/ - // for(int i = -1;i>=-VLASOV_STENCIL_WIDTH;i--){ - // if(sourceCells[i + VLASOV_STENCIL_WIDTH] == NULL) - // sourceCells[i + VLASOV_STENCIL_WIDTH] = last_good_cell; - // else - // last_good_cell = sourceCells[i + VLASOV_STENCIL_WIDTH]; - // } - // last_good_cell = mpiGrid[ids.back()]; - // /*loop to positive side and replace all invalid cells with the closest good cell*/ - // for(int i = L + 1; i <= L + VLASOV_STENCIL_WIDTH; i++){ - // if(sourceCells[i + VLASOV_STENCIL_WIDTH] == NULL) - // sourceCells[i + VLASOV_STENCIL_WIDTH] = last_good_cell; - // else - // last_good_cell = sourceCells[i + VLASOV_STENCIL_WIDTH]; - // } + if(printDebug) std::cout << "... 
"; + + iSrc = L + VLASOV_STENCIL_WIDTH; + distances.clear(); + // Create list of unique distances in the positive direction from the last cell in pencil + for (auto nbrPair : *backNbrPairs) { + if(nbrPair.second[dimension] > 0) { + int distanceInRefinedCells = nbrPair.second[dimension] * (maxRefLvl - nbrPair.second.back() + 2); + distances.insert(distanceInRefinedCells); + } + } + + // Iterate through distances for VLASOV_STENCIL_WIDTH (2) elements starting from the smallest distance. + // Distances are positive here so smallest distance has smallest value. + auto iend = distances.begin(); + std::advance(iend,VLASOV_STENCIL_WIDTH); + for (auto it = distances.begin(); it != iend; ++it) { + + // Collect all neighbors at distance *it to a vector + std::vector< CellID > neighbors; + for (auto nbrPair : *backNbrPairs) { + int distanceInRefinedCells = nbrPair.second[dimension] * (maxRefLvl - nbrPair.second.back() + 2); + if(distanceInRefinedCells == *it) neighbors.push_back(nbrPair.first); + } + + switch (neighbors.size()) { + case 1: + { + // If there's only one cell at distance *it, there's no refinement and we can just add it + sourceCells[iSrc++] = mpiGrid[neighbors.at(0)]; + if(printDebug) std::cout << neighbors.at(0) << " "; + break; + } + + case 4: + { + // If there's four neighbor cells, select one according to the path of the pencil. + int refLvl = mpiGrid.get_refinement_level(ids.back()); + sourceCells[iSrc++] = mpiGrid[neighbors.at(pencils.path[iPencil][refLvl])]; + if(printDebug) std::cout << neighbors.at(pencils.path[iPencil][refLvl]) << " "; + break; + } + + // In principle, 8,12,16 are also possibilities. Or are they? TODO: Investigate + default: + bailout(true, "Unexpected number of neighbors",__FILE__,__LINE__); + break; + } + } + + if(printDebug) std::cout << std::endl; + + /*loop to neative side and replace all invalid cells with the closest good cell*/ + SpatialCell* lastGoodCell = mpiGrid[ids.front()]; + for(int i = -1;i>=-VLASOV_STENCIL_WIDTH;i--){ + if(sourceCells[i + VLASOV_STENCIL_WIDTH] == NULL) + sourceCells[i + VLASOV_STENCIL_WIDTH] = lastGoodCell; + else + lastGoodCell = sourceCells[i + VLASOV_STENCIL_WIDTH]; + } + /*loop to positive side and replace all invalid cells with the closest good cell*/ + lastGoodCell = mpiGrid[ids.back()]; + for(int i = L + 1; i <= L + VLASOV_STENCIL_WIDTH; i++){ + if(sourceCells[i + VLASOV_STENCIL_WIDTH] == NULL) + sourceCells[i + VLASOV_STENCIL_WIDTH] = lastGoodCell; + else + lastGoodCell = sourceCells[i + VLASOV_STENCIL_WIDTH]; + } } /*compute spatial target neighbors for pencils of size N. 
No boundary cells are included*/ @@ -130,7 +234,6 @@ void computeSpatialTargetCellsForPencils(const dccrg::Dccrgfront().first << " "; - // std::cout << backNbrPairs->back().first << std::endl; - targetCells[GID] = mpiGrid[frontNbrPairs->front().first]; - targetCells[GID + L + 1] = mpiGrid[backNbrPairs->back().first]; + // std::cout << backNbrPairs->back().first << std::endl; + vector frontNeighborIds; + for( auto nbrPair: *frontNbrPairs ) { + if (nbrPair.second.at(dimension) == -1) { + frontNeighborIds.push_back(nbrPair.first); + } + } + + vector backNeighborIds; + for( auto nbrPair: *backNbrPairs ) { + if (nbrPair.second.at(dimension) == 1) { + backNeighborIds.push_back(nbrPair.first); + } + } + + switch (frontNeighborIds.size()) { + case 1: { + targetCells[GID] = mpiGrid[frontNeighborIds.at(0)]; + break; + } + case 4: { + targetCells[GID] = mpiGrid[frontNeighborIds.at(pencils.path[iPencil][mpiGrid.get_refinement_level(ids.front())])]; + break; + } + default: + cout << "Unexpected number of neighbors for cell " << ids.front() << endl; + break; + } + + switch (backNeighborIds.size()) { + case 1: { + targetCells[GID + L + 1] = mpiGrid[backNeighborIds.at(0)]; + break; + } + case 4: { + targetCells[GID + L + 1] = mpiGrid[backNeighborIds.at(pencils.path[iPencil][mpiGrid.get_refinement_level(ids.back())])]; + break; + } + default: + cout << "Unexpected number of neighbors for cell " << ids.front() << endl; + break; + } // Incerment global id by L + 2 ghost cells. GID += (L + 2); @@ -383,7 +525,7 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg std::vector idsToSplit; for (setOfPencils pencils : pencilSets) { - cout << "pencils.N = " << pencils.N << endl; + for (uint pencili = 0; pencili < pencils.N; ++pencili) { if(pencils.periodic[pencili]) continue; @@ -861,22 +1003,24 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } // Print out ids of pencils (if needed for debugging) - if (true) { + if (false) { uint ibeg = 0; uint iend = 0; std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; - std::cout << "mpirank (x, y): indices " << std::endl; + std::cout << "N, mpirank, (x, y): indices " << std::endl; std::cout << "-----------------------------------------------------------------" << std::endl; for (uint i = 0; i < pencils.N; i++) { iend += pencils.lengthOfPencils[i]; - std::cout << mpiGrid.get_process(pencils.ids[ibeg])<< " (" << pencils.x[i] << ", " << pencils.y[i] << "): "; + std::cout << i << ", "; + std::cout << mpiGrid.get_process(pencils.ids[ibeg]) << ", "; + std::cout << "(" << pencils.x[i] << ", " << pencils.y[i] << "): "; for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { std::cout << *j << " "; } ibeg = iend; std::cout << std::endl; } - + // CellID idX = 114; // const auto* neighborsX = mpiGrid.get_neighbors_of(idX, VLASOV_SOLVER_X_NEIGHBORHOOD_ID); // if (neighborsX != NULL) { @@ -1015,7 +1159,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Loop over pencils uint totalTargetLength = 0; - for(uint pencili = 0; pencili < pencils.N; pencili++){ + for(uint pencili = 0; pencili < pencils.N; ++pencili){ vector pencilIds = pencils.getIds(pencili); int L = pencils.lengthOfPencils[pencili]; @@ -1025,6 +1169,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Compute spatial neighbors for the source cells of the pencil. In // source cells we have a wider stencil and take into account boundaries. 
std::vector sourceCells(sourceLength); + computeSpatialSourceCellsForPencil(mpiGrid, pencils, pencili, dimension, sourceCells.data()); if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; @@ -1059,34 +1204,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& cellid_transpose, popID); if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - - bool printMoreStuff = false; - for (auto id: pencilIds) { - if (id == 13 || id == 15) { - printMoreStuff = true; - } - } - - if(printMoreStuff) { - cout << "Ids are: "; - for(auto id: pencilIds) cout << id << " "; - cout << endl; - - cout << "dz's are: "; - for(uint i = 0; i < sourceCells.size(); ++i) { - cout << (int)(dz[i][0]*1e-4)/100.0 << " "; - } - cout << endl; - - // cout << "X,Y,Z are: " << endl; - // for(auto cell: sourceCells) { - // cout << (int)(cell->SpatialCell::parameters[CellParams::XCRD]*1e-4)/100.0 << ","; - // cout << (int)(cell->SpatialCell::parameters[CellParams::YCRD]*1e-4)/100.0 << ","; - // cout << (int)(cell->SpatialCell::parameters[CellParams::ZCRD]*1e-4)/100.0 << endl; - // } - - cout << endl; - } // Dz and sourceVecData are both padded by VLASOV_STENCIL_WIDTH // Dz has 1 value/cell, sourceVecData has WID3 values/cell @@ -1139,7 +1256,13 @@ bool trans_map_1d_amr(const dccrg::Dccrg& uint targetLength = L + 2; vector pencilIds = pencils.getIds(pencili); - bool debugPencilFlag = false; + int maxRefLvl = 0; + int minRefLvl = mpiGrid.get_maximum_refinement_level(); + for (auto id : pencilIds) { + int refLvl = mpiGrid.get_refinement_level(id); + maxRefLvl = max(maxRefLvl,refLvl); + minRefLvl = min(minRefLvl,refLvl); + } // Unpack the vector data @@ -1187,8 +1310,17 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } Realf* blockData = spatial_cell->get_data(blockLID, popID); + + Realf areaRatio = 1.0; + if (maxRefLvl > minRefLvl && + spatial_cell->parameters[CellParams::DX] == P::dx_ini && + spatial_cell->parameters[CellParams::DY] == P::dy_ini && + spatial_cell->parameters[CellParams::DZ] == P::dz_ini) { + areaRatio = 1.0 / pow(pow(2, maxRefLvl - minRefLvl), 2); + } + for(int i = 0; i < WID3 ; i++) { - blockData[i] += targetBlockData[GID * WID3 + i]; + blockData[i] += targetBlockData[GID * WID3 + i] * areaRatio; } } diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index b20879f2e..d0275c334 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -32,25 +32,28 @@ struct setOfPencils { uint N; // Number of pencils in the set uint sumOfLengths; - std::vector lengthOfPencils; // Lengths of pencils - std::vector ids; // List of cells - std::vector x,y; // x,y - position - std::vector periodic; + std::vector< uint > lengthOfPencils; // Lengths of pencils + std::vector< CellID > ids; // List of cells + std::vector< Realv > x,y; // x,y - position + std::vector< bool > periodic; + std::vector< std::vector > path; // Path taken through refinement levels setOfPencils() { + N = 0; sumOfLengths = 0; } - void addPencil(std::vector idsIn, Real xIn, Real yIn, bool periodicIn) { + void addPencil(std::vector idsIn, Real xIn, Real yIn, bool periodicIn, std::vector pathIn) { - N += 1; + N++; sumOfLengths += idsIn.size(); lengthOfPencils.push_back(idsIn.size()); ids.insert(ids.end(),idsIn.begin(),idsIn.end()); x.push_back(xIn); y.push_back(yIn); periodic.push_back(periodicIn); + path.push_back(pathIn); } std::vector getIds(const uint pencilId) const { @@ -78,14 +81,22 @@ struct setOfPencils { // dx and dy are the dimensions of the 
original pencil. void split(const uint pencilId, const Realv dx, const Realv dy) { - x[pencilId] += 0.25 * dx; + auto ids = getIds(pencilId); + auto path1 = path.at(pencilId); + auto path2 = path.at(pencilId); + auto path3 = path.at(pencilId); + + path1.push_back(1); + path2.push_back(2); + path3.push_back(3); + + x[pencilId] -= 0.25 * dx; y[pencilId] += 0.25 * dy; + path.at(pencilId).push_back(0); - auto ids = getIds(pencilId); - - addPencil(ids, x[pencilId] + 0.25 * dx, y[pencilId] , periodic[pencilId]); - addPencil(ids, x[pencilId] , y[pencilId] + 0.25 * dy, periodic[pencilId]); - addPencil(ids, x[pencilId] + 0.25 * dx, y[pencilId] + 0.25 * dy, periodic[pencilId]); + addPencil(ids, x[pencilId] + 0.25 * dx, y[pencilId] + 0.25 * dy, periodic[pencilId], path1); + addPencil(ids, x[pencilId] - 0.25 * dx, y[pencilId] - 0.25 * dy, periodic[pencilId], path2); + addPencil(ids, x[pencilId] + 0.25 * dx, y[pencilId] - 0.25 * dy, periodic[pencilId], path3); } From d08d7edd16cbfe820517cb3f8fa366ff1693e32e Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 26 Oct 2018 17:17:58 +0300 Subject: [PATCH 103/602] Created testAmr project. Copy from Multipeak. --- projects/testAmr/testAmr.cfg | 107 +++++++++++++++ projects/testAmr/testAmr.cpp | 256 +++++++++++++++++++++++++++++++++++ projects/testAmr/testAmr.h | 107 +++++++++++++++ 3 files changed, 470 insertions(+) create mode 100644 projects/testAmr/testAmr.cfg create mode 100644 projects/testAmr/testAmr.cpp create mode 100644 projects/testAmr/testAmr.h diff --git a/projects/testAmr/testAmr.cfg b/projects/testAmr/testAmr.cfg new file mode 100644 index 000000000..16bcae4ec --- /dev/null +++ b/projects/testAmr/testAmr.cfg @@ -0,0 +1,107 @@ +propagate_field = 0 +propagate_vlasov_acceleration = 1 +propagate_vlasov_translation = 0 +dynamic_timestep = 0 +project = MultiPeak + +[io] +diagnostic_write_interval = 1 +write_initial_state = 1 +restart_walltime_interval = 100000 +number_of_restarts = 1 + +system_write_t_interval = 360 +system_write_file_name = fullf +system_write_distribution_stride = 1 +system_write_distribution_xline_stride = 0 +system_write_distribution_yline_stride = 0 +system_write_distribution_zline_stride = 0 + +[gridbuilder] +x_length = 1 +y_length = 1 +z_length = 1 +x_min = 0.0 +x_max = 1.0e3 +y_min = 0.0 +y_max = 1.0e3 +z_min = 0.0 +z_max = 1.0e3 +vx_min = -2.0e6 +vx_max = +2.0e6 +vy_min = -2.0e6 +vy_max = +2.0e6 +vz_min = -2.0e6 +vz_max = +2.0e6 +vx_length = 12 +vy_length = 12 +vz_length = 12 +t_max = 3600.0 +dt = 15.0 + +[vlasovsolver] +#minCFL = 0.4 +#maxCFL = 0.6 +#vlasovSemiLagAcceleration = 0 + +[boundaries] +periodic_x = yes +periodic_y = yes +periodic_z = yes + +[variables] +output = Rho +output = B +output = BackgroundB +output = PerturbedB +output = Pressure +output = RhoV +output = E +output = PTensor +output = MPIrank +output = Blocks +diagnostic = Blocks +diagnostic = populations_RhoLossAdjust +diagnostic = RhoLossVelBoundary +diagnostic = MaxDistributionFunction +diagnostic = MinDistributionFunction + +[sparse] +minValue = 1.0e-22 + +[MultiPeak] +n = 2 + +Vx = -5.0e5 +Vy = 0.0 +Vz = 0.0 +Tx = 1.0e5 +Ty = 1.0e5 +Tz = 1.0e5 +rho = 1.0e6 +rhoPertAbsAmp = 0.0 + +Vx = 5.0e5 +Vy = 0.0 +Vz = 0.0 +Tx = 1.0e5 +Ty = 5.0e5 +Tz = 5.0e5 +rho = 1.0e4 +rhoPertAbsAmp = 0.0 + +Bx = 0.0 +By = 0.0 +Bz = 1.82206867e-10 + +dBx = 0.0 +dBy = 0.0e-10 +dBz = 0.0e-10 + +magXPertAbsAmp = 0.0 +magYPertAbsAmp = 0.0 +magZPertAbsAmp = 0.0 + +lambda = 120.0e5 + +nVelocitySamples = 2 diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp new 
file mode 100644 index 000000000..ad0e4f58c --- /dev/null +++ b/projects/testAmr/testAmr.cpp @@ -0,0 +1,256 @@ +/* + * This file is part of Vlasiator. + * Copyright 2010-2016 Finnish Meteorological Institute + * + * For details of usage, see the COPYING file and read the "Rules of the Road" + * at http://www.physics.helsinki.fi/vlasiator/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include +#include +#include +#include + +#include "../../common.h" +#include "../../readparameters.h" +#include "../../backgroundfield/backgroundfield.h" +#include "../../backgroundfield/constantfield.hpp" +#include "../../object_wrapper.h" + + +#include "MultiPeak.h" + +using namespace std; +using namespace spatial_cell; + + +Real projects::MultiPeak::rhoRnd; + +namespace projects { + MultiPeak::MultiPeak(): TriAxisSearch() { } + + MultiPeak::~MultiPeak() { } + + bool MultiPeak::initialize(void) { + return Project::initialize(); + } + + void MultiPeak::addParameters(){ + typedef Readparameters RP; + + RP::add("MultiPeak.Bx", "Magnetic field x component (T)", 0.0); + RP::add("MultiPeak.By", "Magnetic field y component (T)", 0.0); + RP::add("MultiPeak.Bz", "Magnetic field z component (T)", 0.0); + RP::add("MultiPeak.dBx", "Magnetic field x component cosine perturbation amplitude (T)", 0.0); + RP::add("MultiPeak.dBy", "Magnetic field y component cosine perturbation amplitude (T)", 0.0); + RP::add("MultiPeak.dBz", "Magnetic field z component cosine perturbation amplitude (T)", 0.0); + RP::add("MultiPeak.magXPertAbsAmp", "Absolute amplitude of the random magnetic perturbation along x (T)", 1.0e-9); + RP::add("MultiPeak.magYPertAbsAmp", "Absolute amplitude of the random magnetic perturbation along y (T)", 1.0e-9); + RP::add("MultiPeak.magZPertAbsAmp", "Absolute amplitude of the random magnetic perturbation along z (T)", 1.0e-9); + RP::add("MultiPeak.lambda", "B cosine perturbation wavelength (m)", 1.0); + RP::add("MultiPeak.nVelocitySamples", "Number of sampling points per velocity dimension", 2); + RP::add("MultiPeak.densityModel","Which spatial density model is used?",string("uniform")); + + // Per-population parameters + for(uint i=0; i< getObjectWrapper().particleSpecies.size(); i++) { + const std::string& pop = getObjectWrapper().particleSpecies[i].name; + RP::add(pop+"_MultiPeak.n", "Number of peaks to create", 0); + RP::addComposing(pop+"_MultiPeak.rho", "Number density (m^-3)"); + RP::addComposing(pop+"_MultiPeak.Tx", "Temperature (K)"); + RP::addComposing(pop+"_MultiPeak.Ty", "Temperature"); + RP::addComposing(pop+"_MultiPeak.Tz", "Temperature"); + RP::addComposing(pop+"_MultiPeak.Vx", "Bulk velocity x component (m/s)"); + RP::addComposing(pop+"_MultiPeak.Vy", "Bulk velocity y component (m/s)"); + RP::addComposing(pop+"_MultiPeak.Vz", "Bulk velocity z component (m/s)"); + RP::addComposing(pop+"_MultiPeak.rhoPertAbsAmp", 
"Absolute amplitude of the density perturbation"); + } + } + + void MultiPeak::getParameters(){ + + typedef Readparameters RP; + Project::getParameters(); + RP::get("MultiPeak.Bx", this->Bx); + RP::get("MultiPeak.By", this->By); + RP::get("MultiPeak.Bz", this->Bz); + RP::get("MultiPeak.magXPertAbsAmp", this->magXPertAbsAmp); + RP::get("MultiPeak.magYPertAbsAmp", this->magYPertAbsAmp); + RP::get("MultiPeak.magZPertAbsAmp", this->magZPertAbsAmp); + RP::get("MultiPeak.dBx", this->dBx); + RP::get("MultiPeak.dBy", this->dBy); + RP::get("MultiPeak.dBz", this->dBz); + RP::get("MultiPeak.lambda", this->lambda); + RP::get("MultiPeak.nVelocitySamples", this->nVelocitySamples); + + // Per-population parameters + for(uint i=0; i< getObjectWrapper().particleSpecies.size(); i++) { + const std::string& pop = getObjectWrapper().particleSpecies[i].name; + + MultiPeakSpeciesParameters sP; + RP::get(pop + "_MultiPeak.n", sP.numberOfPeaks); + RP::get(pop + "_MultiPeak.rho",sP.rho); + RP::get(pop + "_MultiPeak.Tx", sP.Tx); + RP::get(pop + "_MultiPeak.Ty", sP.Ty); + RP::get(pop + "_MultiPeak.Tz", sP.Tz); + RP::get(pop + "_MultiPeak.Vx", sP.Vx); + RP::get(pop + "_MultiPeak.Vy", sP.Vy); + RP::get(pop + "_MultiPeak.Vz", sP.Vz); + + RP::get(pop + "_MultiPeak.rhoPertAbsAmp", sP.rhoPertAbsAmp); + if(!sP.isConsistent()) { + cerr << "You should define all parameters (MultiPeak.rho, MultiPeak.Tx, MultiPeak.Ty, MultiPeak.Tz, MultiPeak.Vx, MultiPeak.Vy, MultiPeak.Vz, MultiPeak.rhoPertAbsAmp) for all " << sP.numberOfPeaks << " peaks of population " << pop << "." << endl; + abort(); + } + + speciesParams.push_back(sP); + } + + string densModelString; + RP::get("MultiPeak.densityModel",densModelString); + + if (densModelString == "uniform") densityModel = Uniform; + else if (densModelString == "testcase") densityModel = TestCase; + } + + Real MultiPeak::getDistribValue(creal& vx, creal& vy, creal& vz, creal& dvx, creal& dvy, creal& dvz,const uint popID) const { + const MultiPeakSpeciesParameters& sP = speciesParams[popID]; + creal mass = getObjectWrapper().particleSpecies[popID].mass; + creal kb = physicalconstants::K_B; + + Real value = 0.0; + + for (uint i=0; i= 3.9e5 && x <= 6.1e5) && (y >= 3.9e5 && y <= 6.1e5)) { + rhoFactor = 1.5; + } + break; + default: + rhoFactor = 1.0; + break; + } + + // Sample the distribution using N*N*N points + for (uint vi=0; vi::min(),avg * static_cast(1e-6)); + Real avgAccum = avgTotal / (avg + N3_sum); + Real avgCurrent = avg / (N*N*N); + if (fabs(avgCurrent-avgAccum)/(avgAccum+eps) < 0.01) ok = true; + else if (avg < avgLimit) ok = true; + else if (N > 10) { + ok = true; + } + + avgTotal += avg; + N3_sum += N*N*N; + ++N; + } while (ok == false); + + return avgTotal / N3_sum; + } + + void MultiPeak::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { + Real* cellParams = cell->get_cell_parameters(); + setRandomCellSeed(cell,cellParams); + + if (this->lambda != 0.0) { + cellParams[CellParams::PERBX] = this->dBx*cos(2.0 * M_PI * cellParams[CellParams::XCRD] / this->lambda); + cellParams[CellParams::PERBY] = this->dBy*sin(2.0 * M_PI * cellParams[CellParams::XCRD] / this->lambda); + cellParams[CellParams::PERBZ] = this->dBz*cos(2.0 * M_PI * cellParams[CellParams::XCRD] / this->lambda); + } + + cellParams[CellParams::PERBX] += this->magXPertAbsAmp * (0.5 - getRandomNumber(cell)); + cellParams[CellParams::PERBY] += this->magYPertAbsAmp * (0.5 - getRandomNumber(cell)); + cellParams[CellParams::PERBZ] += this->magZPertAbsAmp * (0.5 - getRandomNumber(cell)); + + rhoRnd = 0.5 - 
getRandomNumber(cell); + } + + void MultiPeak::setCellBackgroundField(SpatialCell* cell) const { + ConstantField bgField; + bgField.initialize(this->Bx, + this->By, + this->Bz); + + setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); + } + + std::vector > MultiPeak::getV0( + creal x, + creal y, + creal z, + const uint popID + ) const { + const MultiPeakSpeciesParameters& sP = speciesParams[popID]; + vector > centerPoints; + for(uint i=0; i point {{sP.Vx[i], sP.Vy[i], sP.Vz[i]}}; + centerPoints.push_back(point); + } + return centerPoints; + } + +}// namespace projects diff --git a/projects/testAmr/testAmr.h b/projects/testAmr/testAmr.h new file mode 100644 index 000000000..55356a80a --- /dev/null +++ b/projects/testAmr/testAmr.h @@ -0,0 +1,107 @@ +/* + * This file is part of Vlasiator. + * Copyright 2010-2016 Finnish Meteorological Institute + * + * For details of usage, see the COPYING file and read the "Rules of the Road" + * at http://www.physics.helsinki.fi/vlasiator/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef MULTIPEAK_H +#define MULTIPEAK_H + +#include + +#include "../../definitions.h" +#include "../projectTriAxisSearch.h" + +namespace projects { + struct MultiPeakSpeciesParameters { + uint numberOfPeaks; + std::vector rho; + std::vector Tx; + std::vector Ty; + std::vector Tz; + std::vector Vx; + std::vector Vy; + std::vector Vz; + std::vector rhoPertAbsAmp; + + // Test whether parameters have been set up for all peaks + bool isConsistent() { + return rho.size() == Tx.size() && + Tx.size() == Ty.size() && + Ty.size() == Tz.size() && + Tz.size() == Vx.size() && + Vx.size() == Vy.size() && + Vy.size() == Vz.size() && + Vz.size() == rhoPertAbsAmp.size() && + rhoPertAbsAmp.size() == rho.size() && + rho.size() == numberOfPeaks; + } + }; + class MultiPeak: public TriAxisSearch { + public: + MultiPeak(); + virtual ~MultiPeak(); + + virtual bool initialize(void); + static void addParameters(void); + virtual void getParameters(void); + virtual void setCellBackgroundField(spatial_cell::SpatialCell* cell) const; + protected: + Real getDistribValue( + creal& x,creal& y, creal& z, + creal& vx, creal& vy, creal& vz, + const uint popID) const; + virtual void calcCellParameters(spatial_cell::SpatialCell* cell,creal& t); + virtual Real calcPhaseSpaceDensity( + creal& x, creal& y, creal& z, + creal& dx, creal& dy, creal& dz, + creal& vx, creal& vy, creal& vz, + creal& dvx, creal& dvy, creal& dvz, + const uint popID) const; + virtual std::vector > getV0( + creal x, + creal y, + creal z, + const uint popID + ) const; + static Real rhoRnd; //static as it has to be threadprivate + #pragma omp threadprivate(rhoRnd) + Real Bx; + Real By; + Real Bz; + Real dBx; + Real dBy; + Real dBz; + Real magXPertAbsAmp; + Real magYPertAbsAmp; + Real magZPertAbsAmp; + Real 
lambda; + uint nVelocitySamples; + std::vector speciesParams; + + enum densitymodel { + Uniform, + TestCase + } densityModel; + + }; // class MultiPeak +} // namespace projects + +#endif + From f26a45b79f6dd33cd317a00b6d013ee7ccc3dfb6 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 26 Oct 2018 17:18:43 +0300 Subject: [PATCH 104/602] Added refine-function to project base class --- projects/project.cpp | 8 ++++++++ projects/project.h | 2 ++ 2 files changed, 10 insertions(+) diff --git a/projects/project.cpp b/projects/project.cpp index 6455cf9ff..3b11b307a 100644 --- a/projects/project.cpp +++ b/projects/project.cpp @@ -507,6 +507,14 @@ namespace projects { (int) ((z - Parameters::zmin) / dz) * Parameters::xcells_ini * Parameters::ycells_ini; setRandomSeed(cell,cellID); } + + /* + Refine cells of mpiGrid. Each project that wants refinement shoudl implement this function. + Base class function does nothing. + */ + bool Project::refineSpatialCells( const dccrg::Dccrg& mpiGrid ) const { + return false; + } Project* createProject() { Project* rvalue = NULL; diff --git a/projects/project.h b/projects/project.h index d5df0766c..7c5ae8252 100644 --- a/projects/project.h +++ b/projects/project.h @@ -161,6 +161,8 @@ namespace projects { */ void setRandomCellSeed(spatial_cell::SpatialCell* cell,const Real* const cellParams) const; + bool refineSpatialCells( const dccrg::Dccrg& mpiGrid ) const; + private: uint seed; static char rngStateBuffer[256]; From 606e4894998ceb7514aa2c4526c5afc29088d5fb Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 29 Oct 2018 11:54:54 +0200 Subject: [PATCH 105/602] Modified selectNeighbors() to not assume 1 or 4 neighbors. Neighbors that are not on the local process return INVALID_CELLID. --- vlasovsolver/cpu_trans_map_amr.cpp | 56 +++++++++++++----------------- 1 file changed, 24 insertions(+), 32 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 8908c3e29..37404a28f 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -313,33 +313,19 @@ CellID selectNeighbor(const dccrg::Dccrg myNeighbors.push_back(cell.first); } - CellID neighbor; - - switch( myNeighbors.size() ) { - // Since refinement can only increase by 1 level the only possibilities - // Should be 0 neighbors, 1 neighbor or 4 neighbors. - case 0 : - // did not find neighbors - neighbor = INVALID_CELLID; - break; - - case 1 : - neighbor = myNeighbors[0]; - break; - - case 4 : + CellID neighbor = INVALID_CELLID; + + if (myNeighbors.size() == 1) { + neighbor = myNeighbors[0]; + } else if ( path < myNeighbors.size() ) { neighbor = myNeighbors[path]; - break; - - default: - // something is wrong - neighbor = INVALID_CELLID; - throw "Invalid neighbor count!"; - break; } + // std::cout << "selectNeighbor: path = " << path << " neighbors = "; + // for (auto nbr : myNeighbors) std::cout << neighbor << " "; + // std::cout << ", returning " << neighbor << std::endl; + return neighbor; - } @@ -395,16 +381,22 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg 0) { periodic = false; - - // Find the refinement level in the neighboring cell. Any neighbor will do - // since refinement level can only increase by 1 between neighbors. - nextNeighbor = selectNeighbor(grid,id,dimension); - + bool neighborExists = false; + int refLvl = 0; + + // Find the refinement level in the neighboring cell. Check all possible neighbors + // in case some of them are remote. 
+ for (int tmpPath = 0; tmpPath < 4; ++tmpPath) { + nextNeighbor = selectNeighbor(grid,id,dimension,tmpPath); + if(nextNeighbor != INVALID_CELLID) { + refLvl = max(refLvl,grid.get_refinement_level(nextNeighbor)); + neighborExists = true; + } + } + // If there are no neighbors, we can stop. - if (nextNeighbor == 0) - break; - - uint refLvl = grid.get_refinement_level(nextNeighbor); + if (!neighborExists) + break; if (refLvl > 0) { From fe313fb9b60a408c797e063929f746d2a62acb35 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 29 Oct 2018 12:01:48 +0200 Subject: [PATCH 106/602] Removed false conditions in y and z translations --- vlasovsolver/vlasovmover.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index 44e9d7e8b..f9a8ad73b 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -80,7 +80,7 @@ void calculateSpatialTranslation( // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; // ------------- SLICE - map dist function in Z --------------- // - if(P::zcells_ini > 1 && false){ + if(P::zcells_ini > 1){ trans_timer=phiprof::initializeTimer("transfer-stencil-data-z","MPI"); phiprof::start(trans_timer); SpatialCell::set_mpi_transfer_type(Transfer::VEL_BLOCK_DATA); @@ -125,7 +125,7 @@ void calculateSpatialTranslation( // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; // ------------- SLICE - map dist function in Y --------------- // - if(P::ycells_ini > 1 && false){ + if(P::ycells_ini > 1){ trans_timer=phiprof::initializeTimer("transfer-stencil-data-y","MPI"); phiprof::start(trans_timer); SpatialCell::set_mpi_transfer_type(Transfer::VEL_BLOCK_DATA); From b732d223d8149db775631ccd3a6feebde1683cd9 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 29 Oct 2018 12:48:17 +0200 Subject: [PATCH 107/602] Moved refineSpatialCells to public functions. Added testAMR project. --- projects/project.cpp | 6 +++++- projects/project.h | 4 ++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/projects/project.cpp b/projects/project.cpp index 3b11b307a..ef59d256f 100644 --- a/projects/project.cpp +++ b/projects/project.cpp @@ -48,6 +48,7 @@ #include "IPShock/IPShock.h" #include "Template/Template.h" #include "test_fp/test_fp.h" +#include "testAMR/testAMR.h" #include "testHall/testHall.h" #include "test_trans/test_trans.h" #include "verificationLarmor/verificationLarmor.h" @@ -512,7 +513,7 @@ namespace projects { Refine cells of mpiGrid. Each project that wants refinement shoudl implement this function. Base class function does nothing. */ - bool Project::refineSpatialCells( const dccrg::Dccrg& mpiGrid ) const { + bool Project::refineSpatialCells( dccrg::Dccrg& mpiGrid ) const { return false; } @@ -579,6 +580,9 @@ Project* createProject() { if(Parameters::projectName == "test_fp") { rvalue = new projects::test_fp; } + if(Parameters::projectName == "testAmr") { + rvalue = new projects::testAmr; + } if(Parameters::projectName == "testHall") { rvalue = new projects::TestHall; } diff --git a/projects/project.h b/projects/project.h index 7c5ae8252..d976fae8c 100644 --- a/projects/project.h +++ b/projects/project.h @@ -74,6 +74,8 @@ namespace projects { Real setVelocityBlock(spatial_cell::SpatialCell* cell,const vmesh::LocalID& blockLID,const uint popID) const; + bool refineSpatialCells( dccrg::Dccrg& mpiGrid ) const; + protected: /*! \brief Returns a list of blocks to loop through when initialising. 
* @@ -160,8 +162,6 @@ namespace projects { \param cellParams The cell parameters list in each spatial cell */ void setRandomCellSeed(spatial_cell::SpatialCell* cell,const Real* const cellParams) const; - - bool refineSpatialCells( const dccrg::Dccrg& mpiGrid ) const; private: uint seed; From 3578f9f6d5bdf0f4b7dc680e29e02cfba97a7d9c Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 29 Oct 2018 16:02:32 +0200 Subject: [PATCH 108/602] Added testAmr::addParameters, and some verbosity to the refineSpatialCells base class version. --- projects/project.cpp | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/projects/project.cpp b/projects/project.cpp index ef59d256f..cd828ccb7 100644 --- a/projects/project.cpp +++ b/projects/project.cpp @@ -48,7 +48,7 @@ #include "IPShock/IPShock.h" #include "Template/Template.h" #include "test_fp/test_fp.h" -#include "testAMR/testAMR.h" +#include "testAmr/testAmr.h" #include "testHall/testHall.h" #include "test_trans/test_trans.h" #include "verificationLarmor/verificationLarmor.h" @@ -126,6 +126,7 @@ namespace projects { projects::IPShock::addParameters(); projects::Template::addParameters(); projects::test_fp::addParameters(); + projects::testAmr::addParameters(); projects::TestHall::addParameters(); projects::test_trans::addParameters(); projects::verificationLarmor::addParameters(); @@ -511,9 +512,15 @@ namespace projects { /* Refine cells of mpiGrid. Each project that wants refinement shoudl implement this function. - Base class function does nothing. + Base class function prints a warning and does nothing. */ bool Project::refineSpatialCells( dccrg::Dccrg& mpiGrid ) const { + int rank; + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + if (rank == MASTER_RANK) { + cerr << "(Project.cpp) WARNING: Base class 'refineSpatialCells' in " << __FILE__ << ":" << __LINE__ << " called." << endl; + } + return false; } From 5dea44cc28af04b99e09179a75b52a1203b34866 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 29 Oct 2018 16:03:41 +0200 Subject: [PATCH 109/602] Implemented refineSpatialCells, fixed various bugs. 
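The override added below condenses to the standard dccrg refinement sequence: queue a refinement with refine_completely_at, commit the queued refinements with stop_refining, then rebalance the load. A minimal sketch of that shape (the function name refineAtDomainCenter is hypothetical; note that coordinates computed as (max - min) / 2.0, as in the diff, hit the domain midpoint only because the minima are zero in testAmr.cfg, so the general form min + (max - min) / 2.0 is used here):

    // Sketch of a project-level refinement hook; it mirrors the testAmr
    // implementation in the diff below rather than adding new behavior.
    bool refineAtDomainCenter(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid) {
       std::array<double,3> coords = {{ P::xmin + (P::xmax - P::xmin) / 2.0,
                                        P::ymin + (P::ymax - P::ymin) / 2.0,
                                        P::zmin + (P::zmax - P::zmin) / 2.0 }};
       bool ok = mpiGrid.refine_completely_at(coords);         // queue refinement of the cell at coords
       std::vector<CellID> newCells = mpiGrid.stop_refining(); // execute the queued refinements
       mpiGrid.balance_load();                                 // redistribute the refined grid
       return ok && !newCells.empty();
    }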
--- projects/testAmr/testAmr.cpp | 161 ++++++++++++++++++++--------------- projects/testAmr/testAmr.h | 18 ++-- 2 files changed, 102 insertions(+), 77 deletions(-) diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index ad0e4f58c..c77ad61fc 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -31,88 +31,87 @@ #include "../../backgroundfield/constantfield.hpp" #include "../../object_wrapper.h" - -#include "MultiPeak.h" +#include "testAmr.h" using namespace std; using namespace spatial_cell; - -Real projects::MultiPeak::rhoRnd; +Real projects::testAmr::rhoRnd; namespace projects { - MultiPeak::MultiPeak(): TriAxisSearch() { } + testAmr::testAmr(): TriAxisSearch() { } - MultiPeak::~MultiPeak() { } + testAmr::~testAmr() { } - bool MultiPeak::initialize(void) { + bool testAmr::initialize(void) { return Project::initialize(); } - void MultiPeak::addParameters(){ + void testAmr::addParameters(){ typedef Readparameters RP; - RP::add("MultiPeak.Bx", "Magnetic field x component (T)", 0.0); - RP::add("MultiPeak.By", "Magnetic field y component (T)", 0.0); - RP::add("MultiPeak.Bz", "Magnetic field z component (T)", 0.0); - RP::add("MultiPeak.dBx", "Magnetic field x component cosine perturbation amplitude (T)", 0.0); - RP::add("MultiPeak.dBy", "Magnetic field y component cosine perturbation amplitude (T)", 0.0); - RP::add("MultiPeak.dBz", "Magnetic field z component cosine perturbation amplitude (T)", 0.0); - RP::add("MultiPeak.magXPertAbsAmp", "Absolute amplitude of the random magnetic perturbation along x (T)", 1.0e-9); - RP::add("MultiPeak.magYPertAbsAmp", "Absolute amplitude of the random magnetic perturbation along y (T)", 1.0e-9); - RP::add("MultiPeak.magZPertAbsAmp", "Absolute amplitude of the random magnetic perturbation along z (T)", 1.0e-9); - RP::add("MultiPeak.lambda", "B cosine perturbation wavelength (m)", 1.0); - RP::add("MultiPeak.nVelocitySamples", "Number of sampling points per velocity dimension", 2); - RP::add("MultiPeak.densityModel","Which spatial density model is used?",string("uniform")); + RP::add("testAmr.Bx", "Magnetic field x component (T)", 0.0); + RP::add("testAmr.By", "Magnetic field y component (T)", 0.0); + RP::add("testAmr.Bz", "Magnetic field z component (T)", 0.0); + RP::add("testAmr.dBx", "Magnetic field x component cosine perturbation amplitude (T)", 0.0); + RP::add("testAmr.dBy", "Magnetic field y component cosine perturbation amplitude (T)", 0.0); + RP::add("testAmr.dBz", "Magnetic field z component cosine perturbation amplitude (T)", 0.0); + RP::add("testAmr.magXPertAbsAmp", "Absolute amplitude of the random magnetic perturbation along x (T)", 1.0e-9); + RP::add("testAmr.magYPertAbsAmp", "Absolute amplitude of the random magnetic perturbation along y (T)", 1.0e-9); + RP::add("testAmr.magZPertAbsAmp", "Absolute amplitude of the random magnetic perturbation along z (T)", 1.0e-9); + RP::add("testAmr.lambda", "B cosine perturbation wavelength (m)", 1.0); + RP::add("testAmr.nVelocitySamples", "Number of sampling points per velocity dimension", 2); + RP::add("testAmr.densityModel","Which spatial density model is used?",string("uniform")); // Per-population parameters for(uint i=0; i< getObjectWrapper().particleSpecies.size(); i++) { const std::string& pop = getObjectWrapper().particleSpecies[i].name; - RP::add(pop+"_MultiPeak.n", "Number of peaks to create", 0); - RP::addComposing(pop+"_MultiPeak.rho", "Number density (m^-3)"); - RP::addComposing(pop+"_MultiPeak.Tx", "Temperature (K)"); - 
RP::addComposing(pop+"_MultiPeak.Ty", "Temperature"); - RP::addComposing(pop+"_MultiPeak.Tz", "Temperature"); - RP::addComposing(pop+"_MultiPeak.Vx", "Bulk velocity x component (m/s)"); - RP::addComposing(pop+"_MultiPeak.Vy", "Bulk velocity y component (m/s)"); - RP::addComposing(pop+"_MultiPeak.Vz", "Bulk velocity z component (m/s)"); - RP::addComposing(pop+"_MultiPeak.rhoPertAbsAmp", "Absolute amplitude of the density perturbation"); + RP::add(pop+"_testAmr.n", "Number of peaks to create", 0); + RP::addComposing(pop+"_testAmr.rho", "Number density (m^-3)"); + RP::addComposing(pop+"_testAmr.Tx", "Temperature (K)"); + RP::addComposing(pop+"_testAmr.Ty", "Temperature"); + RP::addComposing(pop+"_testAmr.Tz", "Temperature"); + RP::addComposing(pop+"_testAmr.Vx", "Bulk velocity x component (m/s)"); + RP::addComposing(pop+"_testAmr.Vy", "Bulk velocity y component (m/s)"); + RP::addComposing(pop+"_testAmr.Vz", "Bulk velocity z component (m/s)"); + RP::addComposing(pop+"_testAmr.rhoPertAbsAmp", "Absolute amplitude of the density perturbation"); } } - void MultiPeak::getParameters(){ + void testAmr::getParameters(){ typedef Readparameters RP; Project::getParameters(); - RP::get("MultiPeak.Bx", this->Bx); - RP::get("MultiPeak.By", this->By); - RP::get("MultiPeak.Bz", this->Bz); - RP::get("MultiPeak.magXPertAbsAmp", this->magXPertAbsAmp); - RP::get("MultiPeak.magYPertAbsAmp", this->magYPertAbsAmp); - RP::get("MultiPeak.magZPertAbsAmp", this->magZPertAbsAmp); - RP::get("MultiPeak.dBx", this->dBx); - RP::get("MultiPeak.dBy", this->dBy); - RP::get("MultiPeak.dBz", this->dBz); - RP::get("MultiPeak.lambda", this->lambda); - RP::get("MultiPeak.nVelocitySamples", this->nVelocitySamples); + RP::get("testAmr.Bx", this->Bx); + RP::get("testAmr.By", this->By); + RP::get("testAmr.Bz", this->Bz); + RP::get("testAmr.magXPertAbsAmp", this->magXPertAbsAmp); + RP::get("testAmr.magYPertAbsAmp", this->magYPertAbsAmp); + RP::get("testAmr.magZPertAbsAmp", this->magZPertAbsAmp); + RP::get("testAmr.dBx", this->dBx); + RP::get("testAmr.dBy", this->dBy); + RP::get("testAmr.dBz", this->dBz); + RP::get("testAmr.lambda", this->lambda); + RP::get("testAmr.nVelocitySamples", this->nVelocitySamples); // Per-population parameters for(uint i=0; i< getObjectWrapper().particleSpecies.size(); i++) { const std::string& pop = getObjectWrapper().particleSpecies[i].name; - MultiPeakSpeciesParameters sP; - RP::get(pop + "_MultiPeak.n", sP.numberOfPeaks); - RP::get(pop + "_MultiPeak.rho",sP.rho); - RP::get(pop + "_MultiPeak.Tx", sP.Tx); - RP::get(pop + "_MultiPeak.Ty", sP.Ty); - RP::get(pop + "_MultiPeak.Tz", sP.Tz); - RP::get(pop + "_MultiPeak.Vx", sP.Vx); - RP::get(pop + "_MultiPeak.Vy", sP.Vy); - RP::get(pop + "_MultiPeak.Vz", sP.Vz); - - RP::get(pop + "_MultiPeak.rhoPertAbsAmp", sP.rhoPertAbsAmp); + testAmrSpeciesParameters sP; + RP::get(pop + "_testAmr.n", sP.numberOfPeaks); + RP::get(pop + "_testAmr.rho",sP.rho); + RP::get(pop + "_testAmr.Tx", sP.Tx); + RP::get(pop + "_testAmr.Ty", sP.Ty); + RP::get(pop + "_testAmr.Tz", sP.Tz); + RP::get(pop + "_testAmr.Vx", sP.Vx); + RP::get(pop + "_testAmr.Vy", sP.Vy); + RP::get(pop + "_testAmr.Vz", sP.Vz); + + RP::get(pop + "_testAmr.rhoPertAbsAmp", sP.rhoPertAbsAmp); + if(!sP.isConsistent()) { - cerr << "You should define all parameters (MultiPeak.rho, MultiPeak.Tx, MultiPeak.Ty, MultiPeak.Tz, MultiPeak.Vx, MultiPeak.Vy, MultiPeak.Vz, MultiPeak.rhoPertAbsAmp) for all " << sP.numberOfPeaks << " peaks of population " << pop << "." 
<< endl; + cerr << "You should define all parameters (testAmr.rho, testAmr.Tx, testAmr.Ty, testAmr.Tz, testAmr.Vx, testAmr.Vy, testAmr.Vz, testAmr.rhoPertAbsAmp) for all " << sP.numberOfPeaks << " peaks of population " << pop << "." << endl; abort(); } @@ -120,14 +119,14 @@ namespace projects { } string densModelString; - RP::get("MultiPeak.densityModel",densModelString); + RP::get("testAmr.densityModel",densModelString); if (densModelString == "uniform") densityModel = Uniform; else if (densModelString == "testcase") densityModel = TestCase; } - Real MultiPeak::getDistribValue(creal& vx, creal& vy, creal& vz, creal& dvx, creal& dvy, creal& dvz,const uint popID) const { - const MultiPeakSpeciesParameters& sP = speciesParams[popID]; + Real testAmr::getDistribValue(creal& vx, creal& vy, creal& vz, creal& dvx, creal& dvy, creal& dvz,const uint popID) const { + const testAmrSpeciesParameters& sP = speciesParams[popID]; creal mass = getObjectWrapper().particleSpecies[popID].mass; creal kb = physicalconstants::K_B; @@ -144,9 +143,9 @@ namespace projects { return value; } - Real MultiPeak::calcPhaseSpaceDensity(creal& x, creal& y, creal& z, creal& dx, creal& dy, creal& dz, - creal& vx, creal& vy, creal& vz, creal& dvx, creal& dvy, creal& dvz, - const uint popID) const { + Real testAmr::calcPhaseSpaceDensity(creal& x, creal& y, creal& z, creal& dx, creal& dy, creal& dz, + creal& vx, creal& vy, creal& vz, creal& dvx, creal& dvy, creal& dvz, + const uint popID) const { // Iterative sampling of the distribution function. Keep track of the // accumulated volume average over the iterations. When the next // iteration improves the average by less than 1%, return the value. @@ -155,9 +154,8 @@ namespace projects { uint N = nVelocitySamples; // Start by using nVelocitySamples int N3_sum = 0; // Sum of sampling points used so far - const MultiPeakSpeciesParameters& sP = speciesParams[popID]; - - #warning TODO: Replace getObjectWrapper().particleSpecies[popID].sparseMinValue with SpatialCell::getVelocityBlockMinValue(popID) + const testAmrSpeciesParameters& sP = speciesParams[popID]; + const Real avgLimit = 0.01*getObjectWrapper().particleSpecies[popID].sparseMinValue; do { Real avg = 0.0; // Volume average obtained during this sampling @@ -172,15 +170,15 @@ namespace projects { break; case TestCase: rhoFactor = 1.0; - if ((x >= 3.9e5 && x <= 6.1e5) && (y >= 3.9e5 && y <= 6.1e5)) { - rhoFactor = 1.5; + if (x < 6.1e5) { + rhoFactor = 3.0; } break; default: rhoFactor = 1.0; break; } - + // Sample the distribution using N*N*N points for (uint vi=0; viget_cell_parameters(); setRandomCellSeed(cell,cellParams); @@ -229,7 +227,7 @@ namespace projects { rhoRnd = 0.5 - getRandomNumber(cell); } - void MultiPeak::setCellBackgroundField(SpatialCell* cell) const { + void testAmr::setCellBackgroundField(SpatialCell* cell) const { ConstantField bgField; bgField.initialize(this->Bx, this->By, @@ -238,13 +236,13 @@ namespace projects { setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); } - std::vector > MultiPeak::getV0( + std::vector > testAmr::getV0( creal x, creal y, creal z, const uint popID ) const { - const MultiPeakSpeciesParameters& sP = speciesParams[popID]; + const testAmrSpeciesParameters& sP = speciesParams[popID]; vector > centerPoints; for(uint i=0; i point {{sP.Vx[i], sP.Vy[i], sP.Vz[i]}}; @@ -252,5 +250,30 @@ namespace projects { } return centerPoints; } + + bool testAmr::refineSpatialCells( dccrg::Dccrg& mpiGrid ) const { + + std::array coords; + 
coords[0] = (P::xmax - P::xmin) / 2.0; + coords[1] = (P::ymax - P::ymin) / 2.0; + coords[2] = (P::zmax - P::zmin) / 2.0; + std::cout << "Trying to refine at " << coords[0] << ", " << coords[1] << ", " << coords[2] << std::endl; + CellID myCell = mpiGrid.get_existing_cell(coords); + std::cout << "Got cell ID " << myCell << std::endl; + std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; + bool refineSuccess = mpiGrid.refine_completely_at(coords); + std::vector refinedCells = mpiGrid.stop_refining(); + mpiGrid.balance_load(); + if(refineSuccess) { + cout << "Refined Cells are: "; + for (auto cellid : refinedCells) { + cout << cellid << " "; + } + cout << endl; + mpiGrid.write_vtk_file("mpiGrid.vtk"); + } + + return refineSuccess; + } }// namespace projects diff --git a/projects/testAmr/testAmr.h b/projects/testAmr/testAmr.h index 55356a80a..7ebaf59d8 100644 --- a/projects/testAmr/testAmr.h +++ b/projects/testAmr/testAmr.h @@ -20,8 +20,8 @@ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ -#ifndef MULTIPEAK_H -#define MULTIPEAK_H +#ifndef TESTAMR_H +#define TESTAMR_H #include @@ -29,7 +29,7 @@ #include "../projectTriAxisSearch.h" namespace projects { - struct MultiPeakSpeciesParameters { + struct testAmrSpeciesParameters { uint numberOfPeaks; std::vector rho; std::vector Tx; @@ -53,10 +53,10 @@ namespace projects { rho.size() == numberOfPeaks; } }; - class MultiPeak: public TriAxisSearch { + class testAmr: public TriAxisSearch { public: - MultiPeak(); - virtual ~MultiPeak(); + testAmr(); + virtual ~testAmr(); virtual bool initialize(void); static void addParameters(void); @@ -80,6 +80,8 @@ namespace projects { creal z, const uint popID ) const; + bool refineSpatialCells( dccrg::Dccrg& mpiGrid ) const; + static Real rhoRnd; //static as it has to be threadprivate #pragma omp threadprivate(rhoRnd) Real Bx; @@ -93,14 +95,14 @@ namespace projects { Real magZPertAbsAmp; Real lambda; uint nVelocitySamples; - std::vector speciesParams; + std::vector speciesParams; enum densitymodel { Uniform, TestCase } densityModel; - }; // class MultiPeak + }; // class testAmr } // namespace projects #endif From 8cd533e27874e231ac54097bed06df54f303b448 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 29 Oct 2018 16:05:26 +0200 Subject: [PATCH 110/602] Added testAmr - stuff to Makefile. Added refineSpatialCells call to initializeGrid. 
Cleaned up printouts in vlasiator.cpp --- Makefile | 9 +++++-- grid.cpp | 49 ++++++++++++++++++--------------- vlasiator.cpp | 75 ++++++++++++++++++++++++++++++--------------------- 3 files changed, 79 insertions(+), 54 deletions(-) diff --git a/Makefile b/Makefile index d7cdfe563..7747028f8 100644 --- a/Makefile +++ b/Makefile @@ -149,6 +149,7 @@ DEPS_PROJECTS = projects/project.h projects/project.cpp \ projects/IPShock/IPShock.h projects/IPShock/IPShock.cpp \ projects/Template/Template.h projects/Template/Template.cpp \ projects/test_fp/test_fp.h projects/test_fp/test_fp.cpp \ + projects/testAmr/testAmr.h projects/testAmr/testAmr.cpp \ projects/testHall/testHall.h projects/testHall/testHall.cpp \ projects/test_trans/test_trans.h projects/test_trans/test_trans.cpp \ projects/verificationLarmor/verificationLarmor.h projects/verificationLarmor/verificationLarmor.cpp \ @@ -189,8 +190,9 @@ OBJS = version.o memoryallocation.o backgroundfield.o quadr.o dipole.o linedipo donotcompute.o ionosphere.o outflow.o setbyuser.o setmaxwellian.o antisymmetric.o\ sysboundary.o sysboundarycondition.o project_boundary.o particle_species.o\ project.o projectTriAxisSearch.o read_gaussian_population.o\ - Alfven.o Diffusion.o Dispersion.o Distributions.o electric_sail.o Firehose.o Flowthrough.o Fluctuations.o Harris.o KHB.o Larmor.o \ - Magnetosphere.o MultiPeak.o VelocityBox.o Riemann1.o Shock.o Template.o test_fp.o testHall.o test_trans.o \ + Alfven.o Diffusion.o Dispersion.o Distributions.o electric_sail.o Firehose.o\ + Flowthrough.o Fluctuations.o Harris.o KHB.o Larmor.o Magnetosphere.o MultiPeak.o\ + VelocityBox.o Riemann1.o Shock.o Template.o test_fp.o testAmr.o testHall.o test_trans.o\ IPShock.o object_wrapper.o\ verificationLarmor.o Shocktest.o grid.o ioread.o iowrite.o vlasiator.o logger.o\ common.o parameters.o readparameters.o spatial_cell.o mesh_data_container.o\ @@ -362,6 +364,9 @@ Template.o: ${DEPS_COMMON} projects/Template/Template.h projects/Template/Templa test_fp.o: ${DEPS_COMMON} projects/test_fp/test_fp.h projects/test_fp/test_fp.cpp ${CMP} ${CXXFLAGS} ${FLAGS} -c projects/test_fp/test_fp.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} +testAmr.o: ${DEPS_COMMON} projects/testAmr/testAmr.h projects/testAmr/testAmr.cpp + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/testAmr/testAmr.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + testHall.o: ${DEPS_COMMON} projects/testHall/testHall.h projects/testHall/testHall.cpp ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/testHall/testHall.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} diff --git a/grid.cpp b/grid.cpp index cd4a73348..282df6ee1 100644 --- a/grid.cpp +++ b/grid.cpp @@ -123,30 +123,37 @@ void initializeGrid( .initialize(comm) .set_geometry(geom_params); - if(true) { - std::array coords; - coords[0] = (P::xmax - P::xmin) / 2.0; - coords[1] = (P::ymax - P::ymin) / 2.0; - coords[2] = (P::zmax - P::zmin) / 2.0; - cout << "Trying to refine at " << coords[0] << ", " << coords[1] << ", " << coords[2] << endl; - CellID myCell = mpiGrid.get_existing_cell(coords); - cout << "Got cell ID " << myCell << endl; - cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << endl; - bool refineSuccess = mpiGrid.refine_completely_at(coords); - std::vector refinedCells = mpiGrid.stop_refining(); - std::cout << std::boolalpha <<"Refine result: " << refineSuccess << endl; - mpiGrid.balance_load(); - if(refineSuccess) { - cout << "Refined Cells are: "; - for (auto cellid : refinedCells) { 
- cout << cellid << " "; - } - cout << endl; - mpiGrid.write_vtk_file("mpiGrid.vtk"); - } + + if(project.refineSpatialCells(mpiGrid)) { recalculateLocalCellsCache(); + } else { + std::cout << "Call to refineSpatialCells failed" << std::endl; } + // if(true) { + // std::array coords; + // coords[0] = (P::xmax - P::xmin) / 2.0; + // coords[1] = (P::ymax - P::ymin) / 2.0; + // coords[2] = (P::zmax - P::zmin) / 2.0; + // cout << "Trying to refine at " << coords[0] << ", " << coords[1] << ", " << coords[2] << endl; + // CellID myCell = mpiGrid.get_existing_cell(coords); + // cout << "Got cell ID " << myCell << endl; + // cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << endl; + // bool refineSuccess = mpiGrid.refine_completely_at(coords); + // std::vector refinedCells = mpiGrid.stop_refining(); + // std::cout << std::boolalpha <<"Refine result: " << refineSuccess << endl; + // mpiGrid.balance_load(); + // if(refineSuccess) { + // cout << "Refined Cells are: "; + // for (auto cellid : refinedCells) { + // cout << cellid << " "; + // } + // cout << endl; + // mpiGrid.write_vtk_file("mpiGrid.vtk"); + // } + // recalculateLocalCellsCache(); + // } + // Init velocity mesh on all cells initVelocityGridGeometry(mpiGrid); initializeStencils(mpiGrid); diff --git a/vlasiator.cpp b/vlasiator.cpp index bde05a40f..d5d01da62 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -436,6 +436,10 @@ int main(int argn,char* args[]) { // std::cout << "cell " << cell << " dx,dy,dz = " << dx << ", " << dy << ", " << dz << std::endl; // } + + // cout << "Local cells are: "; + // for(auto id : cells) cout << id << " "; + // cout << endl; // Couple FSGrids to mpiGrid // TODO: Do we really need to couple *all* of these fields? @@ -457,6 +461,7 @@ int main(int argn,char* args[]) { // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. for(auto& dccrgId : cells) { const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, fsGridDimensions, dccrgId); + for (auto fsgridId : fsgridIds) { perBGrid. setGridCoupling(fsgridId, myRank); perBDt2Grid. 
setGridCoupling(fsgridId, myRank); @@ -502,7 +507,7 @@ int main(int argn,char* args[]) { feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,false); feedMomentsIntoFsGrid(mpiGrid, cells, momentsDt2Grid,false); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::start("Init field propagator"); if ( @@ -528,7 +533,7 @@ int main(int argn,char* args[]) { } phiprof::stop("Init field propagator"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Initialize Poisson solver (if used) if (P::propagatePotential == true) { @@ -543,7 +548,7 @@ int main(int argn,char* args[]) { // Free up memory: readparameters.finalize(); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; if (P::isRestart == false) { // Run Vlasov solver once with zero dt to initialize @@ -580,7 +585,7 @@ int main(int argn,char* args[]) { getVolumeFieldsFromFsGrid(volGrid, mpiGrid, cells); phiprof::stop("getVolumeFieldsFromFsGrid"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Save restart data if (P::writeInitialState) { @@ -621,14 +626,14 @@ int main(int argn,char* args[]) { phiprof::stop("write-initial-state"); } - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; if (P::isRestart == false) { //compute new dt phiprof::start("compute-dt"); getFsGridMaxDt(technicalGrid, mpiGrid, cells); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; computeNewTimeStep(mpiGrid,newDt,dtIsChanged); if (P::dynamicTimestep == true && dtIsChanged == true) { @@ -638,7 +643,7 @@ int main(int argn,char* args[]) { phiprof::stop("compute-dt"); } - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; if (!P::isRestart) { //go forward by dt/2 in V, initializes leapfrog split. In restarts the @@ -660,7 +665,7 @@ int main(int argn,char* args[]) { // ***** INITIALIZATION COMPLETE ***** // *********************************** - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Main simulation loop: if (myRank == MASTER_RANK) logFile << "(MAIN): Starting main simulation loop." 
<< endl << writeVerbose; @@ -707,8 +712,12 @@ int main(int argn,char* args[]) { creal dx = mpiGrid[cell]->parameters[CellParams::DX]; creal dy = mpiGrid[cell]->parameters[CellParams::DY]; creal dz = mpiGrid[cell]->parameters[CellParams::DZ]; + creal x = mpiGrid[cell]->parameters[CellParams::XCRD]; + creal y = mpiGrid[cell]->parameters[CellParams::YCRD]; + creal z = mpiGrid[cell]->parameters[CellParams::ZCRD]; nSum += rho*dx*dy*dz; + //cout << "Cell " << cell << " rho = " << rho << " x: " << x << " y: " << y << " z: " << z << endl; } cout << "nSum = " << nSum << endl; @@ -716,7 +725,7 @@ int main(int argn,char* args[]) { P::t-P::dt <= P::t_max+DT_EPSILON && wallTimeRestartCounter <= P::exitAfterRestarts) { - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; addTimedBarrier("barrier-loop-start"); @@ -729,7 +738,7 @@ int main(int argn,char* args[]) { } phiprof::stop("checkExternalCommands"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; //write out phiprof profiles and logs with a lower interval than normal //diagnostic (every 10 diagnostic intervals). @@ -760,7 +769,7 @@ int main(int argn,char* args[]) { logFile << writeVerbose; phiprof::stop("logfile-io"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Check whether diagnostic output has to be produced if (P::diagnosticInterval != 0 && P::tstep % P::diagnosticInterval == 0) { @@ -788,7 +797,7 @@ int main(int argn,char* args[]) { phiprof::stop("diagnostic-io"); } - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; bool extractFsGridFields = true; // write system, loop through write classes @@ -843,14 +852,14 @@ int main(int argn,char* args[]) { } } - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Reduce globalflags::bailingOut from all processes phiprof::start("Bailout-allreduce"); MPI_Allreduce(&(globalflags::bailingOut), &(doBailout), 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); phiprof::stop("Bailout-allreduce"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Write restart data if needed // Combined with checking of additional load balancing to have only one collective call. 
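// (One MPI_Bcast of the two-element doNow array below carries both the
// restart-write decision and the extra load-balance decision from the
// master rank, so all ranks take the same branches without paying for a
// second collective call.)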
@@ -877,7 +886,7 @@ int main(int argn,char* args[]) { globalflags::balanceLoad = false; } } - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; MPI_Bcast( &doNow, 2 , MPI_INT , MASTER_RANK ,MPI_COMM_WORLD); writeRestartNow = doNow[0]; @@ -888,7 +897,7 @@ int main(int argn,char* args[]) { } phiprof::stop("compute-is-restart-written-and-extra-LB"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; if (writeRestartNow >= 1){ phiprof::start("write-restart"); @@ -911,7 +920,7 @@ int main(int argn,char* args[]) { phiprof::stop("IO"); addTimedBarrier("barrier-end-io"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; //no need to propagate if we are on the final step, we just //wanted to make sure all IO is done even for final step @@ -935,7 +944,7 @@ int main(int argn,char* args[]) { logFile << "(LB): ... done!" << endl << writeVerbose; P::prepareForRebalance = false; - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Re-couple fsgrids to updated grid situation phiprof::start("fsgrid-recouple-after-lb"); @@ -945,7 +954,7 @@ int main(int argn,char* args[]) { cout << "Local cells are: "; for(auto id : cells) cout << id << " "; cout << endl; - + perBGrid. setupForGridCoupling(); perBDt2Grid. setupForGridCoupling(); EGrid. setupForGridCoupling(); @@ -960,15 +969,16 @@ int main(int argn,char* args[]) { volGrid. setupForGridCoupling(); technicalGrid. setupForGridCoupling(); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. + cout << "send tags are: " << endl; for(auto& dccrgId : cells) { const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, fsGridDimensions, dccrgId); - cout << "Fsgrid ids for cell " << dccrgId << " are: "; - for (auto fsgridId : fsgridIds) { - cout << fsgridId << " "; + //cout << "Fsgrid ids for cell " << dccrgId << " are: "; + for (auto& fsgridId : fsgridIds) { + //cout << fsgridId << " "; perBGrid. setGridCoupling(fsgridId, myRank); perBDt2Grid. setGridCoupling(fsgridId, myRank); EGrid. setGridCoupling(fsgridId, myRank); @@ -983,10 +993,10 @@ int main(int argn,char* args[]) { volGrid. setGridCoupling(fsgridId, myRank); technicalGrid. setGridCoupling(fsgridId, myRank); } - cout << endl; + //cout << endl; } - //cout << endl; - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + cout << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; perBGrid. finishGridCoupling(); perBDt2Grid. 
finishGridCoupling(); @@ -1006,7 +1016,7 @@ int main(int argn,char* args[]) { overrideRebalanceNow = false; } - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; //get local cells const vector<CellID>& cells = getLocalCells(); @@ -1071,7 +1081,7 @@ int main(int argn,char* args[]) { } phiprof::start("Spatial-space"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; if( P::propagateVlasovTranslation) { calculateSpatialTranslation(mpiGrid,P::dt); @@ -1085,13 +1095,16 @@ int main(int argn,char* args[]) { creal dx = mpiGrid[cell]->parameters[CellParams::DX]; creal dy = mpiGrid[cell]->parameters[CellParams::DY]; creal dz = mpiGrid[cell]->parameters[CellParams::DZ]; + creal x = mpiGrid[cell]->parameters[CellParams::XCRD]; + creal y = mpiGrid[cell]->parameters[CellParams::YCRD]; + creal z = mpiGrid[cell]->parameters[CellParams::ZCRD]; nSum += rho*dx*dy*dz; - //cout << "Cell " << cell << " rho = " << rho << endl; + //cout << "Cell " << cell << " rho = " << rho << " x: " << x << " y: " << y << " z: " << z << endl; } cout << "nSum = " << nSum << endl; - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << " dt = " << P::dt << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::stop("Spatial-space",computedCells,"Cells"); phiprof::start("Compute interp moments"); @@ -1223,7 +1236,7 @@ int main(int argn,char* args[]) { double after = MPI_Wtime(); - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::stop("Simulation"); phiprof::start("Finalization"); From b5903d7785a1c0402d17d0d4d41cc3c86e94ef4d Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Tue, 30 Oct 2018 11:02:32 +0200 Subject: [PATCH 111/602] Write spatial AMR mesh that VisIt can parse Thanks to Arto for implementing this a long time ago. I unfortunately had to reinvent the wheel first, before I noticed that this is exactly what's required.
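For context, the level-by-level bookkeeping such a mesh writer needs can be sketched in a few lines, assuming only dccrg's public get_cells() and get_refinement_level(); the actual file-format calls inside writeZoneGlobalIdNumbers (see the hunk below) are not reproduced here:

    #include <map>
    #include <vector>

    // Group the local cells by refinement level so that each level of the
    // AMR hierarchy can be written out as its own list of zone global ids.
    std::map<int, std::vector<CellID>> cellsByLevel;
    for (const CellID cell : mpiGrid.get_cells()) {
       cellsByLevel[mpiGrid.get_refinement_level(cell)].push_back(cell);
    }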
--- iowrite.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/iowrite.cpp b/iowrite.cpp index e9668320b..a84281d58 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -627,7 +627,11 @@ bool writeZoneGlobalIdNumbers( const dccrg::Dccrg Date: Tue, 30 Oct 2018 11:06:14 +0200 Subject: [PATCH 112/602] Added -g --- MAKE/Makefile.appa | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAKE/Makefile.appa b/MAKE/Makefile.appa index 33e90ff57..796c7a873 100644 --- a/MAKE/Makefile.appa +++ b/MAKE/Makefile.appa @@ -48,7 +48,7 @@ FLAGS = #GNU flags: CC_BRAND = gcc CC_BRAND_VERSION = 5.4.0 -CXXFLAGS += -O3 -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2 +CXXFLAGS += -g -O2 -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2 #CXXFLAGS += -g -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2 testpackage: CXXFLAGS = -O2 -fopenmp -funroll-loops -std=c++0x -fabi-version=0 -mavx From 32624aba456d84fe8c29e64bd0c4fe1493352ba2 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 30 Oct 2018 11:06:34 +0200 Subject: [PATCH 113/602] Moved printouts behind if statements --- vlasiator.cpp | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index d5d01da62..f37cf819d 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -269,6 +269,8 @@ int main(int argn,char* args[]) { Real newDt; bool dtIsChanged; const bool printLines = false; + const bool printCells = false; + const bool printSums = false; // Init MPI: int required=MPI_THREAD_FUNNELED; @@ -369,6 +371,8 @@ int main(int argn,char* args[]) { // Add AMR refinement criterias: amr_ref_criteria::addRefinementCriteria(); + + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Initialize grid. After initializeGrid local cells have dist // functions, and B fields set. 
Cells have also been classified for @@ -380,6 +384,8 @@ int main(int argn,char* args[]) { //dccrg::Dccrg mpiGrid; initializeGrid(argn,args,mpiGrid,sysBoundaries,*project); isSysBoundaryCondDynamic = sysBoundaries.isDynamic(); + + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::stop("Init grid"); @@ -717,9 +723,9 @@ int main(int argn,char* args[]) { creal z = mpiGrid[cell]->parameters[CellParams::ZCRD]; nSum += rho*dx*dy*dz; - //cout << "Cell " << cell << " rho = " << rho << " x: " << x << " y: " << y << " z: " << z << endl; + if(printCells) cout << "Cell " << cell << " rho = " << rho << " x: " << x << " y: " << y << " z: " << z << endl; } - cout << "nSum = " << nSum << endl; + if(printSums) cout << "nSum = " << nSum << endl; while(P::tstep <= P::tstep_max && P::t-P::dt <= P::t_max+DT_EPSILON && @@ -1100,9 +1106,9 @@ int main(int argn,char* args[]) { creal z = mpiGrid[cell]->parameters[CellParams::ZCRD]; nSum += rho*dx*dy*dz; - //cout << "Cell " << cell << " rho = " << rho << " x: " << x << " y: " << y << " z: " << z << endl; + if(printCells) cout << "Cell " << cell << " rho = " << rho << " x: " << x << " y: " << y << " z: " << z << endl; } - cout << "nSum = " << nSum << endl; + if (printSums) cout << "nSum = " << nSum << endl; if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::stop("Spatial-space",computedCells,"Cells"); From 164959396eb236cae641d876fe7e7e297686efd4 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 30 Oct 2018 11:07:27 +0200 Subject: [PATCH 114/602] Clean up of commented code and unnecessary prints --- grid.cpp | 29 +---------------------------- 1 file changed, 1 insertion(+), 28 deletions(-) diff --git a/grid.cpp b/grid.cpp index 282df6ee1..7f69e5ed0 100644 --- a/grid.cpp +++ b/grid.cpp @@ -62,7 +62,6 @@ void initVelocityGridGeometry(dccrg::Dccrg& mpiGrid); void initializeStencils(dccrg::Dccrg& mpiGrid); -#warning This is for testing, can be removed later void writeVelMesh(dccrg::Dccrg& mpiGrid) { const vector& cells = getLocalCells(); @@ -126,33 +125,7 @@ void initializeGrid( if(project.refineSpatialCells(mpiGrid)) { recalculateLocalCellsCache(); - } else { - std::cout << "Call to refineSpatialCells failed" << std::endl; - } - - // if(true) { - // std::array coords; - // coords[0] = (P::xmax - P::xmin) / 2.0; - // coords[1] = (P::ymax - P::ymin) / 2.0; - // coords[2] = (P::zmax - P::zmin) / 2.0; - // cout << "Trying to refine at " << coords[0] << ", " << coords[1] << ", " << coords[2] << endl; - // CellID myCell = mpiGrid.get_existing_cell(coords); - // cout << "Got cell ID " << myCell << endl; - // cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << endl; - // bool refineSuccess = mpiGrid.refine_completely_at(coords); - // std::vector refinedCells = mpiGrid.stop_refining(); - // std::cout << std::boolalpha <<"Refine result: " << refineSuccess << endl; - // mpiGrid.balance_load(); - // if(refineSuccess) { - // cout << "Refined Cells are: "; - // for (auto cellid : refinedCells) { - // cout << cellid << " "; - // } - // cout << endl; - // mpiGrid.write_vtk_file("mpiGrid.vtk"); - // } - // recalculateLocalCellsCache(); - // } + } // Init velocity mesh on all cells initVelocityGridGeometry(mpiGrid); From b1d1fcd6e407bf01872f7a41f78cd57767f15987 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 30 Oct 2018 11:07:52 +0200 Subject: [PATCH 115/602] added virtual to refineSpatialCells --- projects/project.cpp | 2 +- projects/project.h | 2 +- 
projects/testAmr/testAmr.cpp | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/projects/project.cpp b/projects/project.cpp index cd828ccb7..ba67d6d3a 100644 --- a/projects/project.cpp +++ b/projects/project.cpp @@ -518,7 +518,7 @@ namespace projects { int rank; MPI_Comm_rank(MPI_COMM_WORLD,&rank); if (rank == MASTER_RANK) { - cerr << "(Project.cpp) WARNING: Base class 'refineSpatialCells' in " << __FILE__ << ":" << __LINE__ << " called." << endl; + cerr << "(Project.cpp) Base class 'refineSpatialCells' in " << __FILE__ << ":" << __LINE__ << " called. This function does nothing." << endl; } return false; diff --git a/projects/project.h b/projects/project.h index d976fae8c..ca9f7f46a 100644 --- a/projects/project.h +++ b/projects/project.h @@ -74,7 +74,7 @@ namespace projects { Real setVelocityBlock(spatial_cell::SpatialCell* cell,const vmesh::LocalID& blockLID,const uint popID) const; - bool refineSpatialCells( dccrg::Dccrg& mpiGrid ) const; + virtual bool refineSpatialCells( dccrg::Dccrg& mpiGrid ) const; protected: /*! \brief Returns a list of blocks to loop through when initialising. diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index c77ad61fc..90e0cf0d2 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -253,6 +253,8 @@ namespace projects { bool testAmr::refineSpatialCells( dccrg::Dccrg& mpiGrid ) const { + //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + std::array coords; coords[0] = (P::xmax - P::xmin) / 2.0; coords[1] = (P::ymax - P::ymin) / 2.0; From a00dbf33de3ce03ad19e8812ecd442cd9572a16a Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 30 Oct 2018 11:19:33 +0200 Subject: [PATCH 116/602] Updated config file --- projects/testAmr/testAmr.cfg | 122 ++++++++++++++++------------------- 1 file changed, 55 insertions(+), 67 deletions(-) diff --git a/projects/testAmr/testAmr.cfg b/projects/testAmr/testAmr.cfg index 16bcae4ec..581ca410c 100644 --- a/projects/testAmr/testAmr.cfg +++ b/projects/testAmr/testAmr.cfg @@ -1,48 +1,52 @@ +dynamic_timestep = 1 +project = testAmr +ParticlePopulations = proton propagate_field = 0 -propagate_vlasov_acceleration = 1 -propagate_vlasov_translation = 0 -dynamic_timestep = 0 -project = MultiPeak +propagate_vlasov_acceleration = 0 +propagate_vlasov_translation = 1 + +[proton_properties] +mass = 1 +mass_units = PROTON +charge = 1 [io] diagnostic_write_interval = 1 write_initial_state = 1 -restart_walltime_interval = 100000 -number_of_restarts = 1 -system_write_t_interval = 360 +system_write_t_interval = 0.01 system_write_file_name = fullf system_write_distribution_stride = 1 system_write_distribution_xline_stride = 0 system_write_distribution_yline_stride = 0 system_write_distribution_zline_stride = 0 + [gridbuilder] -x_length = 1 -y_length = 1 -z_length = 1 +x_length = 9 +y_length = 3 +z_length = 3 x_min = 0.0 -x_max = 1.0e3 +x_max = 1.8e6 y_min = 0.0 -y_max = 1.0e3 +y_max = 0.6e6 z_min = 0.0 -z_max = 1.0e3 +z_max = 0.6e6 +timestep_max = 100 + +[proton_vspace] vx_min = -2.0e6 vx_max = +2.0e6 vy_min = -2.0e6 vy_max = +2.0e6 vz_min = -2.0e6 vz_max = +2.0e6 -vx_length = 12 -vy_length = 12 -vz_length = 12 -t_max = 3600.0 -dt = 15.0 - -[vlasovsolver] -#minCFL = 0.4 -#maxCFL = 0.6 -#vlasovSemiLagAcceleration = 0 +vx_length = 1 +vy_length = 1 +vz_length = 1 +max_refinement_level = 1 +[proton_sparse] +minValue = 1.0e-16 [boundaries] periodic_x = yes @@ -50,58 +54,42 @@ periodic_y = yes periodic_z = yes [variables] -output = Rho +output = populations_Rho output = B 
-output = BackgroundB -output = PerturbedB output = Pressure -output = RhoV +output = populations_V output = E -output = PTensor output = MPIrank -output = Blocks -diagnostic = Blocks -diagnostic = populations_RhoLossAdjust -diagnostic = RhoLossVelBoundary -diagnostic = MaxDistributionFunction -diagnostic = MinDistributionFunction +output = populations_Blocks +#output = VelocitySubSteps -[sparse] -minValue = 1.0e-22 +diagnostic = populations_Blocks +#diagnostic = Pressure +#diagnostic = populations_Rho +#diagnostic = populations_RhoLossAdjust +#diagnostic = populations_RhoLossVelBoundary -[MultiPeak] -n = 2 +[testAmr] +#magnitude of 1.82206867e-10 gives a period of 360s, useful for testing... +Bx = 1.2e-10 +By = 0.8e-10 +Bz = 1.1135233442526334e-10 +magXPertAbsAmp = 0 +magYPertAbsAmp = 0 +magZPertAbsAmp = 0 +densityModel = testcase +nVelocitySamples = 3 -Vx = -5.0e5 -Vy = 0.0 +[proton_testAmr] +n = 1 +Vx = 5e5 +Vy = 5e5 Vz = 0.0 -Tx = 1.0e5 -Ty = 1.0e5 -Tz = 1.0e5 +Tx = 500000.0 +Ty = 500000.0 +Tz = 500000.0 rho = 1.0e6 rhoPertAbsAmp = 0.0 -Vx = 5.0e5 -Vy = 0.0 -Vz = 0.0 -Tx = 1.0e5 -Ty = 5.0e5 -Tz = 5.0e5 -rho = 1.0e4 -rhoPertAbsAmp = 0.0 - -Bx = 0.0 -By = 0.0 -Bz = 1.82206867e-10 - -dBx = 0.0 -dBy = 0.0e-10 -dBz = 0.0e-10 - -magXPertAbsAmp = 0.0 -magYPertAbsAmp = 0.0 -magZPertAbsAmp = 0.0 - -lambda = 120.0e5 - -nVelocitySamples = 2 +[loadBalance] +algorithm = RCB \ No newline at end of file From 9f50d8e91b57cbacbbb9639d75223f8dea332d6f Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 1 Nov 2018 10:53:45 +0200 Subject: [PATCH 117/602] Added two parameters to CellParams, CELLID which is the dccrg id of the cell and REFINEMENT_LEVEL which is the refinement level of the cell. These are set in initSpatialCellCoordinates. --- common.h | 2 ++ grid.cpp | 11 +++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/common.h b/common.h index 0755bb774..8ca859c52 100644 --- a/common.h +++ b/common.h @@ -217,6 +217,8 @@ namespace CellParams { BGEZVOL, /*!< Background electric field averaged over spatial cell, z-component.*/ FSGRID_RANK, /*!< Rank of this cell in the FsGrid cartesian communicator */ FSGRID_BOUNDARYTYPE, /*!< Boundary type of this cell, as stored in the fsGrid */ + CELLID, /*! < DCCRG cell index */ + REFINEMENT_LEVEL, /*! < Refinement level */ N_SPATIAL_CELL_PARAMS }; } diff --git a/grid.cpp b/grid.cpp index 7f69e5ed0..9548086d5 100644 --- a/grid.cpp +++ b/grid.cpp @@ -126,7 +126,7 @@ void initializeGrid( if(project.refineSpatialCells(mpiGrid)) { recalculateLocalCellsCache(); } - + // Init velocity mesh on all cells initVelocityGridGeometry(mpiGrid); initializeStencils(mpiGrid); @@ -140,18 +140,18 @@ void initializeGrid( if (myRank == MASTER_RANK) logFile << "(INIT): Set initial state." << endl << writeVerbose; phiprof::start("Set initial state"); - + phiprof::start("Set spatial cell coordinates"); initSpatialCellCoordinates(mpiGrid); phiprof::stop("Set spatial cell coordinates"); - + phiprof::start("Initialize system boundary conditions"); if(sysBoundaries.initSysBoundaries(project, P::t_min) == false) { if (myRank == MASTER_RANK) cerr << "Error in initialising the system boundaries." << endl; exit(1); } phiprof::stop("Initialize system boundary conditions"); - + // Initialise system boundary conditions (they need the initialised positions!!) 
phiprof::start("Classify cells (sys boundary conditions)"); if(sysBoundaries.classifyCells(mpiGrid) == false) { @@ -302,6 +302,9 @@ void initSpatialCellCoordinates(dccrg::Dccrgparameters[CellParams::DX ] = cell_length[0]; mpiGrid[cells[i]]->parameters[CellParams::DY ] = cell_length[1]; mpiGrid[cells[i]]->parameters[CellParams::DZ ] = cell_length[2]; + + mpiGrid[cells[i]]->parameters[CellParams::CELLID] = cells[i]; + mpiGrid[cells[i]]->parameters[CellParams::REFINEMENT_LEVEL] = mpiGrid.get_refinement_level(cells[i]); } } From e52f0a49fee9ef61c81c2badb05f4344d5d4e674 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 1 Nov 2018 10:55:07 +0200 Subject: [PATCH 118/602] Printing process numbers on debug lines --- vlasiator.cpp | 69 ++++++++++++++++++++++++++------------------------- 1 file changed, 35 insertions(+), 34 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index f37cf819d..afabbbac8 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -268,6 +268,7 @@ int main(int argn,char* args[]) { typedef Parameters P; Real newDt; bool dtIsChanged; + const bool printLines = false; const bool printCells = false; const bool printSums = false; @@ -372,7 +373,7 @@ int main(int argn,char* args[]) { // Add AMR refinement criterias: amr_ref_criteria::addRefinementCriteria(); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Initialize grid. After initializeGrid local cells have dist // functions, and B fields set. Cells have also been classified for @@ -385,7 +386,7 @@ int main(int argn,char* args[]) { initializeGrid(argn,args,mpiGrid,sysBoundaries,*project); isSysBoundaryCondDynamic = sysBoundaries.isDynamic(); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::stop("Init grid"); @@ -513,7 +514,7 @@ int main(int argn,char* args[]) { feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,false); feedMomentsIntoFsGrid(mpiGrid, cells, momentsDt2Grid,false); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::start("Init field propagator"); if ( @@ -539,7 +540,7 @@ int main(int argn,char* args[]) { } phiprof::stop("Init field propagator"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Initialize Poisson solver (if used) if (P::propagatePotential == true) { @@ -554,7 +555,7 @@ int main(int argn,char* args[]) { // Free up memory: readparameters.finalize(); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; if (P::isRestart == false) { // Run Vlasov solver once with zero dt to initialize @@ -591,7 +592,7 @@ int main(int argn,char* args[]) { getVolumeFieldsFromFsGrid(volGrid, mpiGrid, cells); phiprof::stop("getVolumeFieldsFromFsGrid"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Save restart data if (P::writeInitialState) { @@ -632,14 
+633,14 @@ int main(int argn,char* args[]) { phiprof::stop("write-initial-state"); } - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; if (P::isRestart == false) { //compute new dt phiprof::start("compute-dt"); getFsGridMaxDt(technicalGrid, mpiGrid, cells); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; computeNewTimeStep(mpiGrid,newDt,dtIsChanged); if (P::dynamicTimestep == true && dtIsChanged == true) { @@ -649,7 +650,7 @@ int main(int argn,char* args[]) { phiprof::stop("compute-dt"); } - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; if (!P::isRestart) { //go forward by dt/2 in V, initializes leapfrog split. In restarts the @@ -671,7 +672,7 @@ int main(int argn,char* args[]) { // ***** INITIALIZATION COMPLETE ***** // *********************************** - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Main simulation loop: if (myRank == MASTER_RANK) logFile << "(MAIN): Starting main simulation loop." << endl << writeVerbose; @@ -731,7 +732,7 @@ int main(int argn,char* args[]) { P::t-P::dt <= P::t_max+DT_EPSILON && wallTimeRestartCounter <= P::exitAfterRestarts) { - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; addTimedBarrier("barrier-loop-start"); @@ -744,7 +745,7 @@ int main(int argn,char* args[]) { } phiprof::stop("checkExternalCommands"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; //write out phiprof profiles and logs with a lower interval than normal //diagnostic (every 10 diagnostic intervals). 
@@ -775,7 +776,7 @@ int main(int argn,char* args[]) { logFile << writeVerbose; phiprof::stop("logfile-io"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Check whether diagnostic output has to be produced if (P::diagnosticInterval != 0 && P::tstep % P::diagnosticInterval == 0) { @@ -803,7 +804,7 @@ int main(int argn,char* args[]) { phiprof::stop("diagnostic-io"); } - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; bool extractFsGridFields = true; // write system, loop through write classes @@ -858,14 +859,14 @@ int main(int argn,char* args[]) { } } - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Reduce globalflags::bailingOut from all processes phiprof::start("Bailout-allreduce"); MPI_Allreduce(&(globalflags::bailingOut), &(doBailout), 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); phiprof::stop("Bailout-allreduce"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Write restart data if needed // Combined with checking of additional load balancing to have only one collective call. @@ -892,7 +893,7 @@ int main(int argn,char* args[]) { globalflags::balanceLoad = false; } } - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; MPI_Bcast( &doNow, 2 , MPI_INT , MASTER_RANK ,MPI_COMM_WORLD); writeRestartNow = doNow[0]; @@ -903,7 +904,7 @@ int main(int argn,char* args[]) { } phiprof::stop("compute-is-restart-written-and-extra-LB"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; if (writeRestartNow >= 1){ phiprof::start("write-restart"); @@ -926,7 +927,7 @@ int main(int argn,char* args[]) { phiprof::stop("IO"); addTimedBarrier("barrier-end-io"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; //no need to propagate if we are on the final step, we just //wanted to make sure all IO is done even for final step @@ -950,7 +951,7 @@ int main(int argn,char* args[]) { logFile << "(LB): ... done!" << endl << writeVerbose; P::prepareForRebalance = false; - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Re-couple fsgrids to updated grid situation phiprof::start("fsgrid-recouple-after-lb"); @@ -975,7 +976,7 @@ int main(int argn,char* args[]) { volGrid. setupForGridCoupling(); technicalGrid. 
setupForGridCoupling(); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. @@ -1002,7 +1003,7 @@ int main(int argn,char* args[]) { //cout << endl; } cout << endl; - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; perBGrid. finishGridCoupling(); perBDt2Grid. finishGridCoupling(); @@ -1022,7 +1023,7 @@ int main(int argn,char* args[]) { overrideRebalanceNow = false; } - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; //get local cells const vector& cells = getLocalCells(); @@ -1087,7 +1088,7 @@ int main(int argn,char* args[]) { } phiprof::start("Spatial-space"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; if( P::propagateVlasovTranslation) { calculateSpatialTranslation(mpiGrid,P::dt); @@ -1110,7 +1111,7 @@ int main(int argn,char* args[]) { } if (printSums) cout << "nSum = " << nSum << endl; - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::stop("Spatial-space",computedCells,"Cells"); phiprof::start("Compute interp moments"); @@ -1127,7 +1128,7 @@ int main(int argn,char* args[]) { ); phiprof::stop("Compute interp moments"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Apply boundary conditions if (P::propagateVlasovTranslation || P::propagateVlasovAcceleration ) { @@ -1137,14 +1138,14 @@ int main(int argn,char* args[]) { addTimedBarrier("barrier-boundary-conditions"); } - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Propagate fields forward in time by dt. This needs to be done before the // moments for t + dt are computed (field uses t and t+0.5dt) if (P::propagateField) { phiprof::start("Propagate Fields"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::start("fsgrid-coupling-in"); // Copy moments over into the fsgrid. 
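// (This copy relies on the dccrg-to-fsgrid coupling set up earlier: fsgrid
// is uniform at the finest refinement level, so a dccrg cell at refinement
// level l covers a cube of 2^(maxRefLevel - l) fsgrid cells per side, and
// mapDccrgIdToFsGrid returns that whole set of target ids.)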
@@ -1193,12 +1194,12 @@ int main(int argn,char* args[]) { calculateAcceleration(mpiGrid, 0.0); } - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::stop("Velocity-space",computedCells,"Cells"); addTimedBarrier("barrier-after-acceleration"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::start("Compute interp moments"); // *here we compute rho and rho_v for timestep t + dt, so next @@ -1216,7 +1217,7 @@ int main(int argn,char* args[]) { ); phiprof::stop("Compute interp moments"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::stop("Propagate",computedCells,"Cells"); @@ -1224,7 +1225,7 @@ int main(int argn,char* args[]) { project->hook(hook::END_OF_TIME_STEP, mpiGrid); phiprof::stop("Project endTimeStep"); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Check timestep if (P::dt < P::bailout_min_dt) { @@ -1237,12 +1238,12 @@ int main(int argn,char* args[]) { ++P::tstep; P::t += P::dt; - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; } double after = MPI_Wtime(); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::stop("Simulation"); phiprof::start("Finalization"); From 6625f9ffaa918c8aa310cf39e9a3fc03fb81c861 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 1 Nov 2018 10:55:52 +0200 Subject: [PATCH 119/602] Modified refinement criteria --- projects/testAmr/testAmr.cpp | 76 ++++++++++++++++++++++++++---------- 1 file changed, 56 insertions(+), 20 deletions(-) diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index 90e0cf0d2..62d16d0c9 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -170,7 +170,8 @@ namespace projects { break; case TestCase: rhoFactor = 1.0; - if (x < 6.1e5) { + if (x < 0.31 * (P::xmax - P::xmin) && + y < 0.31 * (P::ymax - P::ymin)) { rhoFactor = 3.0; } break; @@ -253,29 +254,64 @@ namespace projects { bool testAmr::refineSpatialCells( dccrg::Dccrg& mpiGrid ) const { - //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - - std::array coords; - coords[0] = (P::xmax - P::xmin) / 2.0; - coords[1] = (P::ymax - P::ymin) / 2.0; - coords[2] = (P::zmax - P::zmin) / 2.0; - std::cout << "Trying to refine at " << coords[0] << ", " << coords[1] << ", " << coords[2] << std::endl; - CellID myCell = mpiGrid.get_existing_cell(coords); - std::cout << "Got cell ID " << myCell << std::endl; + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; - bool refineSuccess = mpiGrid.refine_completely_at(coords); - std::vector refinedCells = mpiGrid.stop_refining(); - mpiGrid.balance_load(); - if(refineSuccess) { - cout << 
"Refined Cells are: "; - for (auto cellid : refinedCells) { - cout << cellid << " "; + + std::array xyz_mid; + xyz_mid[0] = (P::xmax - P::xmin) / 2.0; + xyz_mid[1] = (P::ymax - P::ymin) / 2.0; + xyz_mid[2] = (P::zmax - P::zmin) / 2.0; + + std::vector refineSuccess; + + // Refine the top-right quadrant of the box (-boundaries) + for (double x = xyz_mid[0]; x < P::xmax - P::dx_ini; x += P::dx_ini) { + for (double y = xyz_mid[1]; y < P::ymax - P::dy_ini; y += P::dy_ini) { + auto xyz = xyz_mid; + xyz[0] = x; + xyz[1] = y; + //std::cout << "Trying to refine at " << xyz[0] << ", " << xyz[1] << ", " << xyz[2] << std::endl; + //CellID myCell = mpiGrid.get_existing_cell(xyz); + //std::cout << "Got cell ID " << myCell << std::endl; + //refineSuccess.push_back(mpiGrid.refine_completely(myCell)); + + refineSuccess.push_back(mpiGrid.refine_completely_at(xyz)); } - cout << endl; - mpiGrid.write_vtk_file("mpiGrid.vtk"); } - return refineSuccess; + std::vector refinedCells = mpiGrid.stop_refining(true); + + cout << "Finished first level of refinement" << endl; + + // cout << "Refined Cells are: "; + // for (auto cellid : refinedCells) { + // cout << cellid << " "; + // } + // cout << endl; + + // auto xyz = xyz_mid; + // xyz[0] = 1.5 * xyz[0]; + // xyz[1] = 1.5 * xyz[1]; + // std::cout << "Trying to refine at " << xyz[0] << ", " << xyz[1] << ", " << xyz[2] << std::endl; + // CellID myCell = mpiGrid.get_existing_cell(xyz); + // std::cout << "Got cell ID " << myCell << std::endl; + // //mpiGrid.refine_completely_at(xyz); + // mpiGrid.refine_completely(myCell); + // refinedCells.clear(); + // refinedCells = mpiGrid.stop_refining(true); + // cout << "Finished second level of refinement" << endl; + // cout << "Refined Cells are: "; + // for (auto cellid : refinedCells) { + // cout << cellid << " "; + // } + // cout << endl; + //mpiGrid.write_vtk_file("mpiGrid.vtk"); + + mpiGrid.balance_load(); + + cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + + return std::all_of(refineSuccess.begin(), refineSuccess.end(), [](bool v) { return v; }); } }// namespace projects From f131c43dc5f7fb89819ff3908c85cdb3b8c69964 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 1 Nov 2018 10:56:38 +0200 Subject: [PATCH 120/602] Updated areaRatio calculation to work on multiple refinement levels. --- vlasovsolver/cpu_trans_map_amr.cpp | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 37404a28f..2624c1c1d 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -895,6 +895,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const Realv dt, const uint popID) { + const bool printPencils = true; const bool printLines = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ @@ -995,7 +996,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } // Print out ids of pencils (if needed for debugging) - if (false) { + if (printPencils) { uint ibeg = 0; uint iend = 0; std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; @@ -1248,6 +1249,10 @@ bool trans_map_1d_amr(const dccrg::Dccrg& uint targetLength = L + 2; vector pencilIds = pencils.getIds(pencili); + // Calculate the max and min refinement levels in this pencil. + // All cells that are not on the max refinement level will be split + // Into multiple pencils. This has to be taken into account when adding + // up the contributions from each pencil. 
int maxRefLvl = 0; int minRefLvl = mpiGrid.get_maximum_refinement_level(); for (auto id : pencilIds) { @@ -1304,11 +1309,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& Realf* blockData = spatial_cell->get_data(blockLID, popID); Realf areaRatio = 1.0; - if (maxRefLvl > minRefLvl && - spatial_cell->parameters[CellParams::DX] == P::dx_ini && - spatial_cell->parameters[CellParams::DY] == P::dy_ini && - spatial_cell->parameters[CellParams::DZ] == P::dz_ini) { - areaRatio = 1.0 / pow(pow(2, maxRefLvl - minRefLvl), 2); + if (spatial_cell->parameters[CellParams::REFINEMENT_LEVEL] < maxRefLvl) { + areaRatio = 1.0 / pow(pow(2, maxRefLvl - spatial_cell->parameters[CellParams::REFINEMENT_LEVEL]), 2); } for(int i = 0; i < WID3 ; i++) { From 96d27a22a9b55f2ab3121c787754c313d43cfb52 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 2 Nov 2018 11:45:06 +0200 Subject: [PATCH 121/602] Removed compiler warnings about signed - unsigned int comparison by casting the unsigned ints to ints. --- vlasovsolver/cpu_trans_map.cpp | 4 ++-- vlasovsolver/cpu_trans_map_amr.cpp | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index 73ac6ce2c..24b432ccc 100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -91,7 +91,7 @@ CellID get_spatial_neighbor(const dccrg::Dccrg= length[i] ) + while(indices[i] >= static_cast(length[i]) ) indices[i] -= length[i]; } } @@ -99,7 +99,7 @@ CellID get_spatial_neighbor(const dccrg::Dccrg=length[i]) + if(indices[i]>=static_cast(length[i])) return INVALID_CELLID; } //store nbr indices into the correct datatype diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 2624c1c1d..0ac66876d 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -403,7 +403,7 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg= refLvl ) { + if ( static_cast(path.size()) >= refLvl ) { if(debug) { std::cout << "I am cell " << id << ". 
"; @@ -413,7 +413,7 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg& const Realv dt, const uint popID) { - const bool printPencils = true; + const bool printPencils = false; const bool printLines = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ From 9248bcf111590afa4f348617dd6a81d86dce0446 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 2 Nov 2018 12:12:15 +0200 Subject: [PATCH 122/602] Adjusted refinement criteria --- projects/testAmr/testAmr.cpp | 46 ++++++++++++++++++++---------------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index 62d16d0c9..921715dd3 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -264,9 +264,10 @@ namespace projects { std::vector refineSuccess; - // Refine the top-right quadrant of the box (-boundaries) - for (double x = xyz_mid[0]; x < P::xmax - P::dx_ini; x += P::dx_ini) { - for (double y = xyz_mid[1]; y < P::ymax - P::dy_ini; y += P::dy_ini) { + int boxHalfWidth = 1; + + for (double x = xyz_mid[0] - boxHalfWidth * P::dx_ini; x <= xyz_mid[0] + boxHalfWidth * P::dx_ini; x += P::dx_ini) { + for (double y = xyz_mid[1] - boxHalfWidth * P::dx_ini; y <= xyz_mid[1] + boxHalfWidth * P::dx_ini; y += P::dy_ini) { auto xyz = xyz_mid; xyz[0] = x; xyz[1] = y; @@ -274,44 +275,49 @@ namespace projects { //CellID myCell = mpiGrid.get_existing_cell(xyz); //std::cout << "Got cell ID " << myCell << std::endl; //refineSuccess.push_back(mpiGrid.refine_completely(myCell)); - refineSuccess.push_back(mpiGrid.refine_completely_at(xyz)); } - } - - std::vector refinedCells = mpiGrid.stop_refining(true); - + } + std::vector refinedCells = mpiGrid.stop_refining(true); cout << "Finished first level of refinement" << endl; - - // cout << "Refined Cells are: "; - // for (auto cellid : refinedCells) { - // cout << cellid << " "; - // } - // cout << endl; + cout << "Refined Cells are: "; + for (auto cellid : refinedCells) { + cout << cellid << " "; + } + cout << endl; // auto xyz = xyz_mid; - // xyz[0] = 1.5 * xyz[0]; - // xyz[1] = 1.5 * xyz[1]; + // xyz[0] = 1.4 * xyz[0]; + // xyz[1] = 1.4 * xyz[1]; // std::cout << "Trying to refine at " << xyz[0] << ", " << xyz[1] << ", " << xyz[2] << std::endl; // CellID myCell = mpiGrid.get_existing_cell(xyz); // std::cout << "Got cell ID " << myCell << std::endl; + // int refLvl = mpiGrid.get_refinement_level(myCell); + // std::cout << "Refinement level is " << refLvl << std::endl; // //mpiGrid.refine_completely_at(xyz); // mpiGrid.refine_completely(myCell); // refinedCells.clear(); // refinedCells = mpiGrid.stop_refining(true); - // cout << "Finished second level of refinement" << endl; + // cout << "Finished second level of refinement" << endl; // cout << "Refined Cells are: "; // for (auto cellid : refinedCells) { // cout << cellid << " "; // } // cout << endl; - //mpiGrid.write_vtk_file("mpiGrid.vtk"); - + + // mpiGrid.refine_completely_at(xyz_mid); + // mpiGrid.stop_refining(); + // mpiGrid.refine_completely_at(xyz_mid); + // mpiGrid.stop_refining(); + // mpiGrid.unrefine_completely_at(xyz_mid); + // mpiGrid.stop_refining(); + mpiGrid.balance_load(); cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - return std::all_of(refineSuccess.begin(), refineSuccess.end(), [](bool v) { return v; }); + // return std::all_of(refineSuccess.begin(), refineSuccess.end(), [](bool v) { return v; }); + return true; } }// namespace projects From 
f0c72277b08e45c1d8cf55ab2b406a6ce7faecc1 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 2 Nov 2018 12:24:28 +0200 Subject: [PATCH 123/602] Added unique message tags to each fsgrid object. Some debugging enabled. --- vlasiator.cpp | 188 +++++++++++++++++++++++++++++--------------------- 1 file changed, 108 insertions(+), 80 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index afabbbac8..5e2f3003f 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -269,7 +269,7 @@ int main(int argn,char* args[]) { Real newDt; bool dtIsChanged; - const bool printLines = false; + const bool printLines = true; const bool printCells = false; const bool printSums = false; @@ -406,20 +406,23 @@ int main(int argn,char* args[]) { std::array periodicity{mpiGrid.topology.is_periodic(0), mpiGrid.topology.is_periodic(1), mpiGrid.topology.is_periodic(2)}; + + const int tagOffset = 1e6; + int tagId = 0; - FsGrid< std::array, 2> perBGrid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> perBDt2Grid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> EGrid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> EDt2Grid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> EHallGrid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> EGradPeGrid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> momentsGrid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> momentsDt2Grid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> dPerBGrid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> dMomentsGrid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> BgBGrid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> volGrid(fsGridDimensions, comm, periodicity); - FsGrid< fsgrids::technical, 2> technicalGrid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> perBGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); + FsGrid< std::array, 2> perBDt2Grid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); + FsGrid< std::array, 2> EGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); + FsGrid< std::array, 2> EDt2Grid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); + FsGrid< std::array, 2> EHallGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); + FsGrid< std::array, 2> EGradPeGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); + FsGrid< std::array, 2> momentsGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); + FsGrid< std::array, 2> momentsDt2Grid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); + FsGrid< std::array, 2> dPerBGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); + FsGrid< std::array, 2> dMomentsGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); + FsGrid< std::array, 2> BgBGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); + FsGrid< std::array, 2> volGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); + FsGrid< fsgrids::technical, 2> technicalGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId); // Set DX,DY and DZ // TODO: This is currently just taking the values from cell 1, and assuming them to be // constant throughout the simulation. @@ -450,19 +453,25 @@ int main(int argn,char* args[]) { // Couple FSGrids to mpiGrid // TODO: Do we really need to couple *all* of these fields? - perBGrid. setupForGridCoupling(); - perBDt2Grid. setupForGridCoupling(); - EGrid. setupForGridCoupling(); - EDt2Grid. setupForGridCoupling(); - EHallGrid. 
setupForGridCoupling(); - EGradPeGrid. setupForGridCoupling(); - momentsGrid. setupForGridCoupling(); - momentsDt2Grid.setupForGridCoupling(); - dPerBGrid. setupForGridCoupling(); - dMomentsGrid. setupForGridCoupling(); - BgBGrid. setupForGridCoupling(); - volGrid. setupForGridCoupling(); - technicalGrid. setupForGridCoupling(); + + bool debugFsgrid = false; + + perBGrid. setupForGridCoupling(debugFsgrid); + perBDt2Grid. setupForGridCoupling(debugFsgrid); + + debugFsgrid = false; + + EGrid. setupForGridCoupling(debugFsgrid); + EDt2Grid. setupForGridCoupling(debugFsgrid); + EHallGrid. setupForGridCoupling(debugFsgrid); + EGradPeGrid. setupForGridCoupling(debugFsgrid); + momentsGrid. setupForGridCoupling(debugFsgrid); + momentsDt2Grid.setupForGridCoupling(debugFsgrid); + dPerBGrid. setupForGridCoupling(debugFsgrid); + dMomentsGrid. setupForGridCoupling(debugFsgrid); + BgBGrid. setupForGridCoupling(debugFsgrid); + volGrid. setupForGridCoupling(debugFsgrid); + technicalGrid. setupForGridCoupling(debugFsgrid); // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. @@ -470,19 +479,25 @@ int main(int argn,char* args[]) { const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, fsGridDimensions, dccrgId); for (auto fsgridId : fsgridIds) { - perBGrid. setGridCoupling(fsgridId, myRank); - perBDt2Grid. setGridCoupling(fsgridId, myRank); - EGrid. setGridCoupling(fsgridId, myRank); - EDt2Grid. setGridCoupling(fsgridId, myRank); - EHallGrid. setGridCoupling(fsgridId, myRank); - EGradPeGrid. setGridCoupling(fsgridId, myRank); - momentsGrid. setGridCoupling(fsgridId, myRank); - momentsDt2Grid.setGridCoupling(fsgridId, myRank); - dPerBGrid. setGridCoupling(fsgridId, myRank); - dMomentsGrid. setGridCoupling(fsgridId, myRank); - BgBGrid. setGridCoupling(fsgridId, myRank); - volGrid. setGridCoupling(fsgridId, myRank); - technicalGrid. setGridCoupling(fsgridId, myRank); + + debugFsgrid = false; + + perBGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + perBDt2Grid. setGridCoupling(fsgridId, myRank, debugFsgrid); + + debugFsgrid = false; + + EGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + EDt2Grid. setGridCoupling(fsgridId, myRank, debugFsgrid); + EHallGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + EGradPeGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + momentsGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + momentsDt2Grid.setGridCoupling(fsgridId, myRank, debugFsgrid); + dPerBGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + dMomentsGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + BgBGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + volGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + technicalGrid. 
setGridCoupling(fsgridId, myRank, debugFsgrid); } } @@ -940,7 +955,7 @@ int main(int argn,char* args[]) { //Re-loadbalance if needed //TODO - add LB measure and do LB if it exceeds threshold #warning Re-loadbalance has been disabled temporarily for amr debugging - if(((P::tstep % P::rebalanceInterval == 0 && P::tstep > P::tstep_min) || overrideRebalanceNow) && false) { + if(((P::tstep % P::rebalanceInterval == 0 && P::tstep > P::tstep_min) || overrideRebalanceNow)) { logFile << "(LB): Start load balance, tstep = " << P::tstep << " t = " << P::t << endl << writeVerbose; balanceLoad(mpiGrid, sysBoundaries); addTimedBarrier("barrier-end-load-balance"); @@ -962,43 +977,56 @@ int main(int argn,char* args[]) { for(auto id : cells) cout << id << " "; cout << endl; - perBGrid. setupForGridCoupling(); - perBDt2Grid. setupForGridCoupling(); - EGrid. setupForGridCoupling(); - EDt2Grid. setupForGridCoupling(); - EHallGrid. setupForGridCoupling(); - EGradPeGrid. setupForGridCoupling(); - momentsGrid. setupForGridCoupling(); - momentsDt2Grid.setupForGridCoupling(); - dPerBGrid. setupForGridCoupling(); - dMomentsGrid. setupForGridCoupling(); - BgBGrid. setupForGridCoupling(); - volGrid. setupForGridCoupling(); - technicalGrid. setupForGridCoupling(); + debugFsgrid = true; + + perBGrid. setupForGridCoupling(debugFsgrid); + + debugFsgrid = false; + + perBDt2Grid. setupForGridCoupling(debugFsgrid); + + EGrid. setupForGridCoupling(debugFsgrid); + EDt2Grid. setupForGridCoupling(debugFsgrid); + EHallGrid. setupForGridCoupling(debugFsgrid); + EGradPeGrid. setupForGridCoupling(debugFsgrid); + momentsGrid. setupForGridCoupling(debugFsgrid); + momentsDt2Grid.setupForGridCoupling(debugFsgrid); + dPerBGrid. setupForGridCoupling(debugFsgrid); + dMomentsGrid. setupForGridCoupling(debugFsgrid); + BgBGrid. setupForGridCoupling(debugFsgrid); + volGrid. setupForGridCoupling(debugFsgrid); + technicalGrid. setupForGridCoupling(debugFsgrid); if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. - cout << "send tags are: " << endl; + // cout << "send tags are: " << endl; for(auto& dccrgId : cells) { const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, fsGridDimensions, dccrgId); //cout << "Fsgrid ids for cell " << dccrgId << " are: "; for (auto& fsgridId : fsgridIds) { //cout << fsgridId << " "; - perBGrid. setGridCoupling(fsgridId, myRank); - perBDt2Grid. setGridCoupling(fsgridId, myRank); - EGrid. setGridCoupling(fsgridId, myRank); - EDt2Grid. setGridCoupling(fsgridId, myRank); - EHallGrid. setGridCoupling(fsgridId, myRank); - EGradPeGrid. setGridCoupling(fsgridId, myRank); - momentsGrid. setGridCoupling(fsgridId, myRank); - momentsDt2Grid.setGridCoupling(fsgridId, myRank); - dPerBGrid. setGridCoupling(fsgridId, myRank); - dMomentsGrid. setGridCoupling(fsgridId, myRank); - BgBGrid. setGridCoupling(fsgridId, myRank); - volGrid. setGridCoupling(fsgridId, myRank); - technicalGrid. setGridCoupling(fsgridId, myRank); + + debugFsgrid = true; + + perBGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + + debugFsgrid = false; + + perBDt2Grid. setGridCoupling(fsgridId, myRank, debugFsgrid); + + EGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + EDt2Grid. setGridCoupling(fsgridId, myRank, debugFsgrid); + EHallGrid. 
setGridCoupling(fsgridId, myRank, debugFsgrid); + EGradPeGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + momentsGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + momentsDt2Grid.setGridCoupling(fsgridId, myRank, debugFsgrid); + dPerBGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + dMomentsGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + BgBGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + volGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + technicalGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); } //cout << endl; } @@ -1006,18 +1034,18 @@ int main(int argn,char* args[]) { if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; perBGrid. finishGridCoupling(); - perBDt2Grid. finishGridCoupling(); - EGrid. finishGridCoupling(); - EDt2Grid. finishGridCoupling(); - EHallGrid. finishGridCoupling(); - EGradPeGrid. finishGridCoupling(); - momentsGrid. finishGridCoupling(); - momentsDt2Grid.finishGridCoupling(); - dPerBGrid. finishGridCoupling(); - dMomentsGrid. finishGridCoupling(); - BgBGrid. finishGridCoupling(); - volGrid. finishGridCoupling(); - technicalGrid. finishGridCoupling(); + // perBDt2Grid. finishGridCoupling(); + // EGrid. finishGridCoupling(); + // EDt2Grid. finishGridCoupling(); + // EHallGrid. finishGridCoupling(); + // EGradPeGrid. finishGridCoupling(); + // momentsGrid. finishGridCoupling(); + // momentsDt2Grid.finishGridCoupling(); + // dPerBGrid. finishGridCoupling(); + // dMomentsGrid. finishGridCoupling(); + // BgBGrid. finishGridCoupling(); + // volGrid. finishGridCoupling(); + // technicalGrid. finishGridCoupling(); phiprof::stop("fsgrid-recouple-after-lb"); overrideRebalanceNow = false; From 9e4b1730260d6b6fb54c2977a143b8dbdb96fe0e Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Fri, 2 Nov 2018 15:18:19 +0200 Subject: [PATCH 124/602] Optimized the offset for fsgrid MPI tags to be equal to the size of fsgrid. This should make it less likely to get illegal tags. Deactivated debugging.
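In outline, each FsGrid instance now gets its own disjoint window of MPI tags, sized by the ghost-padded grid volume. A condensed sketch of the arithmetic (names as in the vlasiator.cpp hunk below; the +4 per dimension is, I assume, the two ghost layers on each side at stencil width 2, and the perBGrid template arguments are restored here from memory of the codebase, so treat them as illustrative):

    // Total number of fsgrid cells including ghost layers.
    const int fsGridSize = (fsGridDimensions[0] + 4)
                         * (fsGridDimensions[1] + 4)
                         * (fsGridDimensions[2] + 4);
    // Adding 1 just to be safe; each grid's tags stay below the next window.
    const int tagOffset = fsGridSize + 1;
    int tagId = 0;
    // perBGrid uses tags [0, tagOffset), perBDt2Grid [tagOffset, 2*tagOffset), etc.
    FsGrid< std::array<Real, fsgrids::bfield::N_BFIELD>, 2>
       perBGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++);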
--- MAKE/Makefile.sisu_gcc | 12 ++++++------ vlasiator.cpp | 34 +++++++++++----------------------- 2 files changed, 17 insertions(+), 29 deletions(-) diff --git a/MAKE/Makefile.sisu_gcc b/MAKE/Makefile.sisu_gcc index 82b0ca340..d8b11be9c 100644 --- a/MAKE/Makefile.sisu_gcc +++ b/MAKE/Makefile.sisu_gcc @@ -46,8 +46,8 @@ FLAGS = #GNU flags: CC_BRAND = gcc CC_BRAND_VERSION = 5.1.0 -CXXFLAGS += -O3 -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2 -testpackage: CXXFLAGS = -O2 -fopenmp -funroll-loops -std=c++0x -fabi-version=0 -mavx +CXXFLAGS += -g -O2 -static -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2 +testpackage: CXXFLAGS = -g -fopenmp -funroll-loops -std=c++0x -fabi-version=0 -mavx MATHFLAGS = -ffast-math LDFLAGS = @@ -82,9 +82,9 @@ INC_PROFILE = -I$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_V #header libraries INC_EIGEN = -I$(LIBRARY_PREFIX)/eigen/ -INC_DCCRG = -I$(LIBRARY_PREFIX)/dccrg/ +#INC_DCCRG = -I$(LIBRARY_PREFIX)/dccrg/ INC_VECTORCLASS = -I$(LIBRARY_PREFIX)/vectorclass -INC_FSGRID = -I$(LIBRARY_PREFIX)/fsgrid - - +#INC_FSGRID = -I$(LIBRARY_PREFIX)/fsgrid +INC_FSGRID = -I/homeappl/home/koskelat/lib/fsgrid/ +INC_DCCRG = -I/homeappl/home/koskelat/lib/dccrg/ diff --git a/vlasiator.cpp b/vlasiator.cpp index 5e2f3003f..e3b3e349f 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -402,12 +402,14 @@ int main(int argn,char* args[]) { const std::array fsGridDimensions = {convert(P::xcells_ini) * pow(2,P::amrMaxSpatialRefLevel), convert(P::ycells_ini) * pow(2,P::amrMaxSpatialRefLevel), convert(P::zcells_ini) * pow(2,P::amrMaxSpatialRefLevel)}; - const int fsGridSize = fsGridDimensions[0] * fsGridDimensions[1] * fsGridDimensions[2]; + std::array periodicity{mpiGrid.topology.is_periodic(0), mpiGrid.topology.is_periodic(1), mpiGrid.topology.is_periodic(2)}; - const int tagOffset = 1e6; + const int fsGridSize = (fsGridDimensions[0] + 4) * (fsGridDimensions[1] + 4) * (fsGridDimensions[2] + 4); + // adding 1 just to be safe + const int tagOffset = fsGridSize + 1; int tagId = 0; FsGrid< std::array, 2> perBGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); @@ -457,10 +459,7 @@ int main(int argn,char* args[]) { bool debugFsgrid = false; perBGrid. setupForGridCoupling(debugFsgrid); - perBDt2Grid. setupForGridCoupling(debugFsgrid); - - debugFsgrid = false; - + perBDt2Grid. setupForGridCoupling(debugFsgrid); EGrid. setupForGridCoupling(debugFsgrid); EDt2Grid. setupForGridCoupling(debugFsgrid); EHallGrid. setupForGridCoupling(debugFsgrid); @@ -483,10 +482,7 @@ int main(int argn,char* args[]) { debugFsgrid = false; perBGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); - perBDt2Grid. setGridCoupling(fsgridId, myRank, debugFsgrid); - - debugFsgrid = false; - + perBDt2Grid. setGridCoupling(fsgridId, myRank, debugFsgrid); EGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); EDt2Grid. setGridCoupling(fsgridId, myRank, debugFsgrid); EHallGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); @@ -973,18 +969,14 @@ int main(int argn,char* args[]) { const vector& cells = getLocalCells(); - cout << "Local cells are: "; + cout << "Reloadbalance: Local cells are: "; for(auto id : cells) cout << id << " "; cout << endl; - debugFsgrid = true; - - perBGrid. setupForGridCoupling(debugFsgrid); - debugFsgrid = false; - perBDt2Grid. setupForGridCoupling(debugFsgrid); - + perBGrid. setupForGridCoupling(debugFsgrid); + perBDt2Grid. setupForGridCoupling(debugFsgrid); EGrid. setupForGridCoupling(debugFsgrid); EDt2Grid. 
setupForGridCoupling(debugFsgrid); EHallGrid. setupForGridCoupling(debugFsgrid); @@ -1008,14 +1000,10 @@ int main(int argn,char* args[]) { for (auto& fsgridId : fsgridIds) { //cout << fsgridId << " "; - debugFsgrid = true; - - perBGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); - debugFsgrid = false; - perBDt2Grid. setGridCoupling(fsgridId, myRank, debugFsgrid); - + perBGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + perBDt2Grid. setGridCoupling(fsgridId, myRank, debugFsgrid); EGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); EDt2Grid. setGridCoupling(fsgridId, myRank, debugFsgrid); EHallGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); From dee9e3a3abcd81994a8334132914698a625ce030 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Fri, 2 Nov 2018 15:30:33 +0200 Subject: [PATCH 125/602] Removed comments from finishGridCoupling calls --- vlasiator.cpp | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index e3b3e349f..3bcdf15b7 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -1022,18 +1022,18 @@ int main(int argn,char* args[]) { if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; perBGrid. finishGridCoupling(); - // perBDt2Grid. finishGridCoupling(); - // EGrid. finishGridCoupling(); - // EDt2Grid. finishGridCoupling(); - // EHallGrid. finishGridCoupling(); - // EGradPeGrid. finishGridCoupling(); - // momentsGrid. finishGridCoupling(); - // momentsDt2Grid.finishGridCoupling(); - // dPerBGrid. finishGridCoupling(); - // dMomentsGrid. finishGridCoupling(); - // BgBGrid. finishGridCoupling(); - // volGrid. finishGridCoupling(); - // technicalGrid. finishGridCoupling(); + perBDt2Grid. finishGridCoupling(); + EGrid. finishGridCoupling(); + EDt2Grid. finishGridCoupling(); + EHallGrid. finishGridCoupling(); + EGradPeGrid. finishGridCoupling(); + momentsGrid. finishGridCoupling(); + momentsDt2Grid.finishGridCoupling(); + dPerBGrid. finishGridCoupling(); + dMomentsGrid. finishGridCoupling(); + BgBGrid. finishGridCoupling(); + volGrid. finishGridCoupling(); + technicalGrid. finishGridCoupling(); phiprof::stop("fsgrid-recouple-after-lb"); overrideRebalanceNow = false; From 29313269e3eff2e9f9edd52f2115818514d9adad Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 5 Nov 2018 11:08:50 +0200 Subject: [PATCH 126/602] Changed datatype of id in buildPencilsWithNeighbors from uint to int. 
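For the record, switching the id to a signed type guards against the usual unsigned pitfalls once an id is decremented or compared against a sentinel. This is my reading of the motivation, not code from the repo; a hypothetical illustration:

    // With an unsigned id, subtraction silently wraps around.
    uint uid = 0;
    if (uid - 1 > 0) { /* taken: 0u - 1 wraps to UINT_MAX */ }
    int id = 0;
    if (id - 1 > 0)  { /* not taken, as intended */ }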
--- vlasovsolver/cpu_trans_map_amr.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 0ac66876d..5e4527ae0 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -336,7 +336,7 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg Date: Mon, 5 Nov 2018 12:12:06 +0200 Subject: [PATCH 127/602] Modified grid dimensions in testAmr.cfg --- projects/testAmr/testAmr.cfg | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/projects/testAmr/testAmr.cfg b/projects/testAmr/testAmr.cfg index 581ca410c..6af0d38ae 100644 --- a/projects/testAmr/testAmr.cfg +++ b/projects/testAmr/testAmr.cfg @@ -23,13 +23,13 @@ system_write_distribution_zline_stride = 0 [gridbuilder] -x_length = 9 -y_length = 3 +x_length = 5 +y_length = 5 z_length = 3 x_min = 0.0 x_max = 1.8e6 y_min = 0.0 -y_max = 0.6e6 +y_max = 1.8e6 z_min = 0.0 z_max = 0.6e6 timestep_max = 100 From a0cbba359530adf3a87e3c21b80a3a3ad92272e3 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 5 Nov 2018 12:16:59 +0200 Subject: [PATCH 128/602] Bug fix in the selection criteria for refinement. dx_ini --> dy_ini --- projects/testAmr/testAmr.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index 921715dd3..3fefefff1 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -267,7 +267,7 @@ namespace projects { int boxHalfWidth = 1; for (double x = xyz_mid[0] - boxHalfWidth * P::dx_ini; x <= xyz_mid[0] + boxHalfWidth * P::dx_ini; x += P::dx_ini) { - for (double y = xyz_mid[1] - boxHalfWidth * P::dx_ini; y <= xyz_mid[1] + boxHalfWidth * P::dx_ini; y += P::dy_ini) { + for (double y = xyz_mid[1] - boxHalfWidth * P::dy_ini; y <= xyz_mid[1] + boxHalfWidth * P::dy_ini; y += P::dy_ini) { auto xyz = xyz_mid; xyz[0] = x; xyz[1] = y; From 9d27cce59e05f8b0991db404d52e3fbf860aad11 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 5 Nov 2018 12:29:14 +0200 Subject: [PATCH 129/602] Changed to get_neighbors_of instead of get_face_neighbors_of everywhere. 
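The shape of the neighbor data changes with this commit; a sketch of the two call styles as they appear in the hunks below (exact dccrg return types abbreviated):

    // Before: get_face_neighbors_of(id) returns (CellID, int) pairs, where
    // second == +-(dimension + 1) encodes the face direction.
    //
    // After: get_neighbors_of(id, neighborhood) returns a pointer to a
    // vector of (CellID, offset) pairs, where second[dimension] == +-1.
    const auto* nbrPairs = grid.get_neighbors_of(id, neighborhood);
    for (const auto& nbrPair : *nbrPairs) {
       // Positive face neighbor on the same process as the origin cell.
       if (nbrPair.second[dimension] == 1 &&
           grid.get_process(nbrPair.first) == myProcess) {
          myNeighbors.push_back(nbrPair.first);
       }
    }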
--- vlasovsolver/cpu_trans_map_amr.cpp | 95 +++++++++++++++--------------- 1 file changed, 49 insertions(+), 46 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 5e4527ae0..2ce18f40e 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -26,6 +26,42 @@ using namespace spatial_cell; #define i_trans_ps_blockv_pencil(planeVectorIndex, planeIndex, blockIndex, lengthOfPencil) ( (blockIndex) + VLASOV_STENCIL_WIDTH + ( (planeVectorIndex) + (planeIndex) * VEC_PER_PLANE ) * ( lengthOfPencil + 2 * VLASOV_STENCIL_WIDTH) ) +int getNeighborhood(const uint dimension, const uint stencil) { + + int neighborhood = 0; + + if (stencil == 1) { + switch (dimension) { + case 0: + neighborhood = VLASOV_SOLVER_TARGET_X_NEIGHBORHOOD_ID; + break; + case 1: + neighborhood = VLASOV_SOLVER_TARGET_Y_NEIGHBORHOOD_ID; + break; + case 2: + neighborhood = VLASOV_SOLVER_TARGET_Z_NEIGHBORHOOD_ID; + break; + } + } + + if (stencil == 2) { + switch (dimension) { + case 0: + neighborhood = VLASOV_SOLVER_X_NEIGHBORHOOD_ID; + break; + case 1: + neighborhood = VLASOV_SOLVER_Y_NEIGHBORHOOD_ID; + break; + case 2: + neighborhood = VLASOV_SOLVER_Z_NEIGHBORHOOD_ID; + break; + } + } + + return neighborhood; + +} + void computeSpatialSourceCellsForPencil(const dccrg::Dccrg& mpiGrid, setOfPencils pencils, const uint iPencil, @@ -36,18 +72,7 @@ void computeSpatialSourceCellsForPencil(const dccrg::Dccrg ids = pencils.getIds(iPencil); - int neighborhood = 0; - switch (dimension) { - case 0: - neighborhood = VLASOV_SOLVER_X_NEIGHBORHOOD_ID; - break; - case 1: - neighborhood = VLASOV_SOLVER_Y_NEIGHBORHOOD_ID; - break; - case 2: - neighborhood = VLASOV_SOLVER_Z_NEIGHBORHOOD_ID; - break; - } + int neighborhood = getNeighborhood(dimension,2); // std::cout << "Source cells: "; // Get pointers for each cell id of the pencil @@ -213,6 +238,8 @@ void computeSpatialTargetCellsForPencils(const dccrg::Dccrg ids = pencils.getIds(iPencil); - - int neighborhood = 0; - switch (dimension) { - case 0: - neighborhood = VLASOV_SOLVER_TARGET_X_NEIGHBORHOOD_ID; - break; - case 1: - neighborhood = VLASOV_SOLVER_TARGET_Y_NEIGHBORHOOD_ID; - break; - case 2: - neighborhood = VLASOV_SOLVER_TARGET_Z_NEIGHBORHOOD_ID; - break; - } //std::cout << "Target cells for pencil " << iPencil << ": "; @@ -301,15 +315,17 @@ void computeSpatialTargetCellsForPencils(const dccrg::Dccrg &grid, CellID id, int dimension = 0, uint path = 0) { - const auto neighbors = grid.get_face_neighbors_of(id); + int neighborhood = getNeighborhood(dimension,1); + + const auto* neighbors = grid.get_neighbors_of(id, neighborhood); const int myProcess = grid.get_process(id); vector < CellID > myNeighbors; // Collect neighbor ids in the positive direction of the chosen dimension, // that are on the same process as the origin. 
// Note that dimension indexing starts from 1 (of course it does) - for (const auto cell : neighbors) { - if (cell.second == dimension + 1 && grid.get_process(cell.first) == myProcess) + for (const auto cell : *neighbors) { + if (cell.second[dimension] == 1 && grid.get_process(cell.first) == myProcess) myNeighbors.push_back(cell.first); } @@ -629,6 +645,7 @@ void get_seed_ids(const dccrg::Dccrg& mpi vector &seedIds) { const bool debug = false; + int neighborhood = getNeighborhood(dimension,1); //#pragma omp parallel for for(auto celli: localPropagatedCells) { @@ -636,12 +653,12 @@ void get_seed_ids(const dccrg::Dccrg& mpi // Collect a list of cell ids that do not have a neighbor in the negative direction // These are the seed ids for the pencils. vector negativeNeighbors; - // Returns all neighbors as (id, direction-dimension) pairs. - for ( const auto neighbor : mpiGrid.get_face_neighbors_of(celli ) ) { + // Returns all neighbors as (id, direction-dimension) pair pointers. + for ( const auto neighbor : *(mpiGrid.get_neighbors_of(celli, neighborhood)) ) { if ( mpiGrid.get_process(neighbor.first) == myProcess ) { // select the neighbor in the negative dimension of the propagation - if (neighbor.second == - (static_cast(dimension) + 1)) { + if (neighbor.second[dimension] == -1) { // add the id of the neighbor to a list if it's on the same process negativeNeighbors.push_back(neighbor.first); @@ -808,21 +825,7 @@ void check_ghost_cells(const dccrg::Dccrg uint dimension, int offset) { - uint neighborhoodId; - switch (dimension) { - case 0: - neighborhoodId = VLASOV_SOLVER_X_NEIGHBORHOOD_ID; - break; - case 1: - neighborhoodId = VLASOV_SOLVER_Y_NEIGHBORHOOD_ID; - break; - case 2: - neighborhoodId = VLASOV_SOLVER_Z_NEIGHBORHOOD_ID; - break; - default: - neighborhoodId = 0; - break; - } + int neighborhoodId = getNeighborhood(dimension,2); std::vector idsToSplit; From 3eff77f8d059eb38117fe46cdd214d40a5e69f3e Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 5 Nov 2018 12:48:46 +0200 Subject: [PATCH 130/602] Unified naming conventions --- vlasovsolver/cpu_trans_map_amr.cpp | 58 +++++++++--------------------- 1 file changed, 16 insertions(+), 42 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 2ce18f40e..1567ab388 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -90,27 +90,6 @@ void computeSpatialSourceCellsForPencil(const dccrg::Dccrg int neighborhood = getNeighborhood(dimension,1); - const auto* neighbors = grid.get_neighbors_of(id, neighborhood); + const auto* nbrPairs = grid.get_neighbors_of(id, neighborhood); const int myProcess = grid.get_process(id); vector < CellID > myNeighbors; // Collect neighbor ids in the positive direction of the chosen dimension, // that are on the same process as the origin. 
- // Note that dimension indexing starts from 1 (of course it does) - for (const auto cell : *neighbors) { - if (cell.second[dimension] == 1 && grid.get_process(cell.first) == myProcess) - myNeighbors.push_back(cell.first); + for (const auto nbrPair : *nbrPairs) { + if (nbrPair.second[dimension] == 1 && grid.get_process(nbrPair.first) == myProcess) + myNeighbors.push_back(nbrPair.first); } CellID neighbor = INVALID_CELLID; @@ -639,10 +617,10 @@ void propagatePencil(Vec* dz, Vec* values, const uint dimension, const uint bloc } } -void get_seed_ids(const dccrg::Dccrg& mpiGrid, - const vector &localPropagatedCells, - const uint dimension, - vector &seedIds) { +void getSeedIds(const dccrg::Dccrg& mpiGrid, + const vector &localPropagatedCells, + const uint dimension, + vector &seedIds) { const bool debug = false; int neighborhood = getNeighborhood(dimension,1); @@ -654,16 +632,12 @@ void get_seed_ids(const dccrg::Dccrg& mpi // These are the seed ids for the pencils. vector negativeNeighbors; // Returns all neighbors as (id, direction-dimension) pair pointers. - for ( const auto neighbor : *(mpiGrid.get_neighbors_of(celli, neighborhood)) ) { + for ( const auto nbrPair : *(mpiGrid.get_neighbors_of(celli, neighborhood)) ) { - if ( mpiGrid.get_process(neighbor.first) == myProcess ) { - // select the neighbor in the negative dimension of the propagation - if (neighbor.second[dimension] == -1) { - - // add the id of the neighbor to a list if it's on the same process - negativeNeighbors.push_back(neighbor.first); - - } + if ( nbrPair.second[dimension] == -1 && mpiGrid.get_process(nbrPair.first) == myProcess ) { + // select the first local neighbor in the negative direction and + // add the id of the neighbor to a list + negativeNeighbors.push_back(nbrPair.first); } } @@ -843,13 +817,13 @@ void check_ghost_cells(const dccrg::Dccrg const auto* backNeighbors = mpiGrid.get_neighbors_of(ids.back() ,neighborhoodId); int refLvl = 0; - for (pair> nbrPair: *frontNeighbors) { + for (auto nbrPair: *frontNeighbors) { if(nbrPair.second[dimension] == -offset) { refLvl = max(refLvl,mpiGrid.mapping.get_refinement_level(nbrPair.first)); } } - for (pair> nbrPair: *backNeighbors) { + for (auto nbrPair: *backNeighbors) { if(nbrPair.second[dimension] == offset) { refLvl = max(refLvl,mpiGrid.mapping.get_refinement_level(nbrPair.first)); } @@ -977,7 +951,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // std::cout << endl; vector seedIds; - get_seed_ids(mpiGrid, localPropagatedCells, dimension, seedIds); + getSeedIds(mpiGrid, localPropagatedCells, dimension, seedIds); // Empty vectors for internal use of buildPencilsWithNeighbors. Could be default values but // default vectors are complicated. Should overload buildPencilsWithNeighbors like suggested here From bf4b179866fc7646b22a4dc229f3a992e35234ad Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Tue, 6 Nov 2018 12:00:34 +0200 Subject: [PATCH 131/602] Fixes to gridGlue to get correct tags for mpi send/receives on multiple processes. Changed mapDccrgIdToFsgrid to mapDccrgIdToFsgridGlobalID since the global id should be passed to all fsgrid functions. I think there is still an issue here, all ranks post the same receive tags, which as far as I can tell means that the messages can end on any random process. Needs further investigation. 
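A condensed sketch of the renamed mapping (matching the gridGlue.cpp hunk below): a dccrg cell whose edge spans cellLength fsgrid cells covers cellLength^3 fsgrid global ids, enumerated x-fastest. One thing that may deserve a second look: fsgridDims is computed in the hunk as xcells_ini * (maxRefLvl + 1), whereas vlasiator.cpp sizes the fsgrid with pow(2, maxRefLvl); the two only agree for maxRefLvl <= 1.

    // Sketch only; fsgridDims, CellID and mpiGrid as in the hunk below.
    const auto cellLength     = mpiGrid.mapping.get_cell_length_in_indices(dccrgID);
    const auto topLeftIndices = mpiGrid.mapping.get_indices(dccrgID);
    std::vector<CellID> fsgridIDs;
    for (uint k = 0; k < cellLength; ++k) {
       for (uint j = 0; j < cellLength; ++j) {
          for (uint i = 0; i < cellLength; ++i) {
             fsgridIDs.push_back( (topLeftIndices[0] + i)
                                + (topLeftIndices[1] + j) * fsgridDims[0]
                                + (topLeftIndices[2] + k) * fsgridDims[0] * fsgridDims[1]);
          }
       }
    }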
--- fieldsolver/gridGlue.cpp | 63 ++++++++++++++++++---------------------- fieldsolver/gridGlue.hpp | 32 ++++++++++---------- vlasiator.cpp | 14 +++++++-- 3 files changed, 57 insertions(+), 52 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 4e4dc971d..cba3b1242 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -63,9 +63,11 @@ void feedMomentsIntoFsGrid(dccrg::Dccrg& } } + + for (uint i = 0;i < cells.size(); ++i) { CellID dccrgId = cells[i]; - const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, momentsGrid.getLocalSize(), dccrgId); + const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); for (auto fsgridId : fsgridIds) { momentsGrid.transferDataIn(fsgridId, &transferBuffer[i]); } @@ -123,7 +125,7 @@ void feedBgFieldsIntoFsGrid(dccrg::Dccrg& // Copy data into each fsgrid cell overlapping the dccrg cell for (uint i = 0; i < cells.size(); ++i) { CellID dccrgId = cells[i]; - const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, bgBGrid.getLocalSize(), dccrgId); + const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); for (auto fsgridId : fsgridIds) { bgBGrid.transferDataIn(fsgridId, &transferBuffer[i]); } @@ -148,7 +150,7 @@ void getVolumeFieldsFromFsGrid(FsGrid< std::array, for (auto dccrgId : cells) { // Assuming same local size in all fsgrids - const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, dperbGrid.getLocalSize(), dccrgId); + const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); // Store a pointer to the first fsgrid cell that maps to each dccrg Id dperbTransferBufferPointer.push_back(&dperbTransferBuffer[k]); dmomentsTransferBufferPointer.push_back(&dmomentsTransferBuffer[k]); @@ -371,7 +373,7 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m for(uint i = 0; i < cells.size(); ++i) { - const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, technicalGrid.getLocalSize(), cells[i]); + const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, cells[i]); for (auto fsgridId : fsgridIds) { // std::cout << "fsgridId: " << fsgridId << ", fsgrid Cell Coordinates:"; @@ -401,7 +403,7 @@ void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid, for(int i=0; i< cells.size(); i++) { transferBufferPointer.push_back(&transferBuffer[k]); - const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, technicalGrid.getLocalSize(), cells[i]); + const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, cells[i]); for (auto fsgridId : fsgridIds) { fsgrids::technical* thisCellData = &transferBuffer[k++]; technicalGrid.transferDataOut(fsgridId, thisCellData); @@ -438,42 +440,27 @@ void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid, } /* -Map from fsgrid cell id to dccrg cell id when they aren't identical (ie. when dccrg has refinement). -*/ -CellID mapFsGridIdToDccrg(FsGrid< fsgrids::technical, 2>& technicalGrid, - dccrg::Dccrg& mpiGrid, - CellID fsgridID) { - - auto cellCoord = technicalGrid.globalIDtoCellCoord(fsgridID); - // theoretically we could directly use cellCoord as indices for - // mpiGrid.get_cell_from_indices, if we knew the refinement level - // of the cell in advance. Going via cartesian coordinates is probably - // faster than iterating through refinement levels until we find the - // correct one. 
- std::array cartesianCoord; - cartesianCoord[0] = cellCoord[0] * technicalGrid.DX + P::xmin; - cartesianCoord[1] = cellCoord[1] * technicalGrid.DY + P::ymin; - cartesianCoord[2] = cellCoord[2] * technicalGrid.DZ + P::zmin; - CellID dccrgID = mpiGrid.get_existing_cell(cartesianCoord); - return dccrgID; - -} -/* -Map from dccrg cell id to fsgrid cell ids when they aren't identical (ie. when dccrg has refinement). +Map from dccrg cell id to fsgrid global cell ids when they aren't identical (ie. when dccrg has refinement). */ -std::vector mapDccrgIdToFsGrid(dccrg::Dccrg& mpiGrid, - std::array fsgridDims, CellID dccrgID) { +std::vector mapDccrgIdToFsGridGlobalID(dccrg::Dccrg& mpiGrid, + CellID dccrgID) { + const auto cellLength = mpiGrid.mapping.get_cell_length_in_indices(dccrgID); const auto gridLength = mpiGrid.length.get(); - const auto maxRefLvl = mpiGrid.mapping.get_maximum_refinement_level(); + const auto maxRefLvl = mpiGrid.get_maximum_refinement_level(); const auto topLeftIndices = mpiGrid.mapping.get_indices(dccrgID); std::array indices; std::vector> allIndices; + + std::array fsgridDims; + fsgridDims[0] = P::xcells_ini * (mpiGrid.get_maximum_refinement_level() + 1); + fsgridDims[1] = P::ycells_ini * (mpiGrid.get_maximum_refinement_level() + 1); + fsgridDims[2] = P::zcells_ini * (mpiGrid.get_maximum_refinement_level() + 1); - for (uint i = 0; i < cellLength; ++i) { + for (uint k = 0; k < cellLength; ++k) { for (uint j = 0; j < cellLength; ++j) { - for (uint k = 0; k < cellLength; ++k) { + for (uint i = 0; i < cellLength; ++i) { indices[0] = topLeftIndices[0] + i; indices[1] = topLeftIndices[1] + j; indices[2] = topLeftIndices[2] + k; @@ -482,9 +469,15 @@ std::vector mapDccrgIdToFsGrid(dccrg::Dccrg fsgridIDs; + std::vector fsgridIDs; + + for (auto cellCoord: allIndices) { - fsgridIDs.push_back(cellCoord[0] + cellCoord[1] * fsgridDims[0] + cellCoord[2] * fsgridDims[1] * fsgridDims[0]); + + fsgridIDs.push_back(cellCoord[0] + + cellCoord[1] * fsgridDims[0] + + cellCoord[2] * fsgridDims[1] * fsgridDims[0]); + } return fsgridIDs; diff --git a/fieldsolver/gridGlue.hpp b/fieldsolver/gridGlue.hpp index 86a896d52..7deaae6fa 100644 --- a/fieldsolver/gridGlue.hpp +++ b/fieldsolver/gridGlue.hpp @@ -3,13 +3,8 @@ #include #include -std::vector mapDccrgIdToFsGrid(dccrg::Dccrg& mpiGrid, - std::array fsgridDims, CellID dccrgID); - -CellID mapFsGridIdToDccrg(FsGrid< fsgrids::technical, 2>& technicalGrid, - dccrg::Dccrg& mpiGrid, - CellID fsgridID); - +std::vector mapDccrgIdToFsGridGlobalID(dccrg::Dccrg& mpiGrid, + CellID dccrgID); /*! Take input moments from DCCRG grid and put them into the Fieldsolver grid * \param mpiGrid The DCCRG grid carrying rho, rhoV and P @@ -120,17 +115,24 @@ template< unsigned int numFields > void feedFieldDataIntoFsGrid( int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); targetGrid.setupForTransferIn(nCells); + int myRank; + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + //std::cout << "Process rank " << myRank << " send tags: "; + for(CellID dccrgId : cells) { - const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, targetGrid.getLocalSize(), dccrgId); - // TODO: This assumes that the field data are lying continuous in memory. - // Check definition of CellParams in common.h if unsure. - std::array* cellDataPointer = reinterpret_cast*>( + const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); + // TODO: This assumes that the field data are lying continuous in memory. + // Check definition of CellParams in common.h if unsure. 
+ std::array* cellDataPointer = reinterpret_cast*>( &(mpiGrid[dccrgId]->get_cell_parameters()[cellParamsIndex])); - for (auto fsgridId : fsgridIds) { - targetGrid.transferDataIn(fsgridId, cellDataPointer); - } + for (auto fsgridId : fsgridIds) { + targetGrid.transferDataIn(fsgridId, cellDataPointer); + } } + //std::cout << std::endl; + //std::cout << std::endl; + targetGrid.finishTransfersIn(); } @@ -175,7 +177,7 @@ template< unsigned int numFields > void getFieldDataFromFsGrid( // Check definition of CellParams in common.h if unsure. std::array* cellDataPointer = reinterpret_cast*>( &(mpiGrid[dccrgId]->get_cell_parameters()[index])); - const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, sourceGrid.getLocalSize(), dccrgId); + const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); for (auto fsgridId : fsgridIds) { sourceGrid.transferDataIn(fsgridId, cellDataPointer); } diff --git a/vlasiator.cpp b/vlasiator.cpp index 3bcdf15b7..cca80310a 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -475,7 +475,7 @@ int main(int argn,char* args[]) { // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. for(auto& dccrgId : cells) { - const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, fsGridDimensions, dccrgId); + const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); for (auto fsgridId : fsgridIds) { @@ -497,6 +497,8 @@ int main(int argn,char* args[]) { } } + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + perBGrid. finishGridCoupling(); perBDt2Grid. finishGridCoupling(); EGrid. finishGridCoupling(); @@ -512,15 +514,23 @@ int main(int argn,char* args[]) { technicalGrid. finishGridCoupling(); phiprof::stop("Initial fsgrid coupling"); + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + // Transfer initial field configuration into the FsGrids feedFieldDataIntoFsGrid(mpiGrid,cells,CellParams::PERBX,perBGrid); + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + feedBgFieldsIntoFsGrid(mpiGrid,cells,BgBGrid); BgBGrid.updateGhostCells(); + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + setupTechnicalFsGrid(mpiGrid, cells, technicalGrid); technicalGrid.updateGhostCells(); + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + // WARNING this means moments and dt2 moments are the same here. feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,false); feedMomentsIntoFsGrid(mpiGrid, cells, momentsDt2Grid,false); @@ -995,7 +1005,7 @@ int main(int argn,char* args[]) { // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. // cout << "send tags are: " << endl; for(auto& dccrgId : cells) { - const auto fsgridIds = mapDccrgIdToFsGrid(mpiGrid, fsGridDimensions, dccrgId); + const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); //cout << "Fsgrid ids for cell " << dccrgId << " are: "; for (auto& fsgridId : fsgridIds) { //cout << fsgridId << " "; From 19d509b1b27bc024e80a604225badcc51172e57b Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 6 Nov 2018 16:32:49 +0200 Subject: [PATCH 132/602] More debugging lines. 
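The debug prints added in this and the following commits all follow one idiom, shown here once (taken directly from the hunks) so the diffs below are easier to scan:

    int myRank;
    MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
    if(printLines) cout << "I am process " << myRank << " at line "
                        << __LINE__ << " of " << __FILE__ << endl;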
--- vlasiator.cpp | 4 +- vlasovsolver/cpu_trans_map.cpp | 21 ++++++---- vlasovsolver/cpu_trans_map_amr.cpp | 61 ++++++++++++++++-------------- vlasovsolver/vlasovmover.cpp | 58 ++++++++++++++++++++++------ 4 files changed, 95 insertions(+), 49 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index cca80310a..e47d2bc8a 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -961,7 +961,7 @@ int main(int argn,char* args[]) { //Re-loadbalance if needed //TODO - add LB measure and do LB if it exceeds threshold #warning Re-loadbalance has been disabled temporarily for amr debugging - if(((P::tstep % P::rebalanceInterval == 0 && P::tstep > P::tstep_min) || overrideRebalanceNow)) { + if(((P::tstep % P::rebalanceInterval == 0 && P::tstep > P::tstep_min) || overrideRebalanceNow) && false) { logFile << "(LB): Start load balance, tstep = " << P::tstep << " t = " << P::t << endl << writeVerbose; balanceLoad(mpiGrid, sysBoundaries); addTimedBarrier("barrier-end-load-balance"); @@ -1211,6 +1211,8 @@ int main(int argn,char* args[]) { poisson::solve(mpiGrid); } + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + phiprof::start("Velocity-space"); if ( P::propagateVlasovAcceleration ) { calculateAcceleration(mpiGrid,P::dt); diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index 24b432ccc..294ca9e4c 100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -649,7 +649,11 @@ void update_remote_mapping_contribution( vector send_cells; vector receiveBuffers; - // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + int myRank; + const bool printLines = true; + + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; //normalize if(direction > 0) direction = 1; @@ -661,12 +665,12 @@ void update_remote_mapping_contribution( ccell->neighbor_number_of_blocks = 0; } - //std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; //TODO: prepare arrays, make parallel by avoidin push_back and by checking also for other stuff for (size_t c = 0; c < local_cells.size(); ++c) { - //std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << " " << local_cells[c] << std::endl; + if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; SpatialCell *ccell = mpiGrid[local_cells[c]]; //default values, to avoid any extra sends and receives @@ -714,7 +718,7 @@ void update_remote_mapping_contribution( m_ngbr = NbrPairVector->front().first; p_ngbr = NbrPairVector->back().first; - //std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; //internal cell, not much to do if (mpiGrid.is_local(p_ngbr) && mpiGrid.is_local(m_ngbr)) continue; @@ -748,8 +752,9 @@ void update_remote_mapping_contribution( } } - //std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; - + if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << " " << direction << " " << dimension < int neighborhood = getNeighborhood(dimension,1); const auto* nbrPairs = grid.get_neighbors_of(id, neighborhood); - const int myProcess = 
grid.get_process(id); vector < CellID > myNeighbors; // Collect neighbor ids in the positive direction of the chosen dimension, // that are on the same process as the origin. for (const auto nbrPair : *nbrPairs) { - if (nbrPair.second[dimension] == 1 && grid.get_process(nbrPair.first) == myProcess) + if (nbrPair.second[dimension] == 1 && grid.is_local(nbrPair.first)) myNeighbors.push_back(nbrPair.first); } @@ -622,19 +621,18 @@ void getSeedIds(const dccrg::Dccrg& mpiGr const uint dimension, vector &seedIds) { - const bool debug = false; + const bool debug = true; int neighborhood = getNeighborhood(dimension,1); //#pragma omp parallel for for(auto celli: localPropagatedCells) { - int myProcess = mpiGrid.get_process(celli); // Collect a list of cell ids that do not have a neighbor in the negative direction // These are the seed ids for the pencils. vector negativeNeighbors; // Returns all neighbors as (id, direction-dimension) pair pointers. for ( const auto nbrPair : *(mpiGrid.get_neighbors_of(celli, neighborhood)) ) { - if ( nbrPair.second[dimension] == -1 && mpiGrid.get_process(nbrPair.first) == myProcess ) { + if ( nbrPair.second[dimension] == -1 && mpiGrid.is_local(nbrPair.first)) { // select the first local neighbor in the negative direction and // add the id of the neighbor to a list negativeNeighbors.push_back(nbrPair.first); @@ -660,7 +658,7 @@ void getSeedIds(const dccrg::Dccrg& mpiGr // yz - plane if(ix == 0) { seedId = P::xcells_ini * P::ycells_ini * iz + P::xcells_ini * iy + 1; - if(mpiGrid.get_process(seedId) == mpiGrid.get_process(localPropagatedCells[0])) + if(mpiGrid.is_local(seedId)) seedIds.push_back(seedId); } @@ -669,7 +667,7 @@ void getSeedIds(const dccrg::Dccrg& mpiGr // xz - plane if(iy == 0) { seedId = P::xcells_ini * P::ycells_ini * iz + ix + 1; - if(mpiGrid.get_process(seedId) == mpiGrid.get_process(localPropagatedCells[0])) + if(mpiGrid.is_local(seedId)) seedIds.push_back(seedId); } @@ -678,7 +676,7 @@ void getSeedIds(const dccrg::Dccrg& mpiGr // xy - plane if(iz == 0) { seedId = P::xcells_ini * iy + ix + 1; - if(mpiGrid.get_process(seedId) == mpiGrid.get_process(localPropagatedCells[0])) + if(mpiGrid.is_local(seedId)) seedIds.push_back(seedId); } break; @@ -872,8 +870,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const Realv dt, const uint popID) { - const bool printPencils = false; - const bool printLines = false; + const bool printPencils = true; + const bool printLines = true; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ unsigned char cellid_transpose[WID3]; /*< defines the transpose for the solver internal (transposed) id: i + j*WID + k*WID2 to actual one*/ @@ -884,6 +882,9 @@ bool trans_map_1d_amr(const dccrg::Dccrg& return false; } + int myRank; + if(printLines || printPencils) MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // Vector with all cell ids @@ -976,13 +977,17 @@ bool trans_map_1d_amr(const dccrg::Dccrg& if (printPencils) { uint ibeg = 0; uint iend = 0; - std::cout << "I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; - std::cout << "N, mpirank, (x, y): indices " << std::endl; - std::cout << "-----------------------------------------------------------------" << std::endl; + std::cout << "I am rank " << myRank << ", I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; + MPI_Barrier(MPI_COMM_WORLD); + if(myRank == MASTER_RANK) { + std::cout << "N, mpirank, (x, y): 
indices " << std::endl; + std::cout << "-----------------------------------------------------------------" << std::endl; + } + MPI_Barrier(MPI_COMM_WORLD); for (uint i = 0; i < pencils.N; i++) { iend += pencils.lengthOfPencils[i]; std::cout << i << ", "; - std::cout << mpiGrid.get_process(pencils.ids[ibeg]) << ", "; + std::cout << myRank << ", "; std::cout << "(" << pencils.x[i] << ", " << pencils.y[i] << "): "; for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { std::cout << *j << " "; @@ -1041,7 +1046,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } // **************************************************************************** - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; const uint8_t VMESH_REFLEVEL = 0; @@ -1073,7 +1078,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } // **************************************************************************** - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; int t1 = phiprof::initializeTimer("mappingAndStore"); @@ -1093,7 +1098,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& vmesh.getIndices(blockGID,vRefLevel, block_indices[0], block_indices[1], block_indices[2]); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Loop over sets of pencils // This loop only has one iteration for now @@ -1104,14 +1109,14 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Add padding by 2 for each pencil Vec targetVecData[(pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL]; - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Initialize targetvecdata to 0 for( uint i = 0; i < (pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL; i++ ) { targetVecData[i] = Vec(0.0); } - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // TODO: There's probably a smarter way to keep track of where we are writing // in the target data array. 
@@ -1121,11 +1126,11 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // For targets we need the local cells, plus a padding of 1 cell at both ends std::vector targetCells(pencils.sumOfLengths + pencils.N * 2 ); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; computeSpatialTargetCellsForPencils(mpiGrid, pencils, dimension, targetCells.data()); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Loop over pencils uint totalTargetLength = 0; @@ -1142,7 +1147,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& computeSpatialSourceCellsForPencil(mpiGrid, pencils, pencili, dimension, sourceCells.data()); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; Vec dz[sourceCells.size()]; uint i = 0; @@ -1161,25 +1166,25 @@ bool trans_map_1d_amr(const dccrg::Dccrg& i++; } - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Allocate source data: sourcedata sourcedata) / (proper xy reconstruction in future) copy_trans_block_data_amr(sourceCells.data(), blockGID, L, sourceVecData, cellid_transpose, popID); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Dz and sourceVecData are both padded by VLASOV_STENCIL_WIDTH // Dz has 1 value/cell, sourceVecData has WID3 values/cell propagatePencil(dz, sourceVecData, dimension, blockGID, dt, vmesh, L); - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // sourcedata => targetdata[this pencil]) for (uint i = 0; i < targetLength; i++) { @@ -1215,7 +1220,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } } - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // store_data(target_data => targetCells) :Aggregate data for blockid to original location // Loop over pencils again @@ -1303,6 +1308,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } } - if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; return true; } diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index f9a8ad73b..1d517eca6 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -76,11 +76,14 @@ void calculateSpatialTranslation( int trans_timer; bool localTargetGridGenerated = false; - - // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + const bool printLines = true; + + int myRank; + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // ------------- SLICE - map dist function in Z --------------- // - if(P::zcells_ini > 1){ + 
if(P::zcells_ini > 1 && false){ trans_timer=phiprof::initializeTimer("transfer-stencil-data-z","MPI"); phiprof::start(trans_timer); SpatialCell::set_mpi_transfer_type(Transfer::VEL_BLOCK_DATA); @@ -97,41 +100,56 @@ void calculateSpatialTranslation( update_remote_mapping_contribution(mpiGrid, 2,-1,popID); phiprof::stop("update_remote-z"); - } - // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // ------------- SLICE - map dist function in X --------------- // if(P::xcells_ini > 1 ){ trans_timer=phiprof::initializeTimer("transfer-stencil-data-x","MPI"); phiprof::start(trans_timer); - SpatialCell::set_mpi_transfer_type(Transfer::VEL_BLOCK_DATA); + SpatialCell::set_mpi_transfer_type(Transfer::VEL_BLOCK_DATA); + + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + + mpiGrid.set_send_single_cells(true); mpiGrid.update_copies_of_remote_neighbors(VLASOV_SOLVER_X_NEIGHBORHOOD_ID); phiprof::stop(trans_timer); + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + phiprof::start("compute-mapping-x"); trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsx, 0,dt,popID); // map along x// phiprof::stop("compute-mapping-x"); + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + trans_timer=phiprof::initializeTimer("update_remote-x","MPI"); phiprof::start("update_remote-x"); update_remote_mapping_contribution(mpiGrid, 0,+1,popID); update_remote_mapping_contribution(mpiGrid, 0,-1,popID); phiprof::stop("update_remote-x"); + + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; } - // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // ------------- SLICE - map dist function in Y --------------- // - if(P::ycells_ini > 1){ + if(P::ycells_ini > 1 && false){ trans_timer=phiprof::initializeTimer("transfer-stencil-data-y","MPI"); phiprof::start(trans_timer); SpatialCell::set_mpi_transfer_type(Transfer::VEL_BLOCK_DATA); + + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + + mpiGrid.set_send_single_cells(true); mpiGrid.update_copies_of_remote_neighbors(VLASOV_SOLVER_Y_NEIGHBORHOOD_ID); phiprof::stop(trans_timer); + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + phiprof::start("compute-mapping-y"); trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsy, 1,dt,popID); // map along y// phiprof::stop("compute-mapping-y"); @@ -324,16 +342,29 @@ void calculateAcceleration(dccrg::Dccrg& typedef Parameters P; const vector& cells = getLocalCells(); + const bool printLines = true; + + int myRank; + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__; + cout << " " << dt << " " << P::tstep << endl; + if (dt == 0.0 && P::tstep > 0) { + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + // Even if acceleration is turned off we need to adjust velocity blocks // because the boundary conditions may have altered the velocity space, // and to update 
changes in no-content blocks during translation. - for (uint popID=0; popID& spatial_cell::Population& pop = SC->get_population(popID); pop.ACCSUBCYCLES = getAccelerationSubcycles(SC, dt, popID); } - } + } + + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Compute global maximum for number of subcycles MPI_Allreduce(&maxSubcycles, &globalMaxSubcycles, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); - + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + // substep global max times for(uint step=0; step<(uint)globalMaxSubcycles; ++step) { if(step > 0) { From cfc5e4af563781895d66d0f3317e0670840a5b8a Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 6 Nov 2018 16:33:22 +0200 Subject: [PATCH 133/602] more debugging lines --- sysboundary/sysboundary.cpp | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp index 6044d76fe..63ac59ffc 100644 --- a/sysboundary/sysboundary.cpp +++ b/sysboundary/sysboundary.cpp @@ -336,6 +336,8 @@ bool SysBoundary::classifyCells(dccrg::Dccrg cells = mpiGrid.get_cells(); + const bool printLines = false; + /*set all cells to default value, not_sysboundary*/ for(uint i=0; isysBoundaryFlag = sysboundarytype::NOT_SYSBOUNDARY; @@ -356,6 +358,8 @@ bool SysBoundary::classifyCells(dccrg::DccrgsysBoundaryLayer=0; /*Initial value*/ @@ -438,11 +444,18 @@ bool SysBoundary::classifyCells(dccrg::DccrgsysBoundaryFlag = sysboundarytype::DO_NOT_COMPUTE; } } + + if(printLines) cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // The following is done so that everyone knows their neighbour's // layer flags. This is needed for the correct use of the system From f5164681cade71809f324eddf414f39a9e05b529 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 6 Nov 2018 16:33:46 +0200 Subject: [PATCH 134/602] Linking with -g --- MAKE/Makefile.appa | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAKE/Makefile.appa b/MAKE/Makefile.appa index 796c7a873..670daf45c 100644 --- a/MAKE/Makefile.appa +++ b/MAKE/Makefile.appa @@ -53,7 +53,7 @@ CXXFLAGS += -g -O2 -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi testpackage: CXXFLAGS = -O2 -fopenmp -funroll-loops -std=c++0x -fabi-version=0 -mavx MATHFLAGS = -ffast-math -LDFLAGS = +LDFLAGS = -g LIB_MPI = -lgomp # BOOST_VERSION = current trilinos version From 2ce4faeb191e79e276116496fffd6dbec1c7ab85 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 7 Nov 2018 10:31:02 +0200 Subject: [PATCH 135/602] Disabled debug prints, set send_single_cells to false in vlasovmover.cpp. 
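As I understand the dccrg option being flipped here, set_send_single_cells(true) posts one MPI message per cell, while false lets dccrg aggregate all cells bound for a neighbor process into a single transfer. A sketch of the call pattern from the hunks below:

    mpiGrid.set_send_single_cells(false); // aggregate per-process transfers
    mpiGrid.update_copies_of_remote_neighbors(VLASOV_SOLVER_X_NEIGHBORHOOD_ID);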
--- projects/testAmr/testAmr.cpp | 4 ++-- vlasiator.cpp | 2 +- vlasovsolver/cpu_trans_map.cpp | 2 +- vlasovsolver/cpu_trans_map_amr.cpp | 6 +++--- vlasovsolver/vlasovmover.cpp | 12 ++++++------ 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index 3fefefff1..cc574af8e 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -254,7 +254,7 @@ namespace projects { bool testAmr::refineSpatialCells( dccrg::Dccrg& mpiGrid ) const { - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; std::array xyz_mid; @@ -314,7 +314,7 @@ namespace projects { mpiGrid.balance_load(); - cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; // return std::all_of(refineSuccess.begin(), refineSuccess.end(), [](bool v) { return v; }); return true; diff --git a/vlasiator.cpp b/vlasiator.cpp index e47d2bc8a..e1984cedb 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -269,7 +269,7 @@ int main(int argn,char* args[]) { Real newDt; bool dtIsChanged; - const bool printLines = true; + const bool printLines = false; const bool printCells = false; const bool printSums = false; diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index 294ca9e4c..8bee14d69 100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -650,7 +650,7 @@ void update_remote_mapping_contribution( vector receiveBuffers; int myRank; - const bool printLines = true; + const bool printLines = false; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 58c6211f5..6004de6bf 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -621,7 +621,7 @@ void getSeedIds(const dccrg::Dccrg& mpiGr const uint dimension, vector &seedIds) { - const bool debug = true; + const bool debug = false; int neighborhood = getNeighborhood(dimension,1); //#pragma omp parallel for @@ -870,8 +870,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const Realv dt, const uint popID) { - const bool printPencils = true; - const bool printLines = true; + const bool printPencils = false; + const bool printLines = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ unsigned char cellid_transpose[WID3]; /*< defines the transpose for the solver internal (transposed) id: i + j*WID + k*WID2 to actual one*/ diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index 1d517eca6..bfc8554c4 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -76,7 +76,7 @@ void calculateSpatialTranslation( int trans_timer; bool localTargetGridGenerated = false; - const bool printLines = true; + const bool printLines = false; int myRank; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); @@ -113,7 +113,7 @@ void calculateSpatialTranslation( if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - mpiGrid.set_send_single_cells(true); + mpiGrid.set_send_single_cells(false); mpiGrid.update_copies_of_remote_neighbors(VLASOV_SOLVER_X_NEIGHBORHOOD_ID); 
phiprof::stop(trans_timer); @@ -137,14 +137,14 @@ void calculateSpatialTranslation( if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // ------------- SLICE - map dist function in Y --------------- // - if(P::ycells_ini > 1 && false){ + if(P::ycells_ini > 1){ trans_timer=phiprof::initializeTimer("transfer-stencil-data-y","MPI"); phiprof::start(trans_timer); SpatialCell::set_mpi_transfer_type(Transfer::VEL_BLOCK_DATA); if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - mpiGrid.set_send_single_cells(true); + mpiGrid.set_send_single_cells(false); mpiGrid.update_copies_of_remote_neighbors(VLASOV_SOLVER_Y_NEIGHBORHOOD_ID); phiprof::stop(trans_timer); @@ -342,12 +342,12 @@ void calculateAcceleration(dccrg::Dccrg& typedef Parameters P; const vector& cells = getLocalCells(); - const bool printLines = true; + const bool printLines = false; int myRank; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__; - cout << " " << dt << " " << P::tstep << endl; + if(printLines) cout << " " << dt << " " << P::tstep << endl; if (dt == 0.0 && P::tstep > 0) { if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; From 7a69f8e7b6f395d7bdca83c2d5ce8e8ca49b86a1 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 7 Nov 2018 10:33:17 +0200 Subject: [PATCH 136/602] Enabled debug lines in vlasovmover --- vlasovsolver/vlasovmover.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index bfc8554c4..4fa293ba0 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -76,7 +76,7 @@ void calculateSpatialTranslation( int trans_timer; bool localTargetGridGenerated = false; - const bool printLines = false; + const bool printLines = true; int myRank; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); From 83e5d24ce25d08d6e2c624f1a63081d536afff5f Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 8 Nov 2018 12:41:03 +0200 Subject: [PATCH 137/602] Correct size of cellsToReceive in getFieldDataFromFsGrid --- fieldsolver/gridGlue.hpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fieldsolver/gridGlue.hpp b/fieldsolver/gridGlue.hpp index 7deaae6fa..026929793 100644 --- a/fieldsolver/gridGlue.hpp +++ b/fieldsolver/gridGlue.hpp @@ -170,7 +170,8 @@ template< unsigned int numFields > void getFieldDataFromFsGrid( dccrg::Dccrg& mpiGrid, const std::vector& cells, int index) { - sourceGrid.setupForTransferOut(cells.size()); + int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); + sourceGrid.setupForTransferOut(nCells); for(CellID dccrgId : cells) { // TODO: This assumes that the field data are lying continuous in memory. From 72755415a0be420d02716e889743bd0b7b7d3862 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 8 Nov 2018 15:53:21 +0200 Subject: [PATCH 138/602] Updated getIndices to be more robust against more exotic load balancing schemes. Ilja suggested checking against crossing periodic boundaries by using the indices from grid.mapping.get_indices(). This seems to work much better than the previous hack.
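In condensed form, the guard added to the seed-id search (matching the cpu_trans_map_amr.cpp hunk below; the explicit casts and std::llabs are added in this sketch because dccrg indices are unsigned):

    auto myIndices = mpiGrid.mapping.get_indices(celli);
    for (const auto nbrPair : *(mpiGrid.get_neighbors_of(celli, neighborhood))) {
       if (nbrPair.second[dimension] == -1 && mpiGrid.is_local(nbrPair.first)) {
          auto nbrIndices = mpiGrid.mapping.get_indices(nbrPair.first);
          // A genuine -1 face neighbor lies at most one unrefined cell away,
          // i.e. at most 2^maxRefLvl indices on the max-refinement lattice;
          // anything farther must have wrapped across a periodic boundary.
          long long d = (long long)myIndices[dimension]
                      - (long long)nbrIndices[dimension];
          if (std::llabs(d) <= (1LL << mpiGrid.get_maximum_refinement_level())) {
             negativeNeighbors.push_back(nbrPair.first);
          }
       }
    }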
--- vlasovsolver/cpu_trans_map_amr.cpp | 82 ++++++++++-------------------- 1 file changed, 27 insertions(+), 55 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 6004de6bf..16af38d37 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -621,22 +621,37 @@ void getSeedIds(const dccrg::Dccrg& mpiGr const uint dimension, vector &seedIds) { - const bool debug = false; + const bool debug = true; + int myRank; + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + int neighborhood = getNeighborhood(dimension,1); //#pragma omp parallel for for(auto celli: localPropagatedCells) { + + auto myIndices = mpiGrid.mapping.get_indices(celli); + // Collect a list of cell ids that do not have a neighbor in the negative direction // These are the seed ids for the pencils. vector negativeNeighbors; // Returns all neighbors as (id, direction-dimension) pair pointers. for ( const auto nbrPair : *(mpiGrid.get_neighbors_of(celli, neighborhood)) ) { - + if ( nbrPair.second[dimension] == -1 && mpiGrid.is_local(nbrPair.first)) { - // select the first local neighbor in the negative direction and - // add the id of the neighbor to a list - negativeNeighbors.push_back(nbrPair.first); + // Check that the neighbor is not across a periodic boundary by calculating + // the distance in indices between this cell and its neighbor. + auto nbrIndices = mpiGrid.mapping.get_indices(nbrPair.first); + + if ( abs ( myIndices[dimension] - nbrIndices[dimension] ) <= + pow(2,mpiGrid.get_maximum_refinement_level()) ) { + + // select the first local neighbor in the negative direction and + // add the id of the neighbor to a list + negativeNeighbors.push_back(nbrPair.first); + + } } } //cout << endl; @@ -645,50 +660,9 @@ void getSeedIds(const dccrg::Dccrg& mpiGr seedIds.push_back(celli); } - // If no seed ids were found, let's assume we have a periodic boundary and - // a single process in the dimension of propagation. 
In this case we start from - // the first cells of the plane perpendicular to the propagation dimension - if (seedIds.size() == 0) { - for (uint ix = 0; ix < P::xcells_ini; ix++) { - for (uint iy = 0; iy < P::ycells_ini; iy++) { - for (uint iz = 0; iz < P::zcells_ini; iz++) { - CellID seedId; - switch (dimension) { - case 0: - // yz - plane - if(ix == 0) { - seedId = P::xcells_ini * P::ycells_ini * iz + P::xcells_ini * iy + 1; - if(mpiGrid.is_local(seedId)) - seedIds.push_back(seedId); - - } - break; - case 1: - // xz - plane - if(iy == 0) { - seedId = P::xcells_ini * P::ycells_ini * iz + ix + 1; - if(mpiGrid.is_local(seedId)) - seedIds.push_back(seedId); - - } - break; - case 2: - // xy - plane - if(iz == 0) { - seedId = P::xcells_ini * iy + ix + 1; - if(mpiGrid.is_local(seedId)) - seedIds.push_back(seedId); - } - break; - } - } - } - } - } - if(debug) { - cout << "Number of seed ids is " << seedIds.size() << endl; - cout << "Seed ids are: "; + //cout << "Number of seed ids is " << seedIds.size() << endl; + cout << "Rank " << myRank << ", Seed ids are: "; for (const auto seedId : seedIds) { cout << seedId << " "; } @@ -884,8 +858,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& int myRank; if(printLines || printPencils) MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - - //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Vector with all cell ids vector allCells(localPropagatedCells); @@ -943,8 +917,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } // **************************************************************************** - //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - // compute pencils => set of pencils (shared datastructure) // std::cout << "LocalPropagatedCells: "; @@ -964,15 +936,15 @@ bool trans_map_1d_amr(const dccrg::Dccrg& setOfPencils pencils; vector pencilSets; - // std::cout << "Starting cell ids for pencils are "; - // for (const auto seedId : seedIds) std::cout << seedId << " "; - // std::cout << endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; for (const auto seedId : seedIds) { // Construct pencils from the seedIds into a set of pencils. pencils = buildPencilsWithNeighbors(mpiGrid, pencils, seedId, ids, dimension, path); } + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + // Print out ids of pencils (if needed for debugging) if (printPencils) { uint ibeg = 0; From 0169d7a60faaebe1455f1562ff915cb22fe5cef4 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 9 Nov 2018 10:44:01 +0200 Subject: [PATCH 139/602] Debugging crash in mpiGrid.update_copies_of_remote_neighbors, cpu_trans_map_amr.cpp L763. --- Makefile | 2 +- grid.cpp | 4 +++- projects/testAmr/testAmr.cfg | 6 +++--- spatial_cell.cpp | 4 ++++ vlasiator.cpp | 2 +- vlasovsolver/cpu_trans_map.cpp | 10 ++++++---- vlasovsolver/cpu_trans_map_amr.cpp | 12 +++++++----- vlasovsolver/vlasovmover.cpp | 7 +++++++ 8 files changed, 32 insertions(+), 15 deletions(-) diff --git a/Makefile b/Makefile index 7747028f8..74644940d 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,7 @@ FIELDSOLVER ?= londrillo_delzanna COMPFLAGS += -DPROFILE #Add -DNDEBUG to turn debugging off. 
If debugging is enabled performance will degrade significantly -COMPFLAGS += -DNDEBUG +COMPFLAGS += -DDEBUG # CXXFLAGS += -DDEBUG_SOLVERS # CXXFLAGS += -DDEBUG_IONOSPHERE diff --git a/grid.cpp b/grid.cpp index 9548086d5..ae9d236f2 100644 --- a/grid.cpp +++ b/grid.cpp @@ -123,9 +123,11 @@ void initializeGrid( .set_geometry(geom_params); + MPI_Barrier(comm); if(project.refineSpatialCells(mpiGrid)) { recalculateLocalCellsCache(); - } + } + MPI_Barrier(comm); // Init velocity mesh on all cells initVelocityGridGeometry(mpiGrid); diff --git a/projects/testAmr/testAmr.cfg b/projects/testAmr/testAmr.cfg index 6af0d38ae..10915b372 100644 --- a/projects/testAmr/testAmr.cfg +++ b/projects/testAmr/testAmr.cfg @@ -27,11 +27,11 @@ x_length = 5 y_length = 5 z_length = 3 x_min = 0.0 -x_max = 1.8e6 +x_max = 1.0e6 y_min = 0.0 -y_max = 1.8e6 +y_max = 1.0e6 z_min = 0.0 -z_max = 0.6e6 +z_max = 1.0e6 timestep_max = 100 [proton_vspace] diff --git a/spatial_cell.cpp b/spatial_cell.cpp index 72d426c9a..587878cc9 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -833,6 +833,10 @@ namespace spatial_cell { datatype = MPI_BYTE; } + int mpiSize; + MPI_Type_size(datatype,&mpiSize); + cout << "get_mpi_datatype: " << cellID << " " << sender_rank << " " << receiver_rank << " " << mpiSize << endl; + return std::make_tuple(address,count,datatype); } diff --git a/vlasiator.cpp b/vlasiator.cpp index e1984cedb..e47d2bc8a 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -269,7 +269,7 @@ int main(int argn,char* args[]) { Real newDt; bool dtIsChanged; - const bool printLines = false; + const bool printLines = true; const bool printCells = false; const bool printSums = false; diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index 8bee14d69..7b6a6078c 100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -650,7 +650,7 @@ void update_remote_mapping_contribution( vector receiveBuffers; int myRank; - const bool printLines = false; + const bool printLines = true; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; @@ -670,7 +670,7 @@ void update_remote_mapping_contribution( //TODO: prepare arrays, make parallel by avoidin push_back and by checking also for other stuff for (size_t c = 0; c < local_cells.size(); ++c) { - if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; + //if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; SpatialCell *ccell = mpiGrid[local_cells[c]]; //default values, to avoid any extra sends and receives @@ -718,7 +718,7 @@ void update_remote_mapping_contribution( m_ngbr = NbrPairVector->front().first; p_ngbr = NbrPairVector->back().first; - if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; + //if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; //internal cell, not much to do if (mpiGrid.is_local(p_ngbr) && mpiGrid.is_local(m_ngbr)) continue; @@ -752,9 +752,9 @@ void update_remote_mapping_contribution( } } + MPI_Barrier(MPI_COMM_WORLD); if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << " " << direction << " " << dimension < 0) { + while (id != INVALID_CELLID) { periodic = false; bool neighborExists = false; @@ -451,6 +451,7 @@ setOfPencils 
buildPencilsWithNeighbors( const dccrg::Dccrg& const Realv dt, const uint popID) { - const bool printPencils = false; + const bool printPencils = true; const bool printLines = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index 4fa293ba0..194e65e12 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -153,6 +153,10 @@ void calculateSpatialTranslation( phiprof::start("compute-mapping-y"); trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsy, 1,dt,popID); // map along y// phiprof::stop("compute-mapping-y"); + + MPI_Barrier(MPI_COMM_WORLD); + + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; trans_timer=phiprof::initializeTimer("update_remote-y","MPI"); phiprof::start("update_remote-y"); @@ -160,6 +164,9 @@ void calculateSpatialTranslation( update_remote_mapping_contribution(mpiGrid, 1,-1,popID); phiprof::stop("update_remote-y"); } + + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + // bailout(true, "", __FILE__, __LINE__); // throw; } From 9087a7d53e146e1ca81c03f1109580a546dbbcd3 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 20 Nov 2018 15:12:36 +0200 Subject: [PATCH 140/602] Implementation for multi-neighbor communications. Compiles and runs through a few time steps before crashing on multiple ranks. Single rank works as before but seems slow. --- spatial_cell.cpp | 17 ++- spatial_cell.hpp | 7 +- vlasiator.cpp | 8 +- vlasovsolver/cpu_trans_map.cpp | 4 + vlasovsolver/cpu_trans_map.hpp | 10 +- vlasovsolver/cpu_trans_map_amr.cpp | 189 ++++++++++++++++++++++++++++- vlasovsolver/cpu_trans_map_amr.hpp | 7 ++ vlasovsolver/vlasovmover.cpp | 2 +- 8 files changed, 224 insertions(+), 20 deletions(-) diff --git a/spatial_cell.cpp b/spatial_cell.cpp index 587878cc9..c416bdd6e 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -658,9 +658,11 @@ namespace spatial_cell { /*We are actually transferring the data of a * neighbor. 
The values of neighbor_block_data * and neighbor_number_of_blocks should be set in - * solver.*/ - displacements.push_back((uint8_t*) this->neighbor_block_data - (uint8_t*) this); - block_lengths.push_back(sizeof(Realf) * VELOCITY_BLOCK_LENGTH* this->neighbor_number_of_blocks); + * solver.*/ + for ( int i = 0; i < this->neighbor_block_data.size(); ++i) { + displacements.push_back((uint8_t*) this->neighbor_block_data[i] - (uint8_t*) this); + block_lengths.push_back(sizeof(Realf) * VELOCITY_BLOCK_LENGTH* this->neighbor_number_of_blocks[i]); + } } // send spatial cell parameters @@ -833,9 +835,12 @@ namespace spatial_cell { datatype = MPI_BYTE; } - int mpiSize; - MPI_Type_size(datatype,&mpiSize); - cout << "get_mpi_datatype: " << cellID << " " << sender_rank << " " << receiver_rank << " " << mpiSize << endl; + const bool printMpiDatatype = false; + if(printMpiDatatype) { + int mpiSize; + MPI_Type_size(datatype,&mpiSize); + cout << "get_mpi_datatype: " << cellID << " " << sender_rank << " " << receiver_rank << " " << mpiSize << endl; + } return std::make_tuple(address,count,datatype); } diff --git a/spatial_cell.hpp b/spatial_cell.hpp index 714d5257d..083f0c971 100644 --- a/spatial_cell.hpp +++ b/spatial_cell.hpp @@ -330,9 +330,12 @@ namespace spatial_cell { uint64_t ioLocalCellId; /**< Local cell ID used for IO, not needed elsewhere * and thus not being kept up-to-date.*/ //vmesh::LocalID mpi_number_of_blocks; /**< Number of blocks in mpi_velocity_block_list.*/ - Realf* neighbor_block_data; /**< Pointers for translation operator. We can point to neighbor + //Realf* neighbor_block_data; /**< Pointers for translation operator. We can point to neighbor + // * cell block data. We do not allocate memory for the pointer.*/ + //vmesh::LocalID neighbor_number_of_blocks; + std::vector neighbor_block_data; /**< Pointers for translation operator. We can point to neighbor * cell block data. We do not allocate memory for the pointer.*/ - vmesh::LocalID neighbor_number_of_blocks; + std::vector neighbor_number_of_blocks; uint sysBoundaryFlag; /**< What type of system boundary does the cell belong to. * Enumerated in the sysboundarytype namespace's enum.*/ uint sysBoundaryLayer; /**< Layers counted from closest systemBoundary. 
If 0 then it has not diff --git a/vlasiator.cpp b/vlasiator.cpp index e47d2bc8a..c64c62e4f 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -269,9 +269,9 @@ int main(int argn,char* args[]) { Real newDt; bool dtIsChanged; - const bool printLines = true; + const bool printLines = false; const bool printCells = false; - const bool printSums = false; + const bool printSums = true; // Init MPI: int required=MPI_THREAD_FUNNELED; @@ -747,7 +747,7 @@ int main(int argn,char* args[]) { nSum += rho*dx*dy*dz; if(printCells) cout << "Cell " << cell << " rho = " << rho << " x: " << x << " y: " << y << " z: " << z << endl; } - if(printSums) cout << "nSum = " << nSum << endl; + if(printSums) cout << "Rank " << myRank << ", nSum = " << nSum << endl; while(P::tstep <= P::tstep_max && P::t-P::dt <= P::t_max+DT_EPSILON && @@ -1135,7 +1135,7 @@ int main(int argn,char* args[]) { nSum += rho*dx*dy*dz; if(printCells) cout << "Cell " << cell << " rho = " << rho << " x: " << x << " y: " << y << " z: " << z << endl; } - if (printSums) cout << "nSum = " << nSum << endl; + if(printSums) cout << "Rank " << myRank << ", nSum = " << nSum << endl; if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::stop("Spatial-space",computedCells,"Cells"); diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index 7b6a6078c..ad8a9ff47 100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -637,6 +637,8 @@ bool trans_map_1d(const dccrg::Dccrg& mpi \par dimension: 0,1,2 for x,y,z \par direction: 1 for + dir, -1 for - dir */ + +/* void update_remote_mapping_contribution( dccrg::Dccrg& mpiGrid, const uint dimension, @@ -814,3 +816,5 @@ void update_remote_mapping_contribution( } } +*/ + diff --git a/vlasovsolver/cpu_trans_map.hpp b/vlasovsolver/cpu_trans_map.hpp index 3f993cf7b..7ecefcd96 100644 --- a/vlasovsolver/cpu_trans_map.hpp +++ b/vlasovsolver/cpu_trans_map.hpp @@ -51,11 +51,11 @@ bool trans_map_1d(const dccrg::Dccrg& mpiGrid, - const uint dimension, - int direction, - const uint popID); +// void update_remote_mapping_contribution(dccrg::Dccrg& mpiGrid, +// const uint dimension, +// int direction, +// const uint popID); void compute_spatial_source_neighbors(const dccrg::Dccrg& mpiGrid, diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 0dbb8b9e2..4826e514e 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -623,7 +623,7 @@ void getSeedIds(const dccrg::Dccrg& mpiGr const uint dimension, vector &seedIds) { - const bool debug = true; + const bool debug = false; int myRank; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); @@ -846,7 +846,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const Realv dt, const uint popID) { - const bool printPencils = true; + const bool printPencils = false; const bool printLines = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ @@ -1285,3 +1285,188 @@ bool trans_map_1d_amr(const dccrg::Dccrg& if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; return true; } + + + +/*! + + This function communicates the mapping on process boundaries, and then updates the data to their correct values. 
+ TODO, this could be inside an openmp region, in which case some m ore barriers and masters should be added + + \par dimension: 0,1,2 for x,y,z + \par direction: 1 for + dir, -1 for - dir +*/ +void update_remote_mapping_contribution( + dccrg::Dccrg& mpiGrid, + const uint dimension, + int direction, + const uint popID) { + + const vector local_cells = mpiGrid.get_cells(); + const vector remote_cells = mpiGrid.get_remote_cells_on_process_boundary(VLASOV_SOLVER_NEIGHBORHOOD_ID); + vector receive_cells; + vector send_cells; + vector receiveBuffers; + + int myRank; + const bool printLines = false; + + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + + //normalize + if(direction > 0) direction = 1; + if(direction < 0) direction = -1; + for (size_t c=0; cneighbor_block_data.push_back(ccell->get_data(popID)); + ccell->neighbor_number_of_blocks.push_back(0); + } + + if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; + + //TODO: prepare arrays, make parallel by avoidin push_back and by checking also for other stuff + for (size_t c = 0; c < local_cells.size(); ++c) { + + //if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; + + SpatialCell *ccell = mpiGrid[local_cells[c]]; + //default values, to avoid any extra sends and receives + ccell->neighbor_block_data.push_back(ccell->get_data(popID)); + ccell->neighbor_number_of_blocks.push_back(0); + CellID p_ngbr,m_ngbr; + + int neighborhood = 0; + switch (dimension) { + case 0: + neighborhood = VLASOV_SOLVER_TARGET_X_NEIGHBORHOOD_ID; + break; + case 1: + neighborhood = VLASOV_SOLVER_TARGET_Y_NEIGHBORHOOD_ID; + break; + case 2: + neighborhood = VLASOV_SOLVER_TARGET_Z_NEIGHBORHOOD_ID; + break; + } + auto nbrPairVector = mpiGrid.get_neighbors_of(local_cells[c], neighborhood); + + //if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; + + if (all_of(nbrPairVector->begin(), nbrPairVector->end(), + [mpiGrid](pair > p){return mpiGrid.is_local(p.first);})) { + continue; + } + + vector n_nbrs; + vector p_nbrs; + + // Collect neighbors on the positive and negative sides into separate lists + for (auto nbrPair : *nbrPairVector) { + + if (nbrPair.second.at(dimension) == direction) { + p_nbrs.push_back(nbrPair.first); + } + + if (nbrPair.second.at(dimension) == -direction) { + n_nbrs.push_back(nbrPair.first); + } + } + + for (auto nbr : p_nbrs) { + + SpatialCell *pcell = NULL; + if (nbr != INVALID_CELLID) { + pcell = mpiGrid[nbr]; + } + if (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && do_translate_cell(ccell) && + pcell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + //Send data in nbr target array that we just mapped to if 1) it is a valid target, + //2) is remote cell, 3) the source cell in center was translated, 4) it is not a boundary cell? 
+ ccell->neighbor_block_data.push_back(pcell->get_data(popID)); + ccell->neighbor_number_of_blocks.push_back(pcell->get_number_of_velocity_blocks(popID)); + send_cells.push_back(nbr); + } + } + + for (auto nbr : n_nbrs) { + + SpatialCell *ncell = NULL; + if (nbr != INVALID_CELLID) { + ncell = mpiGrid[nbr]; + } + if (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && + ccell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + //Receive data that mcell mapped to ccell to this local cell + //data array, if 1) m is a valid source cell, 2) center cell is to be updated (normal cell) 3) m is remote + //we will here allocate a receive buffer, since we need to aggregate values + ncell->neighbor_number_of_blocks.push_back(ccell->get_number_of_velocity_blocks(popID)); + ncell->neighbor_block_data.push_back((Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.back() * WID3 * sizeof(Realf), 64)); + + receive_cells.push_back(local_cells[c]); + receiveBuffers.push_back(ncell->neighbor_block_data.back()); + } + } + } + + MPI_Barrier(MPI_COMM_WORLD); + if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << " " << direction << " " << dimension < 0) mpiGrid.update_copies_of_remote_neighbors(SHIFT_P_X_NEIGHBORHOOD_ID); + if(direction < 0) mpiGrid.update_copies_of_remote_neighbors(SHIFT_M_X_NEIGHBORHOOD_ID); + break; + case 1: + if(direction > 0) mpiGrid.update_copies_of_remote_neighbors(SHIFT_P_Y_NEIGHBORHOOD_ID); + if(direction < 0) mpiGrid.update_copies_of_remote_neighbors(SHIFT_M_Y_NEIGHBORHOOD_ID); + break; + case 2: + if(direction > 0) mpiGrid.update_copies_of_remote_neighbors(SHIFT_P_Z_NEIGHBORHOOD_ID); + if(direction < 0) mpiGrid.update_copies_of_remote_neighbors(SHIFT_M_Z_NEIGHBORHOOD_ID); + break; + } + + MPI_Barrier(MPI_COMM_WORLD); + + if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; + +#pragma omp parallel + { + //reduce data: sum received data in the data array to + // the target grid in the temporary block container + for (size_t c=0; c < receive_cells.size(); ++c) { + SpatialCell* spatial_cell = mpiGrid[receive_cells[c]]; + Realf *blockData = spatial_cell->get_data(popID); + +#pragma omp for + for(unsigned int cell = 0; cellget_number_of_velocity_blocks(popID); ++cell) { + blockData[cell] += receiveBuffers[c][cell]; + } + } + + // send cell data is set to zero. This is to avoid double copy if + // one cell is the neighbor on bot + and - side to the same + // process + for (size_t c=0; cget_data(popID); + +#pragma omp for nowait + for(unsigned int cell = 0; cell< VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++cell) { + // copy received target data to temporary array where target data is stored. 
+ blockData[cell] = 0; + } + } + } + + if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; + + //and finally free temporary receive buffer + for (size_t c=0; c < receiveBuffers.size(); ++c) { + aligned_free(receiveBuffers[c]); + } +} diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index d0275c334..1824be354 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -149,4 +149,11 @@ bool trans_map_1d_amr(const dccrg::Dccrg& mpiGrid, + const uint dimension, + int direction, + const uint popID); + + #endif diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index 194e65e12..aaa634c7f 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -76,7 +76,7 @@ void calculateSpatialTranslation( int trans_timer; bool localTargetGridGenerated = false; - const bool printLines = true; + const bool printLines = false; int myRank; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); From 577343b6971a862cf83f9567636808abbb7a69d2 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 21 Nov 2018 15:06:59 +0200 Subject: [PATCH 141/602] Clearing spatial_cell pointer arrays at the beginning of update_remote_mapping_contributions. Setting default values in else - clauses for the if statements that determine if a good remote cell has been found. --- vlasovsolver/cpu_trans_map_amr.cpp | 40 ++++++++++++++++++++++-------- 1 file changed, 30 insertions(+), 10 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 4826e514e..73e8b9d33 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1320,8 +1320,11 @@ void update_remote_mapping_contribution( for (size_t c=0; cneighbor_block_data.push_back(ccell->get_data(popID)); - ccell->neighbor_number_of_blocks.push_back(0); + ccell->neighbor_block_data.clear(); + ccell->neighbor_number_of_blocks.clear(); + + //ccell->neighbor_block_data.push_back(ccell->get_data(popID)); + //ccell->neighbor_number_of_blocks.push_back(0); } if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; @@ -1329,12 +1332,10 @@ void update_remote_mapping_contribution( //TODO: prepare arrays, make parallel by avoidin push_back and by checking also for other stuff for (size_t c = 0; c < local_cells.size(); ++c) { - //if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; - SpatialCell *ccell = mpiGrid[local_cells[c]]; - //default values, to avoid any extra sends and receives - ccell->neighbor_block_data.push_back(ccell->get_data(popID)); - ccell->neighbor_number_of_blocks.push_back(0); + // Initialize to empty vectors, add default values at the end. 
+ ccell->neighbor_block_data.clear(); + ccell->neighbor_number_of_blocks.clear(); CellID p_ngbr,m_ngbr; int neighborhood = 0; @@ -1351,8 +1352,6 @@ void update_remote_mapping_contribution( } auto nbrPairVector = mpiGrid.get_neighbors_of(local_cells[c], neighborhood); - //if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; - if (all_of(nbrPairVector->begin(), nbrPairVector->end(), [mpiGrid](pair > p){return mpiGrid.is_local(p.first);})) { continue; @@ -1386,7 +1385,10 @@ void update_remote_mapping_contribution( ccell->neighbor_block_data.push_back(pcell->get_data(popID)); ccell->neighbor_number_of_blocks.push_back(pcell->get_number_of_velocity_blocks(popID)); send_cells.push_back(nbr); - } + } else { + ccell->neighbor_block_data.push_back(ccell->get_data(popID)); + ccell->neighbor_number_of_blocks.push_back(0); + } } for (auto nbr : n_nbrs) { @@ -1405,8 +1407,20 @@ void update_remote_mapping_contribution( receive_cells.push_back(local_cells[c]); receiveBuffers.push_back(ncell->neighbor_block_data.back()); + } else { + ncell->neighbor_block_data.push_back(ncell->get_data(popID)); + ncell->neighbor_number_of_blocks.push_back(0); } } + + //default values, to avoid any extra sends and receives + if(ccell->neighbor_number_of_blocks.empty()) { + ccell->neighbor_number_of_blocks.push_back(0); + } + if(ccell->neighbor_block_data.empty()) { + ccell->neighbor_block_data.push_back(ccell->get_data(popID)); + } + } MPI_Barrier(MPI_COMM_WORLD); @@ -1446,6 +1460,9 @@ void update_remote_mapping_contribution( for(unsigned int cell = 0; cellget_number_of_velocity_blocks(popID); ++cell) { blockData[cell] += receiveBuffers[c][cell]; } + + spatial_cell->neighbor_number_of_blocks.clear(); + spatial_cell->neighbor_block_data.clear(); } // send cell data is set to zero. This is to avoid double copy if @@ -1460,6 +1477,9 @@ void update_remote_mapping_contribution( // copy received target data to temporary array where target data is stored. blockData[cell] = 0; } + + spatial_cell->neighbor_number_of_blocks.clear(); + spatial_cell->neighbor_block_data.clear(); } } From 1257b035b0e332081fe1b421becbe1125d386d15 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 23 Nov 2018 13:46:23 +0200 Subject: [PATCH 142/602] Changed the algorithm for calculating seed ids for pencils to fix a bug where cells with both local and remote refined neighbors were not being included. Added some debugging quantities to update_remote_contribution that will probably be removed later. Also updated printout for pencils to include path. --- vlasovsolver/cpu_trans_map_amr.cpp | 84 +++++++++++++++++------------- 1 file changed, 49 insertions(+), 35 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 73e8b9d33..4faa583d9 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -634,42 +634,43 @@ void getSeedIds(const dccrg::Dccrg& mpiGr auto myIndices = mpiGrid.mapping.get_indices(celli); - // Collect a list of cell ids that do not have a neighbor in the negative direction - // These are the seed ids for the pencils. - vector negativeNeighbors; + bool remoteNeighborExists = false; // Returns all neighbors as (id, direction-dimension) pair pointers. 
for ( const auto nbrPair : *(mpiGrid.get_neighbors_of(celli, neighborhood)) ) { - if ( nbrPair.second[dimension] == -1 && mpiGrid.is_local(nbrPair.first)) { + if ( nbrPair.second[dimension] == -1 ) { // Check that the neighbor is not across a periodic boundary by calculating // the distance in indices between this cell and its neighbor. auto nbrIndices = mpiGrid.mapping.get_indices(nbrPair.first); - if ( abs ( myIndices[dimension] - nbrIndices[dimension] ) <= - pow(2,mpiGrid.get_maximum_refinement_level()) ) { - - // select the first local neighbor in the negative direction and - // add the id of the neighbor to a list - negativeNeighbors.push_back(nbrPair.first); + // If a neighbor is non-local or across a periodic boundary, then we use this + // cell as a seed for pencils + if ( abs ( myIndices[dimension] - nbrIndices[dimension] ) > + pow(2,mpiGrid.get_maximum_refinement_level()) || + !mpiGrid.is_local(nbrPair.first)) { + + remoteNeighborExists = true; } } } + + if (remoteNeighborExists) { + seedIds.push_back(celli); + } + //cout << endl; - // if no neighbors were found in the negative direction, add this cell id to the seed cells - if (negativeNeighbors.size() == 0) - seedIds.push_back(celli); } - if(debug) { - //cout << "Number of seed ids is " << seedIds.size() << endl; - cout << "Rank " << myRank << ", Seed ids are: "; - for (const auto seedId : seedIds) { - cout << seedId << " "; - } - cout << endl; - } + // if(debug) { + //cout << "Number of seed ids is " << seedIds.size() << endl; + cout << "Rank " << myRank << ", Seed ids are: "; + for (const auto seedId : seedIds) { + cout << seedId << " "; + } + cout << endl; + // } } @@ -846,7 +847,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const Realv dt, const uint popID) { - const bool printPencils = false; + const bool printPencils = true; const bool printLines = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ @@ -954,7 +955,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& std::cout << "I am rank " << myRank << ", I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; MPI_Barrier(MPI_COMM_WORLD); if(myRank == MASTER_RANK) { - std::cout << "N, mpirank, (x, y): indices " << std::endl; + std::cout << "N, mpirank, (x, y): indices {path} " << std::endl; std::cout << "-----------------------------------------------------------------" << std::endl; } MPI_Barrier(MPI_COMM_WORLD); @@ -967,6 +968,13 @@ bool trans_map_1d_amr(const dccrg::Dccrg& std::cout << *j << " "; } ibeg = iend; + + std::cout << "{"; + for (auto step : pencils.path[i]) { + std::cout << step << ", "; + } + std::cout << "}"; + std::cout << std::endl; } @@ -1307,6 +1315,9 @@ void update_remote_mapping_contribution( vector receive_cells; vector send_cells; vector receiveBuffers; + + vector send_origin_cells; + vector receive_origin_cells; int myRank; const bool printLines = false; @@ -1359,10 +1370,10 @@ void update_remote_mapping_contribution( vector n_nbrs; vector p_nbrs; - + // Collect neighbors on the positive and negative sides into separate lists for (auto nbrPair : *nbrPairVector) { - + if (nbrPair.second.at(dimension) == direction) { p_nbrs.push_back(nbrPair.first); } @@ -1385,6 +1396,8 @@ void update_remote_mapping_contribution( ccell->neighbor_block_data.push_back(pcell->get_data(popID)); ccell->neighbor_number_of_blocks.push_back(pcell->get_number_of_velocity_blocks(popID)); send_cells.push_back(nbr); + send_origin_cells.push_back(local_cells[c]); + } else { 
ccell->neighbor_block_data.push_back(ccell->get_data(popID)); ccell->neighbor_number_of_blocks.push_back(0); @@ -1399,15 +1412,18 @@ void update_remote_mapping_contribution( } if (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && ccell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { - //Receive data that mcell mapped to ccell to this local cell + //Receive data that ncell mapped to this local cell //data array, if 1) m is a valid source cell, 2) center cell is to be updated (normal cell) 3) m is remote //we will here allocate a receive buffer, since we need to aggregate values ncell->neighbor_number_of_blocks.push_back(ccell->get_number_of_velocity_blocks(popID)); ncell->neighbor_block_data.push_back((Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.back() * WID3 * sizeof(Realf), 64)); receive_cells.push_back(local_cells[c]); + receive_origin_cells.push_back(nbr); + receiveBuffers.push_back(ncell->neighbor_block_data.back()); - } else { + + } else { ncell->neighbor_block_data.push_back(ncell->get_data(popID)); ncell->neighbor_number_of_blocks.push_back(0); } @@ -1445,24 +1461,25 @@ void update_remote_mapping_contribution( } MPI_Barrier(MPI_COMM_WORLD); - + if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; #pragma omp parallel { + std::vector receive_cells_sums; //reduce data: sum received data in the data array to // the target grid in the temporary block container for (size_t c=0; c < receive_cells.size(); ++c) { SpatialCell* spatial_cell = mpiGrid[receive_cells[c]]; Realf *blockData = spatial_cell->get_data(popID); - + + Realf checksum = 0.0; #pragma omp for for(unsigned int cell = 0; cellget_number_of_velocity_blocks(popID); ++cell) { + checksum += receiveBuffers[c][cell]; blockData[cell] += receiveBuffers[c][cell]; } - - spatial_cell->neighbor_number_of_blocks.clear(); - spatial_cell->neighbor_block_data.clear(); + receive_cells_sums.push_back(checksum); } // send cell data is set to zero. This is to avoid double copy if @@ -1477,9 +1494,6 @@ void update_remote_mapping_contribution( // copy received target data to temporary array where target data is stored. blockData[cell] = 0; } - - spatial_cell->neighbor_number_of_blocks.clear(); - spatial_cell->neighbor_block_data.clear(); } } From 3873a74ece36e23add93908084e0143131f5cbe9 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 27 Nov 2018 10:22:15 +0200 Subject: [PATCH 143/602] Fixed check_ghost_cells which was not adding the split pencils to the set. Moved debugging prints of pencils to a separate function. Fixed how paths are set in selectNeighbors for cases where some neighbors are local and some are remote. Minor fixes to source cell calculation. 
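As a rough standalone illustration of the path bookkeeping involved (a hypothetical helper, simplified from the setOfPencils::split logic revised below): each entry of a pencil's path picks one of four quadrants at successive refinement levels, and a split must skip quadrant steps that overlapping pencils already occupy so that no sub-pencil is added twice.

#include <set>
#include <vector>

// Given the parent pencil's path and the quadrant steps already claimed by
// pencils that share cells with it, return the child paths a split adds.
// Each child path is one refinement level deeper than the parent.
std::vector<std::vector<int>> childPathsForSplit(
      const std::vector<int>& parentPath,
      const std::set<int>& existingSteps) {
   std::vector<std::vector<int>> children;
   for (int step = 0; step < 4; ++step) {
      if (existingSteps.count(step) > 0) continue; // already covered elsewhere
      std::vector<int> childPath = parentPath;
      childPath.push_back(step);
      children.push_back(childPath);
   }
   return children;
}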
--- vlasovsolver/cpu_trans_map_amr.cpp | 376 ++++++++++++----------------- vlasovsolver/cpu_trans_map_amr.hpp | 60 +++-- 2 files changed, 196 insertions(+), 240 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 4faa583d9..a1a06332b 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -71,28 +71,18 @@ void computeSpatialSourceCellsForPencil(const dccrg::Dccrg ids = pencils.getIds(iPencil); - + int neighborhood = getNeighborhood(dimension,2); - // std::cout << "Source cells: "; // Get pointers for each cell id of the pencil for (int i = 0; i < L; ++i) { sourceCells[i + VLASOV_STENCIL_WIDTH] = mpiGrid[ids[i]]; - // std::cout << ids[i] << " "; } - // std::cout << endl; // Insert pointers for neighbors of ids.front() and ids.back() auto* frontNbrPairs = mpiGrid.get_neighbors_of(ids.front(), neighborhood); auto* backNbrPairs = mpiGrid.get_neighbors_of(ids.back(), neighborhood); - // std::cout << "Ghost cells: "; - // std::cout << (*frontNbrPairs)[0].first << ", " << (*frontNbrPairs)[1].first << "; "; - // std::cout << (*backNbrPairs)[VLASOV_STENCIL_WIDTH].first << ", " << (*backNbrPairs)[VLASOV_STENCIL_WIDTH + 1].first; - - const bool printDebug = false; - if(printDebug) std::cout << "Ghost cells of pencil " << iPencil << ": "; - int maxRefLvl = mpiGrid.get_maximum_refinement_level(); int iSrc = 0; @@ -100,61 +90,42 @@ void computeSpatialSourceCellsForPencil(const dccrg::Dccrg distances; for (auto nbrPair : *frontNbrPairs) { if(nbrPair.second[dimension] < 0) { - int distanceInRefinedCells = nbrPair.second[dimension] * (maxRefLvl - nbrPair.second.back() + 2); - distances.insert(distanceInRefinedCells); + distances.insert(nbrPair.second[dimension]); } } // Iterate through distances for VLASOV_STENCIL_WIDTH elements starting from the largest distance. - // Distances are negative here so the largest distance has the smallest value. + // Distances are negative here so largest distance has smallest value auto ibeg = distances.begin(); std::advance(ibeg, distances.size() - VLASOV_STENCIL_WIDTH); for (auto it = ibeg; it != distances.end(); ++it) { // Collect all neighbors at distance *it to a vector std::vector< CellID > neighbors; for (auto nbrPair : *frontNbrPairs) { - int distanceInRefinedCells = nbrPair.second[dimension] * (maxRefLvl - nbrPair.second.back() + 2); + int distanceInRefinedCells = nbrPair.second[dimension]; if(distanceInRefinedCells == *it) neighbors.push_back(nbrPair.first); } - switch (neighbors.size()) { - case 1: - { - // If there's only one cell in the neighbors vector, there's no refinement and we can just add it - sourceCells[iSrc++] = mpiGrid[neighbors.at(0)]; - if(printDebug) std::cout << neighbors.at(0) << " "; - break; - } - - case 4: - { - // If there's four cells in the neighbors vector, select one according to the path of the pencil. - int refLvl = mpiGrid.get_refinement_level(ids.front()); - sourceCells[iSrc++] = mpiGrid[neighbors.at(pencils.path[iPencil][refLvl])]; - if(printDebug) std::cout << neighbors.at(pencils.path[iPencil][refLvl]) << " "; - break; - } - - // In principle, 8,12,16 are also possibilities. Or are they? 
TODO: Investigate - default: - bailout(true, "Unexpected number of neighbors",__FILE__,__LINE__); - break; + int refLvl = mpiGrid.get_refinement_level(ids.front()); + int path = pencils.path[iPencil][refLvl]; + + if (neighbors.size() == 1) { + sourceCells[iSrc++] = mpiGrid[neighbors.at(0)]; + } else if ( path < neighbors.size() ) { + sourceCells[iSrc++] = mpiGrid[neighbors.at(path)]; } } - if(printDebug) std::cout << "... "; - iSrc = L + VLASOV_STENCIL_WIDTH; distances.clear(); // Create list of unique distances in the positive direction from the last cell in pencil for (auto nbrPair : *backNbrPairs) { if(nbrPair.second[dimension] > 0) { - int distanceInRefinedCells = nbrPair.second[dimension] * (maxRefLvl - nbrPair.second.back() + 2); - distances.insert(distanceInRefinedCells); + distances.insert(nbrPair.second[dimension]); } } - // Iterate through distances for VLASOV_STENCIL_WIDTH (2) elements starting from the smallest distance. + // Iterate through distances for VLASOV_STENCIL_WIDTH elements starting from the smallest distance. // Distances are positive here so smallest distance has smallest value. auto iend = distances.begin(); std::advance(iend,VLASOV_STENCIL_WIDTH); @@ -163,37 +134,20 @@ void computeSpatialSourceCellsForPencil(const dccrg::Dccrg neighbors; for (auto nbrPair : *backNbrPairs) { - int distanceInRefinedCells = nbrPair.second[dimension] * (maxRefLvl - nbrPair.second.back() + 2); + int distanceInRefinedCells = nbrPair.second[dimension]; if(distanceInRefinedCells == *it) neighbors.push_back(nbrPair.first); } - switch (neighbors.size()) { - case 1: - { - // If there's only one cell at distance *it, there's no refinement and we can just add it - sourceCells[iSrc++] = mpiGrid[neighbors.at(0)]; - if(printDebug) std::cout << neighbors.at(0) << " "; - break; - } - - case 4: - { - // If there's four neighbor cells, select one according to the path of the pencil. - int refLvl = mpiGrid.get_refinement_level(ids.back()); - sourceCells[iSrc++] = mpiGrid[neighbors.at(pencils.path[iPencil][refLvl])]; - if(printDebug) std::cout << neighbors.at(pencils.path[iPencil][refLvl]) << " "; - break; - } - - // In principle, 8,12,16 are also possibilities. Or are they? TODO: Investigate - default: - bailout(true, "Unexpected number of neighbors",__FILE__,__LINE__); - break; + int refLvl = mpiGrid.get_refinement_level(ids.back()); + int path = pencils.path[iPencil][refLvl]; + + if (neighbors.size() == 1) { + sourceCells[iSrc++] = mpiGrid[neighbors.at(0)]; + } else if ( path < neighbors.size() ) { + sourceCells[iSrc++] = mpiGrid[neighbors.at(path)]; } } - if(printDebug) std::cout << std::endl; - /*loop to neative side and replace all invalid cells with the closest good cell*/ SpatialCell* lastGoodCell = mpiGrid[ids.front()]; for(int i = -1;i>=-VLASOV_STENCIL_WIDTH;i--){ @@ -299,20 +253,22 @@ CellID selectNeighbor(const dccrg::Dccrg const auto* nbrPairs = grid.get_neighbors_of(id, neighborhood); vector < CellID > myNeighbors; - // Collect neighbor ids in the positive direction of the chosen dimension, - // that are on the same process as the origin. + CellID neighbor = INVALID_CELLID; + + // Iterate through neighbor ids in the positive direction of the chosen dimension, + // select the neighbor indicated by path, if it is local to this process. 
for (const auto nbrPair : *nbrPairs) { - if (nbrPair.second[dimension] == 1 && grid.is_local(nbrPair.first)) + if (nbrPair.second[dimension] == 1) { myNeighbors.push_back(nbrPair.first); + } } - CellID neighbor = INVALID_CELLID; - - if (myNeighbors.size() == 1) { - neighbor = myNeighbors[0]; - } else if ( path < myNeighbors.size() ) { - neighbor = myNeighbors[path]; - } + int neighborIndex = 0; + if (myNeighbors.size() > 1) + neighborIndex = path; + if (grid.is_local(myNeighbors[neighborIndex])) + neighbor = myNeighbors[neighborIndex]; + // std::cout << "selectNeighbor: path = " << path << " neighbors = "; // for (auto nbr : myNeighbors) std::cout << neighbor << " "; @@ -663,14 +619,14 @@ void getSeedIds(const dccrg::Dccrg& mpiGr //cout << endl; } - // if(debug) { - //cout << "Number of seed ids is " << seedIds.size() << endl; - cout << "Rank " << myRank << ", Seed ids are: "; - for (const auto seedId : seedIds) { - cout << seedId << " "; - } - cout << endl; - // } + if(debug) { + //cout << "Number of seed ids is " << seedIds.size() << endl; + cout << "Rank " << myRank << ", Seed ids are: "; + for (const auto seedId : seedIds) { + cout << seedId << " "; + } + cout << endl; + } } @@ -770,74 +726,117 @@ One neighbor pair, but takes as an argument the offset from the pencil. Call mul Multiple ghost cells. */ void check_ghost_cells(const dccrg::Dccrg& mpiGrid, - vector& pencilSets, + setOfPencils& pencils, uint dimension, int offset) { + const bool debug = false; int neighborhoodId = getNeighborhood(dimension,2); + + int myRank; + if(debug) { + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + } std::vector idsToSplit; - for (setOfPencils pencils : pencilSets) { - - for (uint pencili = 0; pencili < pencils.N; ++pencili) { + for (uint pencili = 0; pencili < pencils.N; ++pencili) { - if(pencils.periodic[pencili]) continue; + if(pencils.periodic[pencili]) continue; - auto ids = pencils.getIds(pencili); - CellID maxId = *std::max_element(ids.begin(),ids.end()); - int maxRefLvl = mpiGrid.mapping.get_refinement_level(maxId); + auto ids = pencils.getIds(pencili); + + // It is possible that the pencil has already been refined by the pencil building algorithm + // and is on a higher refinement level than the refinement level of any of the cells it contains + // due to e.g. process boundaries. + int maxPencilRefLvl = pencils.path.at(pencili).size(); + int maxNbrRefLvl = 0; - const auto* frontNeighbors = mpiGrid.get_neighbors_of(ids.front(),neighborhoodId); - const auto* backNeighbors = mpiGrid.get_neighbors_of(ids.back() ,neighborhoodId); - int refLvl = 0; + const auto* frontNeighbors = mpiGrid.get_neighbors_of(ids.front(),neighborhoodId); + const auto* backNeighbors = mpiGrid.get_neighbors_of(ids.back() ,neighborhoodId); - for (auto nbrPair: *frontNeighbors) { - if(nbrPair.second[dimension] == -offset) { - refLvl = max(refLvl,mpiGrid.mapping.get_refinement_level(nbrPair.first)); - } + for (auto nbrPair: *frontNeighbors) { + if(nbrPair.second[dimension] == -offset) { + maxNbrRefLvl = max(maxNbrRefLvl,mpiGrid.mapping.get_refinement_level(nbrPair.first)); } + } - for (auto nbrPair: *backNeighbors) { - if(nbrPair.second[dimension] == offset) { - refLvl = max(refLvl,mpiGrid.mapping.get_refinement_level(nbrPair.first)); - } + for (auto nbrPair: *backNeighbors) { + if(nbrPair.second[dimension] == offset) { + maxNbrRefLvl = max(maxNbrRefLvl,mpiGrid.get_refinement_level(nbrPair.first)); } + } - if (refLvl > maxRefLvl) { - //std::cout << "Found refinement level " << refLvl << " in one of the ghost cells. 
Splitting pencil " << pencili << endl; - // Let's avoid modifying pencils while we are looping over it. Write down the indices of pencils - // that need to be split and split them later. - idsToSplit.push_back(pencili); + if (maxNbrRefLvl > maxPencilRefLvl) { + if(debug) { + std::cout << "I am rank " << myRank << ". "; + std::cout << "Found refinement level " << maxNbrRefLvl << " in one of the ghost cells of pencil " << pencili << ". "; + std::cout << "Highest refinement level in this pencil is " << maxPencilRefLvl; + std::cout << ". Splitting pencil " << pencili << endl; } + // Let's avoid modifying pencils while we are looping over it. Write down the indices of pencils + // that need to be split and split them later. + idsToSplit.push_back(pencili); } + } - for (auto pencili: idsToSplit) { - + for (auto pencili: idsToSplit) { - Realv dx = 0.0; - Realv dy = 0.0; - // TODO: Double-check that this gives you the right dimensions! - auto ids = pencils.getIds(pencili); - switch(dimension) { - case 0: - dx = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DY]; - dy = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DZ]; - break; - case 1: - dx = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DX]; - dy = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DZ]; - break; - case 2: - dx = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DX]; - dy = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DY]; - break; - } + Realv dx = 0.0; + Realv dy = 0.0; + // TODO: Double-check that this gives you the right dimensions! + auto ids = pencils.getIds(pencili); + switch(dimension) { + case 0: + dx = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DY]; + dy = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DZ]; + break; + case 1: + dx = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DX]; + dy = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DZ]; + break; + case 2: + dx = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DX]; + dy = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DY]; + break; + } - pencils.split(pencili,dx,dy); + pencils.split(pencili,dx,dy); + } +} + +void printPencilsFunc(const setOfPencils& pencils, const uint dimension, const int myRank) { + + // Print out ids of pencils (if needed for debugging) + uint ibeg = 0; + uint iend = 0; + std::cout << "I am rank " << myRank << ", I have " << pencils.N << " pencils along dimension " << dimension << ":\n"; + MPI_Barrier(MPI_COMM_WORLD); + if(myRank == MASTER_RANK) { + std::cout << "N, mpirank, (x, y): indices {path} " << std::endl; + std::cout << "-----------------------------------------------------------------" << std::endl; + } + MPI_Barrier(MPI_COMM_WORLD); + for (uint i = 0; i < pencils.N; i++) { + iend += pencils.lengthOfPencils[i]; + std::cout << i << ", "; + std::cout << myRank << ", "; + std::cout << "(" << pencils.x[i] << ", " << pencils.y[i] << "): "; + for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { + std::cout << *j << " "; + } + ibeg = iend; + + std::cout << "{"; + for (auto step : pencils.path[i]) { + std::cout << step << ", "; } + std::cout << "}"; + + std::cout << std::endl; } + } bool trans_map_1d_amr(const dccrg::Dccrg& mpiGrid, @@ -921,10 +920,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // **************************************************************************** // compute pencils => set of pencils (shared datastructure) - - // std::cout << "LocalPropagatedCells: "; - // for (const auto id : localPropagatedCells) std::cout << id << " 
"; - // std::cout << endl; vector seedIds; getSeedIds(mpiGrid, localPropagatedCells, dimension, seedIds); @@ -945,100 +940,28 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Construct pencils from the seedIds into a set of pencils. pencils = buildPencilsWithNeighbors(mpiGrid, pencils, seedId, ids, dimension, path); } - + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - - // Print out ids of pencils (if needed for debugging) - if (printPencils) { - uint ibeg = 0; - uint iend = 0; - std::cout << "I am rank " << myRank << ", I have created " << pencils.N << " pencils along dimension " << dimension << ":\n"; - MPI_Barrier(MPI_COMM_WORLD); - if(myRank == MASTER_RANK) { - std::cout << "N, mpirank, (x, y): indices {path} " << std::endl; - std::cout << "-----------------------------------------------------------------" << std::endl; - } - MPI_Barrier(MPI_COMM_WORLD); - for (uint i = 0; i < pencils.N; i++) { - iend += pencils.lengthOfPencils[i]; - std::cout << i << ", "; - std::cout << myRank << ", "; - std::cout << "(" << pencils.x[i] << ", " << pencils.y[i] << "): "; - for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { - std::cout << *j << " "; - } - ibeg = iend; - - std::cout << "{"; - for (auto step : pencils.path[i]) { - std::cout << step << ", "; - } - std::cout << "}"; - - std::cout << std::endl; - } - - // CellID idX = 114; - // const auto* neighborsX = mpiGrid.get_neighbors_of(idX, VLASOV_SOLVER_X_NEIGHBORHOOD_ID); - // if (neighborsX != NULL) { - // std::cout << "Neighbors of cell " << idX << " in x dimension" << std::endl; - // for (auto neighbor : *neighborsX) { - // std::cout << neighbor.first << ", "; - // for (int n = 0; n < 4; ++n) { - // std::cout << neighbor.second[n] << " "; - // } - // std::cout << std::endl; - // } - // } - - // CellID idY = 114; - // const auto* neighborsY = mpiGrid.get_neighbors_of(idY, VLASOV_SOLVER_Y_NEIGHBORHOOD_ID); - // if (neighborsY != NULL) { - // std::cout << "Neighbors of cell " << idY << " in y dimension" << std::endl; - // for (auto neighbor : *neighborsY) { - // std::cout << neighbor.first << ", "; - // for (int n = 0; n < 4; ++n) { - // std::cout << neighbor.second[n] << " "; - // } - // std::cout << std::endl; - // } - // } - - // CellID idZ = 114; - // const auto* neighborsZ = mpiGrid.get_neighbors_of(idZ, VLASOV_SOLVER_Z_NEIGHBORHOOD_ID); - // if (neighborsZ != NULL) { - // std::cout << "Neighbors of cell " << idZ << " in z dimension" << std::endl; - // for (auto neighbor : *neighborsZ) { - // std::cout << neighbor.first << ", "; - // for (int n = 0; n < 4; ++n) { - // std::cout << neighbor.second[n] << " "; - // } - // std::cout << std::endl; - // } - // } + + // Check refinement of two ghost cells on each end of each pencil + for (int offset = 1; offset <= VLASOV_STENCIL_WIDTH; ++offset) { + check_ghost_cells(mpiGrid,pencils,dimension,offset); } + // **************************************************************************** + + if(printPencils) printPencilsFunc(pencils,dimension,myRank); // Add the final set of pencils to the pencilSets - vector. 
// Only one set is created for now but we retain support for multiple sets pencilSets.push_back(pencils); - - // Check refinement of two ghost cells on each end of each pencil - for (int offset = 1; offset <= VLASOV_STENCIL_WIDTH; ++offset) { - check_ghost_cells(mpiGrid,pencilSets,dimension,offset); - } - // **************************************************************************** - + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; const uint8_t VMESH_REFLEVEL = 0; // Get a pointer to the velocity mesh of the first spatial cell const vmesh::VelocityMesh& vmesh = allCellsPointer[0]->get_velocity_mesh(popID); - - // set cell size in dimension direction - dvz = vmesh.getCellSize(VMESH_REFLEVEL)[dimension]; - vz_min = vmesh.getMeshMinLimits()[dimension]; - + // Get a unique sorted list of blockids that are in any of the // propagated cells. First use set for this, then add to vector (may not // be the most nice way to do this and in any case we could do it along @@ -1118,7 +1041,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& uint totalTargetLength = 0; for(uint pencili = 0; pencili < pencils.N; ++pencili){ - vector pencilIds = pencils.getIds(pencili); int L = pencils.lengthOfPencils[pencili]; uint targetLength = L + 2; uint sourceLength = L + 2 * VLASOV_STENCIL_WIDTH; @@ -1129,6 +1051,12 @@ bool trans_map_1d_amr(const dccrg::Dccrg& computeSpatialSourceCellsForPencil(mpiGrid, pencils, pencili, dimension, sourceCells.data()); + std::cout << "Source cells for pencil " << pencili << ", rank " << myRank << ": "; + for (auto cell : sourceCells) { + std::cout << cell->parameters[CellParams::CELLID] << " "; + } + std::cout << std::endl; + if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; Vec dz[sourceCells.size()]; @@ -1396,14 +1324,18 @@ void update_remote_mapping_contribution( ccell->neighbor_block_data.push_back(pcell->get_data(popID)); ccell->neighbor_number_of_blocks.push_back(pcell->get_number_of_velocity_blocks(popID)); send_cells.push_back(nbr); - send_origin_cells.push_back(local_cells[c]); - - } else { - ccell->neighbor_block_data.push_back(ccell->get_data(popID)); - ccell->neighbor_number_of_blocks.push_back(0); + send_origin_cells.push_back(local_cells[c]); } } + // Default values, after the loop. + if(ccell->neighbor_block_data.empty()) { + ccell->neighbor_block_data.push_back(ccell->get_data(popID)); + } + if(ccell->neighbor_number_of_blocks.empty()) { + ccell->neighbor_number_of_blocks.push_back(0); + } + for (auto nbr : n_nbrs) { SpatialCell *ncell = NULL; diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index 1824be354..321c7a87b 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -79,27 +79,51 @@ struct setOfPencils { // Split one pencil into four pencils covering the same space. // dx and dy are the dimensions of the original pencil. 
- void split(const uint pencilId, const Realv dx, const Realv dy) { + void split(const uint myPencilId, const Realv dx, const Realv dy) { - auto ids = getIds(pencilId); - auto path1 = path.at(pencilId); - auto path2 = path.at(pencilId); - auto path3 = path.at(pencilId); - - path1.push_back(1); - path2.push_back(2); - path3.push_back(3); - - x[pencilId] -= 0.25 * dx; - y[pencilId] += 0.25 * dy; - path.at(pencilId).push_back(0); + auto ids = getIds(myPencilId); - addPencil(ids, x[pencilId] + 0.25 * dx, y[pencilId] + 0.25 * dy, periodic[pencilId], path1); - addPencil(ids, x[pencilId] - 0.25 * dx, y[pencilId] - 0.25 * dy, periodic[pencilId], path2); - addPencil(ids, x[pencilId] + 0.25 * dx, y[pencilId] - 0.25 * dy, periodic[pencilId], path3); - - } + x[myPencilId] -= 0.25 * dx; + y[myPencilId] += 0.25 * dy; + + + // Find paths that members of this pencil may have in other pencils (can happen) + // so that we don't add duplicates. + std::vector existingSteps; + for (int theirPencilId = 0; theirPencilId < N; ++theirPencilId) { + if(theirPencilId == myPencilId) continue; + auto theirIds = getIds(theirPencilId); + for (auto theirId : theirIds) { + for (auto myId : ids) { + if (myId == theirId) { + auto theirPath = path.at(theirPencilId); + auto myPath = path.at(myPencilId); + auto theirStep = theirPath.at(myPath.size()); + existingSteps.push_back(theirStep); + } + } + } + } + bool firstPencil = true; + const auto copy_of_path = path.at(myPencilId); + + // Add those pencils whose steps dont already exist in the pencils struct + for (int step = 0; step < 4; ++step) { + if (std::any_of(existingSteps.begin(), existingSteps.end(), [step](int i){return step == i;})) { + continue; + } + + if(firstPencil) { + path.at(myPencilId).push_back(step); + firstPencil = false; + } else { + auto myPath = copy_of_path; + myPath.push_back(step); + addPencil(ids, x.at(myPencilId) + 0.25 * dx, y.at(myPencilId) + 0.25 * dy, periodic.at(myPencilId), myPath); + } + } + } }; void compute_spatial_source_cells_for_pencil(const dccrg::Dccrg& mpiGrid, From 338bcf73d4e6e49fd072e4d914e9948cc18184d4 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 27 Nov 2018 14:44:28 +0200 Subject: [PATCH 144/602] Similar modification to spatialTarget calculation as was done to the spatialSource calculation in the previous commit. 
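The shared selection pattern for both source and target ghost cells can be condensed into the following sketch (a hypothetical helper; the real code first collects the unique face-neighbor distances into a std::set and then applies this choice at each distance): an unrefined face offers exactly one candidate, a refined face offers four, and the pencil's path step at the relevant refinement level picks the matching quadrant.

#include <cstddef>
#include <cstdint>
#include <vector>

typedef uint64_t CellID;         // as in the codebase
const CellID INVALID_CELLID = 0; // placeholder value for this sketch

// candidates holds the face neighbors found at one unique distance.
CellID pickGhostCell(const std::vector<CellID>& candidates, std::size_t pathStep) {
   if (candidates.size() == 1) return candidates[0];              // unrefined face
   if (pathStep < candidates.size()) return candidates[pathStep]; // refined face
   return INVALID_CELLID; // the pencil's path does not reach this quadrant
}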
--- vlasovsolver/cpu_trans_map_amr.cpp | 56 +++++++++++++----------------- 1 file changed, 25 insertions(+), 31 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index a1a06332b..dd79942a1 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -211,33 +211,23 @@ void computeSpatialTargetCellsForPencils(const dccrg::Dccrg& const Realv dt, const uint popID) { - const bool printPencils = true; + const bool printPencils = false; const bool printLines = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ @@ -1040,7 +1030,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Loop over pencils uint totalTargetLength = 0; for(uint pencili = 0; pencili < pencils.N; ++pencili){ - + int L = pencils.lengthOfPencils[pencili]; uint targetLength = L + 2; uint sourceLength = L + 2 * VLASOV_STENCIL_WIDTH; @@ -1051,11 +1041,11 @@ bool trans_map_1d_amr(const dccrg::Dccrg& computeSpatialSourceCellsForPencil(mpiGrid, pencils, pencili, dimension, sourceCells.data()); - std::cout << "Source cells for pencil " << pencili << ", rank " << myRank << ": "; - for (auto cell : sourceCells) { - std::cout << cell->parameters[CellParams::CELLID] << " "; - } - std::cout << std::endl; + // std::cout << "Source cells for pencil " << pencili << ", rank " << myRank << ": "; + // for (auto cell : sourceCells) { + // std::cout << cell->parameters[CellParams::CELLID] << " "; + // } + // std::cout << std::endl; if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; @@ -1095,9 +1085,11 @@ bool trans_map_1d_amr(const dccrg::Dccrg& propagatePencil(dz, sourceVecData, dimension, blockGID, dt, vmesh, L); if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - + + //std::cout << "Target cells for pencil " << pencili << ", rank " << myRank << ": "; // sourcedata => targetdata[this pencil]) for (uint i = 0; i < targetLength; i++) { + //std::cout << targetCells[i + totalTargetLength]->parameters[CellParams::CELLID] << " "; for (uint k=0; k& } } } + //std::cout << std::endl; totalTargetLength += targetLength; // dealloc source data -- Should be automatic since it's declared in this loop iteration? } // reset blocks in all non-sysboundary neighbor spatial cells for this block id // At this point the data is saved in targetVecData so we can reset the spatial cells + for (auto *spatial_cell: targetCells) { // Check for system boundary if(spatial_cell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { From 9f3b137dd9f4a2593e52abe0395255b5538b34e7 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 27 Nov 2018 15:40:58 +0200 Subject: [PATCH 145/602] Calculation of areaRatio in trans_map_1d_amr was using the highest refinement level within the cells of the pencil as a measure of the cell volume covered by the pencil. This was not correct in the edge case where remote neighbors determine the pencil size. 
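The corrected weighting amounts to the following (a minimal sketch under the patch's convention that a pencil's refinement level is the length of its path): a pencil at refinement level L covers only 1/4^(L - Lcell) of the transverse face area of a cell at refinement level Lcell, because each refinement halves the pencil's cross-section in both perpendicular directions.

// Fraction of a cell's face area covered by one pencil. pencilRefLvl comes
// from pencils.path[pencili].size(), not from the refinement level of the
// cells stored in the pencil, which may be coarser near process boundaries.
double pencilAreaRatio(int pencilRefLvl, int cellRefLvl) {
   const int d = pencilRefLvl - cellRefLvl; // >= 0 for cells the pencil crosses
   return 1.0 / double(1 << (2 * d));       // = 1 / 4^d
}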
--- vlasovsolver/cpu_trans_map_amr.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index dd79942a1..db033fc0b 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1142,7 +1142,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& int maxRefLvl = 0; int minRefLvl = mpiGrid.get_maximum_refinement_level(); for (auto id : pencilIds) { - int refLvl = mpiGrid.get_refinement_level(id); + //int refLvl = mpiGrid.get_refinement_level(id); + int refLvl = pencils.path[pencili].size(); maxRefLvl = max(maxRefLvl,refLvl); minRefLvl = min(minRefLvl,refLvl); } @@ -1198,7 +1199,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& if (spatial_cell->parameters[CellParams::REFINEMENT_LEVEL] < maxRefLvl) { areaRatio = 1.0 / pow(pow(2, maxRefLvl - spatial_cell->parameters[CellParams::REFINEMENT_LEVEL]), 2); } - + for(int i = 0; i < WID3 ; i++) { blockData[i] += targetBlockData[GID * WID3 + i] * areaRatio; } From e002270faf3144907b3548e193dd5fa990972674 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 28 Nov 2018 15:03:38 +0200 Subject: [PATCH 146/602] Added private temporary variables for neighbor_number_of_blocks and neighbor_block_data for clarity. Also clarified velocity space loop index variables. --- vlasovsolver/cpu_trans_map_amr.cpp | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index db033fc0b..23fe229cd 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1086,10 +1086,10 @@ bool trans_map_1d_amr(const dccrg::Dccrg& if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - //std::cout << "Target cells for pencil " << pencili << ", rank " << myRank << ": "; + if (printPencils) std::cout << "Target cells for pencil " << pencili << ", rank " << myRank << ": "; // sourcedata => targetdata[this pencil]) for (uint i = 0; i < targetLength; i++) { - //std::cout << targetCells[i + totalTargetLength]->parameters[CellParams::CELLID] << " "; + if (printPencils) std::cout << targetCells[i + totalTargetLength]->parameters[CellParams::CELLID] << " "; for (uint k=0; k& } } } - //std::cout << std::endl; + if (printPencils) std::cout << std::endl; totalTargetLength += targetLength; // dealloc source data -- Should be automatic since it's declared in this loop iteration? 
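The hunk below names the receive-buffer temporaries explicitly: one aligned buffer is allocated per remote neighbor, and the same pointer is stored both in the neighbor cell and in receiveBuffers so the data can be reduced and freed after the exchange. A self-contained sketch of that allocation pattern; std::aligned_alloc (C++17) stands in for Vlasiator's aligned_malloc, and the block count is illustrative:

```
#include <cstdlib>
#include <vector>

using Realf = float;
constexpr std::size_t WID3 = 64; // 4*4*4 velocity cells per block

int main() {
   std::vector<Realf*> receiveBuffers;

   const std::size_t numberOfBlocks = 100; // stand-in for get_number_of_velocity_blocks
   // One aligned buffer per remote neighbor; the pointer is also kept in
   // a list so the received data can be summed in and freed afterwards.
   Realf* buffer = static_cast<Realf*>(
      std::aligned_alloc(64, numberOfBlocks * WID3 * sizeof(Realf)));
   receiveBuffers.push_back(buffer);

   // ... MPI receives would fill the buffer here ...

   for (Realf* b : receiveBuffers) {
      std::free(b);
   }
   return 0;
}
```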
} @@ -1342,13 +1342,17 @@ void update_remote_mapping_contribution( //Receive data that ncell mapped to this local cell //data array, if 1) m is a valid source cell, 2) center cell is to be updated (normal cell) 3) m is remote //we will here allocate a receive buffer, since we need to aggregate values - ncell->neighbor_number_of_blocks.push_back(ccell->get_number_of_velocity_blocks(popID)); - ncell->neighbor_block_data.push_back((Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.back() * WID3 * sizeof(Realf), 64)); + + vmesh::LocalID neighbor_number_of_blocks = ccell->get_number_of_velocity_blocks(popID); + Realf* neighbor_block_data_pointer = (Realf*) aligned_malloc(neighbor_number_of_blocks * WID3 * sizeof(Realf), 64); + + ncell->neighbor_number_of_blocks.push_back(neighbor_number_of_blocks); + ncell->neighbor_block_data.push_back(neighbor_block_data_pointer); receive_cells.push_back(local_cells[c]); receive_origin_cells.push_back(nbr); - receiveBuffers.push_back(ncell->neighbor_block_data.back()); + receiveBuffers.push_back(neighbor_block_data_pointer); } else { ncell->neighbor_block_data.push_back(ncell->get_data(popID)); @@ -1402,9 +1406,9 @@ void update_remote_mapping_contribution( Realf checksum = 0.0; #pragma omp for - for(unsigned int cell = 0; cellget_number_of_velocity_blocks(popID); ++cell) { - checksum += receiveBuffers[c][cell]; - blockData[cell] += receiveBuffers[c][cell]; + for(unsigned int vCell = 0; vCellget_number_of_velocity_blocks(popID); ++vCell) { + checksum += receiveBuffers[c][vCell]; + blockData[vCell] += receiveBuffers[c][vCell]; } receive_cells_sums.push_back(checksum); } @@ -1417,9 +1421,9 @@ void update_remote_mapping_contribution( Realf * blockData = spatial_cell->get_data(popID); #pragma omp for nowait - for(unsigned int cell = 0; cell< VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++cell) { + for(unsigned int vCell = 0; vCell< VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { // copy received target data to temporary array where target data is stored. - blockData[cell] = 0; + blockData[vCell] = 0; } } } From 1e935191c3c7cd13142a70fdfebdebea389ff424 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 29 Nov 2018 12:02:41 +0200 Subject: [PATCH 147/602] Cleanup, mostly cosmetic. 
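One of the cleanups below short-circuits local cells whose entire neighborhood is local, since such cells take no part in the remote mapping exchange. The std::all_of idiom in isolation; is_local here is a toy stand-in for mpiGrid.is_local:

```
#include <algorithm>
#include <array>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// Toy stand-in for mpiGrid.is_local(): even cell ids count as local.
bool is_local(uint64_t id) { return id % 2 == 0; }

int main() {
   // Pairs of (neighbor id, offset), shaped like dccrg's get_neighbors_of output.
   std::vector<std::pair<uint64_t, std::array<int, 4>>> nbrPairs = {
      {2, {}}, {4, {}}, {6, {}}};

   // Nothing to send or receive if every neighbor lives on this rank.
   if (std::all_of(nbrPairs.begin(), nbrPairs.end(),
                   [](const auto& p) { return is_local(p.first); })) {
      std::cout << "all neighbors local, skip this cell\n";
   }
   return 0;
}
```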
--- vlasovsolver/cpu_trans_map_amr.cpp | 119 +++++++++++------------------ 1 file changed, 46 insertions(+), 73 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 23fe229cd..03e4e3a6e 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1245,52 +1245,42 @@ void update_remote_mapping_contribution( int myRank; const bool printLines = false; - MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + if(printLines) MPI_Comm_rank(MPI_COMM_WORLD,&myRank); if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; //normalize if(direction > 0) direction = 1; if(direction < 0) direction = -1; - for (size_t c=0; cneighbor_block_data.clear(); ccell->neighbor_number_of_blocks.clear(); - - //ccell->neighbor_block_data.push_back(ccell->get_data(popID)); - //ccell->neighbor_number_of_blocks.push_back(0); } - if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; - - //TODO: prepare arrays, make parallel by avoidin push_back and by checking also for other stuff - for (size_t c = 0; c < local_cells.size(); ++c) { - - SpatialCell *ccell = mpiGrid[local_cells[c]]; + for (auto c : local_cells) { + SpatialCell *ccell = mpiGrid[c]; // Initialize to empty vectors, add default values at the end. ccell->neighbor_block_data.clear(); ccell->neighbor_number_of_blocks.clear(); - CellID p_ngbr,m_ngbr; + } - int neighborhood = 0; - switch (dimension) { - case 0: - neighborhood = VLASOV_SOLVER_TARGET_X_NEIGHBORHOOD_ID; - break; - case 1: - neighborhood = VLASOV_SOLVER_TARGET_Y_NEIGHBORHOOD_ID; - break; - case 2: - neighborhood = VLASOV_SOLVER_TARGET_Z_NEIGHBORHOOD_ID; - break; - } - auto nbrPairVector = mpiGrid.get_neighbors_of(local_cells[c], neighborhood); + if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; + + for (auto c : local_cells) { + + SpatialCell *ccell = mpiGrid[c]; + + int neighborhood = getNeighborhood(dimension,1); + auto* nbrPairVector = mpiGrid.get_neighbors_of(c, neighborhood); if (all_of(nbrPairVector->begin(), nbrPairVector->end(), [mpiGrid](pair > p){return mpiGrid.is_local(p.first);})) { continue; } + // Initialize to empty vectors, add default values at the end. vector n_nbrs; vector p_nbrs; @@ -1308,66 +1298,50 @@ void update_remote_mapping_contribution( for (auto nbr : p_nbrs) { - SpatialCell *pcell = NULL; - if (nbr != INVALID_CELLID) { - pcell = mpiGrid[nbr]; - } - if (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && do_translate_cell(ccell) && - pcell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { - //Send data in nbr target array that we just mapped to if 1) it is a valid target, - //2) is remote cell, 3) the source cell in center was translated, 4) it is not a boundary cell? - ccell->neighbor_block_data.push_back(pcell->get_data(popID)); - ccell->neighbor_number_of_blocks.push_back(pcell->get_number_of_velocity_blocks(popID)); - send_cells.push_back(nbr); - send_origin_cells.push_back(local_cells[c]); + if (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && do_translate_cell(ccell)) { + SpatialCell *pcell = mpiGrid[nbr]; + if(pcell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + //Send data in nbr target array that we just mapped to if 1) it is a valid target, + //2) is remote cell, 3) the source cell in center was translated, 4) it is not a boundary cell? 
+ + ccell->neighbor_block_data.push_back(pcell->get_data(popID)); + ccell->neighbor_number_of_blocks.push_back(pcell->get_number_of_velocity_blocks(popID)); + send_cells.push_back(nbr); + send_origin_cells.push_back(c); + } } } // Default values, after the loop. - if(ccell->neighbor_block_data.empty()) { + if(ccell->neighbor_block_data.empty() && ccell->neighbor_number_of_blocks.empty()) { ccell->neighbor_block_data.push_back(ccell->get_data(popID)); - } - if(ccell->neighbor_number_of_blocks.empty()) { ccell->neighbor_number_of_blocks.push_back(0); } for (auto nbr : n_nbrs) { - SpatialCell *ncell = NULL; - if (nbr != INVALID_CELLID) { - ncell = mpiGrid[nbr]; - } if (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && - ccell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { - //Receive data that ncell mapped to this local cell - //data array, if 1) m is a valid source cell, 2) center cell is to be updated (normal cell) 3) m is remote + ccell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + //Receive data that ncell mapped to this local cell data array, + //if 1) ncell is a valid source cell, 2) center cell is to be updated (normal cell) 3) ncell is remote //we will here allocate a receive buffer, since we need to aggregate values + SpatialCell *ncell = mpiGrid[nbr]; + vmesh::LocalID neighbor_number_of_blocks = ccell->get_number_of_velocity_blocks(popID); Realf* neighbor_block_data_pointer = (Realf*) aligned_malloc(neighbor_number_of_blocks * WID3 * sizeof(Realf), 64); ncell->neighbor_number_of_blocks.push_back(neighbor_number_of_blocks); ncell->neighbor_block_data.push_back(neighbor_block_data_pointer); - receive_cells.push_back(local_cells[c]); - receive_origin_cells.push_back(nbr); - + receive_cells.push_back(c); receiveBuffers.push_back(neighbor_block_data_pointer); - } else { - ncell->neighbor_block_data.push_back(ncell->get_data(popID)); - ncell->neighbor_number_of_blocks.push_back(0); + // For debugging + receive_origin_cells.push_back(nbr); + } } - - //default values, to avoid any extra sends and receives - if(ccell->neighbor_number_of_blocks.empty()) { - ccell->neighbor_number_of_blocks.push_back(0); - } - if(ccell->neighbor_block_data.empty()) { - ccell->neighbor_block_data.push_back(ccell->get_data(popID)); - } - } MPI_Barrier(MPI_COMM_WORLD); @@ -1395,18 +1369,18 @@ void update_remote_mapping_contribution( if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; -#pragma omp parallel + //#pragma omp parallel { std::vector receive_cells_sums; //reduce data: sum received data in the data array to // the target grid in the temporary block container - for (size_t c=0; c < receive_cells.size(); ++c) { + for (size_t c = 0; c < receive_cells.size(); ++c) { SpatialCell* spatial_cell = mpiGrid[receive_cells[c]]; Realf *blockData = spatial_cell->get_data(popID); Realf checksum = 0.0; -#pragma omp for - for(unsigned int vCell = 0; vCellget_number_of_velocity_blocks(popID); ++vCell) { + //#pragma omp for + for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { checksum += receiveBuffers[c][vCell]; blockData[vCell] += receiveBuffers[c][vCell]; } @@ -1414,14 +1388,13 @@ void update_remote_mapping_contribution( } // send cell data is set to zero. 
This is to avoid double copy if - // one cell is the neighbor on bot + and - side to the same - // process - for (size_t c=0; cget_data(popID); -#pragma omp for nowait - for(unsigned int vCell = 0; vCell< VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { + //#pragma omp for nowait + for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { // copy received target data to temporary array where target data is stored. blockData[vCell] = 0; } From 04392a5df2ead628b09200e70c89d972fc1012a6 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 29 Nov 2018 15:01:03 +0200 Subject: [PATCH 148/602] Bug sort-of fixed in update_remote_mapping_contribution. Receivebuffer values have been scaled by the number of cells the target cell is receiving from. This seems to work. --- vlasovsolver/cpu_trans_map_amr.cpp | 10 +++++++--- vlasovsolver/cpu_trans_map_amr.hpp | 1 + 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 03e4e3a6e..964e34217 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1370,7 +1370,7 @@ void update_remote_mapping_contribution( if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; //#pragma omp parallel - { + //{ std::vector receive_cells_sums; //reduce data: sum received data in the data array to // the target grid in the temporary block container @@ -1379,10 +1379,13 @@ void update_remote_mapping_contribution( Realf *blockData = spatial_cell->get_data(popID); Realf checksum = 0.0; + + int numReceiveCells = count(receive_cells.begin(), receive_cells.end(), receive_cells[c]); + //#pragma omp for for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { checksum += receiveBuffers[c][vCell]; - blockData[vCell] += receiveBuffers[c][vCell]; + blockData[vCell] += receiveBuffers[c][vCell] / numReceiveCells; } receive_cells_sums.push_back(checksum); } @@ -1399,8 +1402,9 @@ void update_remote_mapping_contribution( blockData[vCell] = 0; } } - } + //} + MPI_Barrier(MPI_COMM_WORLD); if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; //and finally free temporary receive buffer diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index 321c7a87b..e3600cca2 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -115,6 +115,7 @@ struct setOfPencils { } if(firstPencil) { + //TODO: set x and y correctly. Right now they are not used anywhere. path.at(myPencilId).push_back(step); firstPencil = false; } else { From 9bd2f12ebc5a5db1cf0ad7c3a43a69d66f835fd2 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 29 Nov 2018 15:08:56 +0200 Subject: [PATCH 149/602] Added comments --- vlasovsolver/vlasovmover.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index aaa634c7f..ec44accea 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -166,9 +166,9 @@ void calculateSpatialTranslation( } if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - + + // MPI_Barrier(MPI_COMM_WORLD); // bailout(true, "", __FILE__, __LINE__); - // throw; } /*! 
From 59d5e80228921f23a91408df9f3fab8ca4c5a159 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 3 Dec 2018 11:46:31 +0200 Subject: [PATCH 150/602] Changed neighborhood for update of ioLocalCellId from NEAREST to FULL --- iowrite.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/iowrite.cpp b/iowrite.cpp index a84281d58..0b262684b 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -80,7 +80,7 @@ bool updateLocalIds( dccrg::Dccrg& mpiGr } //Update the local ids (let the other processes know they've been updated) SpatialCell::set_mpi_transfer_type(Transfer::CELL_IOLOCALCELLID); - mpiGrid.update_copies_of_remote_neighbors(NEAREST_NEIGHBORHOOD_ID); + mpiGrid.update_copies_of_remote_neighbors(FULL_NEIGHBORHOOD_ID); return true; } From af123941b97ffea32ac6d501fc24e3f3025e702e Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 3 Dec 2018 11:47:35 +0200 Subject: [PATCH 151/602] Changed vector fields of spatial_cell neighbor_block_data and neighbor_number_of_blocks to static arrays of size 4 to avoid communication hiccups. --- definitions.h | 3 + spatial_cell.cpp | 6 +- spatial_cell.hpp | 4 +- vlasovsolver/cpu_trans_map_amr.cpp | 96 ++++++++++++++++++++---------- 4 files changed, 73 insertions(+), 36 deletions(-) diff --git a/definitions.h b/definitions.h index 96cb1a392..8f8308494 100644 --- a/definitions.h +++ b/definitions.h @@ -129,4 +129,7 @@ typedef Realf (*AmrVelRefinement)(const Realf* velBlock); #define VLASOV_STENCIL_WIDTH 3 #endif +// Max number of face neighbors per dimension with AMR +#define MAX_FACE_NEIGHBORS_PER_DIM 4 + #endif diff --git a/spatial_cell.cpp b/spatial_cell.cpp index c416bdd6e..4ee18d2c6 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -659,7 +659,7 @@ namespace spatial_cell { * neighbor. The values of neighbor_block_data * and neighbor_number_of_blocks should be set in * solver.*/ - for ( int i = 0; i < this->neighbor_block_data.size(); ++i) { + for ( int i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { displacements.push_back((uint8_t*) this->neighbor_block_data[i] - (uint8_t*) this); block_lengths.push_back(sizeof(Realf) * VELOCITY_BLOCK_LENGTH* this->neighbor_number_of_blocks[i]); } @@ -838,8 +838,10 @@ namespace spatial_cell { const bool printMpiDatatype = false; if(printMpiDatatype) { int mpiSize; + int myRank; MPI_Type_size(datatype,&mpiSize); - cout << "get_mpi_datatype: " << cellID << " " << sender_rank << " " << receiver_rank << " " << mpiSize << endl; + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + cout << myRank << " get_mpi_datatype: " << cellID << " " << sender_rank << " " << receiver_rank << " " << mpiSize << endl; } return std::make_tuple(address,count,datatype); diff --git a/spatial_cell.hpp b/spatial_cell.hpp index 083f0c971..b5d343617 100644 --- a/spatial_cell.hpp +++ b/spatial_cell.hpp @@ -333,9 +333,9 @@ namespace spatial_cell { //Realf* neighbor_block_data; /**< Pointers for translation operator. We can point to neighbor // * cell block data. We do not allocate memory for the pointer.*/ //vmesh::LocalID neighbor_number_of_blocks; - std::vector neighbor_block_data; /**< Pointers for translation operator. We can point to neighbor + std::array neighbor_block_data; /**< Pointers for translation operator. We can point to neighbor * cell block data. We do not allocate memory for the pointer.*/ - std::vector neighbor_number_of_blocks; + std::array neighbor_number_of_blocks; uint sysBoundaryFlag; /**< What type of system boundary does the cell belong to. 
* Enumerated in the sysboundarytype namespace's enum.*/ uint sysBoundaryLayer; /**< Layers counted from closest systemBoundary. If 0 then it has not diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 964e34217..7d7e41943 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1242,7 +1242,10 @@ void update_remote_mapping_contribution( vector send_origin_cells; vector receive_origin_cells; - int myRank; + + std::map i_remote_cells; + + int myRank; const bool printLines = false; if(printLines) MPI_Comm_rank(MPI_COMM_WORLD,&myRank); @@ -1251,29 +1254,34 @@ void update_remote_mapping_contribution( //normalize if(direction > 0) direction = 1; if(direction < 0) direction = -1; + for (auto c : remote_cells) { SpatialCell *ccell = mpiGrid[c]; - // Initialize to empty vectors, add default values at the end. - ccell->neighbor_block_data.clear(); - ccell->neighbor_number_of_blocks.clear(); - } + // Initialize number of blocks to 0 and block data to a default value + for (uint i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { + ccell->neighbor_block_data[i] = ccell->get_data(popID); + ccell->neighbor_number_of_blocks[i] = 0; + } - for (auto c : local_cells) { - SpatialCell *ccell = mpiGrid[c]; - // Initialize to empty vectors, add default values at the end. - ccell->neighbor_block_data.clear(); - ccell->neighbor_number_of_blocks.clear(); + i_remote_cells[c] = 0; } + if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; - for (auto c : local_cells) { + for (uint c = 0; c < local_cells.size(); ++c) { - SpatialCell *ccell = mpiGrid[c]; - + SpatialCell *ccell = mpiGrid[local_cells[c]]; + + // Initialize number of blocks to 0 and block data to a default value + for (uint i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { + ccell->neighbor_block_data[i] = ccell->get_data(popID); + ccell->neighbor_number_of_blocks[i] = 0; + } + int neighborhood = getNeighborhood(dimension,1); - auto* nbrPairVector = mpiGrid.get_neighbors_of(c, neighborhood); + auto* nbrPairVector = mpiGrid.get_neighbors_of(local_cells[c], neighborhood); if (all_of(nbrPairVector->begin(), nbrPairVector->end(), [mpiGrid](pair > p){return mpiGrid.is_local(p.first);})) { @@ -1296,30 +1304,43 @@ void update_remote_mapping_contribution( } } + uint i_nbr = 0; + // ccell adds a neighbor_block_data block for each neighbor in the positive direction to its local data for (auto nbr : p_nbrs) { + if (i_nbr >= MAX_FACE_NEIGHBORS_PER_DIM) { + std::cout << "Error: neighbor count is greater than 4"; + break; + } + if (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && do_translate_cell(ccell)) { SpatialCell *pcell = mpiGrid[nbr]; if(pcell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { //Send data in nbr target array that we just mapped to if 1) it is a valid target, //2) is remote cell, 3) the source cell in center was translated, 4) it is not a boundary cell? - ccell->neighbor_block_data.push_back(pcell->get_data(popID)); - ccell->neighbor_number_of_blocks.push_back(pcell->get_number_of_velocity_blocks(popID)); + ccell->neighbor_block_data[i_nbr] = pcell->get_data(popID); + ccell->neighbor_number_of_blocks[i_nbr] = pcell->get_number_of_velocity_blocks(popID); send_cells.push_back(nbr); - send_origin_cells.push_back(c); + send_origin_cells.push_back(local_cells[c]); + i_nbr++; } } } - // Default values, after the loop. 
- if(ccell->neighbor_block_data.empty() && ccell->neighbor_number_of_blocks.empty()) { - ccell->neighbor_block_data.push_back(ccell->get_data(popID)); - ccell->neighbor_number_of_blocks.push_back(0); - } for (auto nbr : n_nbrs) { - + + auto it = i_remote_cells.find(nbr); + if(it == i_remote_cells.end()) { + i_remote_cells[nbr] = 0; + } + + if (i_remote_cells[nbr] >= MAX_FACE_NEIGHBORS_PER_DIM) { + std::cout << "Error: neighbor count is greater than 4"; + break; + } + if (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && ccell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { //Receive data that ncell mapped to this local cell data array, @@ -1327,16 +1348,15 @@ void update_remote_mapping_contribution( //we will here allocate a receive buffer, since we need to aggregate values SpatialCell *ncell = mpiGrid[nbr]; + + ncell->neighbor_number_of_blocks[i_remote_cells[nbr]] = ccell->get_number_of_velocity_blocks(popID); + ncell->neighbor_block_data[i_remote_cells[nbr]] = + (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks[i_remote_cells[nbr]] * WID3 * sizeof(Realf), 64); - vmesh::LocalID neighbor_number_of_blocks = ccell->get_number_of_velocity_blocks(popID); - Realf* neighbor_block_data_pointer = (Realf*) aligned_malloc(neighbor_number_of_blocks * WID3 * sizeof(Realf), 64); + receive_cells.push_back(local_cells[c]); + receiveBuffers.push_back(ncell->neighbor_block_data[i_remote_cells[nbr]]); + i_remote_cells[nbr]++; - ncell->neighbor_number_of_blocks.push_back(neighbor_number_of_blocks); - ncell->neighbor_block_data.push_back(neighbor_block_data_pointer); - - receive_cells.push_back(c); - receiveBuffers.push_back(neighbor_block_data_pointer); - // For debugging receive_origin_cells.push_back(nbr); @@ -1344,6 +1364,12 @@ void update_remote_mapping_contribution( } } + std::vector a; + for( auto c : local_cells ) { + a.push_back(mpiGrid[c]->ioLocalCellId); + } + std::cout << a.back() << std::endl; + MPI_Barrier(MPI_COMM_WORLD); if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << " " << direction << " " << dimension < b; + for( auto c : local_cells ) { + b.push_back(mpiGrid[c]->ioLocalCellId); + } + std::cout << b.back() << std::endl; + MPI_Barrier(MPI_COMM_WORLD); if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; @@ -1384,8 +1416,8 @@ void update_remote_mapping_contribution( //#pragma omp for for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { - checksum += receiveBuffers[c][vCell]; blockData[vCell] += receiveBuffers[c][vCell] / numReceiveCells; + checksum += blockData[vCell]; } receive_cells_sums.push_back(checksum); } From 3430dc7f2d9eebd2c664c0a1162aefb698c57ed3 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 3 Dec 2018 15:19:43 +0200 Subject: [PATCH 152/602] Fixes to the previous merge --- grid.cpp | 2 +- spatial_cell.cpp | 44 ++++++++++++++------------------------------ spatial_cell.hpp | 18 ++++++++++++++++++ 3 files changed, 33 insertions(+), 31 deletions(-) diff --git a/grid.cpp b/grid.cpp index b1ee29653..ae9d236f2 100644 --- a/grid.cpp +++ b/grid.cpp @@ -115,7 +115,7 @@ void initializeGrid( mpiGrid.set_initial_length(grid_length) .set_load_balancing_method(&P::loadBalanceAlgorithm[0]) .set_neighborhood_length(neighborhood_size) - .set_maximum_refinement_level(0) + .set_maximum_refinement_level(P::amrMaxSpatialRefLevel) .set_periodic(sysBoundaries.isBoundaryPeriodic(0), 
sysBoundaries.isBoundaryPeriodic(1), sysBoundaries.isBoundaryPeriodic(2)) diff --git a/spatial_cell.cpp b/spatial_cell.cpp index 64867060f..cc23752df 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -71,36 +71,20 @@ namespace spatial_cell { } } - // SpatialCell::SpatialCell(const SpatialCell& other): - // sysBoundaryFlag(other.sysBoundaryFlag), - // sysBoundaryLayer(other.sysBoundaryLayer), - // sysBoundaryLayerNew(other.sysBoundaryLayerNew), - // velocity_block_with_content_list(other.velocity_block_with_content_list), - // velocity_block_with_no_content_list(other.velocity_block_with_no_content_list), - // initialized(other.initialized), - // mpiTransferEnabled(other.mpiTransferEnabled), - // populations(other.populations), - // parameters(other.parameters), - // derivatives(other.derivatives), - // derivativesBVOL(other.derivativesBVOL), - // null_block_data(std::array {}) { - - // // //copy parameters - // // for(unsigned int i=0;i< CellParams::N_SPATIAL_CELL_PARAMS;i++){ - // // parameters[i]=other.parameters[i]; - // // } - // // //copy derivatives - // // for(unsigned int i=0;i< fieldsolver::N_SPATIAL_CELL_DERIVATIVES;i++){ - // // derivatives[i]=other.derivatives[i]; - // // } - // // //copy BVOL derivatives - // // for(unsigned int i=0;i< bvolderivatives::N_BVOL_DERIVATIVES;i++){ - // // derivativesBVOL[i]=other.derivativesBVOL[i]; - // // } - // // //set null block data - // // for (unsigned int i=0; i {}) { + } /** Adds "important" and removes "unimportant" velocity blocks * to/from this cell. diff --git a/spatial_cell.hpp b/spatial_cell.hpp index ded3fec3b..771385185 100644 --- a/spatial_cell.hpp +++ b/spatial_cell.hpp @@ -1894,11 +1894,29 @@ namespace spatial_cell { #endif return populations[popID].vmesh.hasGrandParent(blockGID); } + + inline SpatialCell& SpatialCell::operator=(const SpatialCell& other) { + this->sysBoundaryFlag = other.sysBoundaryFlag; + this->sysBoundaryLayer = other.sysBoundaryLayer; + this->sysBoundaryLayerNew = other.sysBoundaryLayerNew; + this->velocity_block_with_content_list = other.velocity_block_with_content_list; + this->velocity_block_with_no_content_list = other.velocity_block_with_no_content_list; + this->initialized = other.initialized; + this->mpiTransferEnabled = other.mpiTransferEnabled; + this->parameters = other.parameters; + this->derivatives = other.derivatives; + this->derivativesBVOL = other.derivativesBVOL; + this->null_block_data = other.null_block_data; + this->populations = other.populations; + + return *this; + } // inline SpatialCell& SpatialCell::operator=(const SpatialCell&) { // return *this; // } + } // namespaces #endif From b8282dd7560fddc4c38649b7834a393f4a4a811e Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 4 Dec 2018 16:08:17 +0200 Subject: [PATCH 153/602] Fix for accumulation bug on 4 ranks: New version of calculation for initial path when starting in a refined cell in buildPencilsWithNeighbors. 
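The new initial-path calculation below works out, level by level, which quadrant of its parent a refined cell occupies, from the cell's indices relative to its level-0 ancestor; each step is encoded as i1 + 2*i2. A simplified sketch of the index arithmetic, assuming the two transverse indices are given at the starting refinement level. The sketch takes the index parity at each level, whereas the patch divides without the final %2 and instead warns when an index exceeds 1:

```
#include <iostream>
#include <vector>

// Derive the pencil path steps for a cell at refinement level
// startingRefLvl from its transverse indices (li1, li2) relative to the
// level-0 ancestor. The step taken at each level is i1 + 2*i2, where i1
// and i2 are the index parities on that level.
std::vector<int> initialPath(int li1, int li2, int startingRefLvl) {
   std::vector<int> path;
   for (int i = 0; i < startingRefLvl; ++i) {
      const int divisor = 1 << (startingRefLvl - (i + 1)); // pow(2, ...)
      const int i1 = (li1 / divisor) % 2;
      const int i2 = (li2 / divisor) % 2;
      path.push_back(i1 + 2 * i2);
   }
   return path;
}

int main() {
   // A level-2 cell with local indices (3, 1):
   // level 1: (3/2)%2 + 2*((1/2)%2) = 1; level 2: 3%2 + 2*(1%2) = 3.
   for (int step : initialPath(3, 1, 2)) {
      std::cout << step << " "; // prints: 1 3
   }
   std::cout << std::endl;
   return 0;
}
```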
--- spatial_cell.cpp | 2 +- vlasovsolver/cpu_trans_map_amr.cpp | 99 ++++++++++++++------------ vlasovsolver/vlasovmover.cpp | 107 ++++++++++++++++++++++++----- 3 files changed, 148 insertions(+), 60 deletions(-) diff --git a/spatial_cell.cpp b/spatial_cell.cpp index cc23752df..683507a69 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -645,7 +645,7 @@ namespace spatial_cell { * solver.*/ for ( int i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { displacements.push_back((uint8_t*) this->neighbor_block_data[i] - (uint8_t*) this); - block_lengths.push_back(sizeof(Realf) * VELOCITY_BLOCK_LENGTH* this->neighbor_number_of_blocks[i]); + block_lengths.push_back(sizeof(Realf) * VELOCITY_BLOCK_LENGTH * this->neighbor_number_of_blocks[i]); } } diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 7d7e41943..8c0c557d0 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -275,7 +275,7 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg path.size() ) { - for ( uint i = path.size(); i < startingRefLvl; i++) { - auto parent = grid.get_parent(id); - auto children = grid.get_all_children(parent); - auto it = std::find(children.begin(),children.end(),id); - auto index = std::distance(children.begin(),it); - auto index2 = index; + + vector localIndices; + auto indices = grid.mapping.get_indices(id); + auto length = grid.mapping.get_cell_length_in_indices(grid.mapping.get_level_0_parent(id)); + for (auto index : indices) { + localIndices.push_back(index % length); + } + for ( uint i = path.size(); i < startingRefLvl; i++) { + + vector localIndicesOnRefLvl; + + for ( auto lid : localIndices ) { + localIndicesOnRefLvl.push_back( lid / pow(2, startingRefLvl - (i + 1) )); + } + + int i1 = 0; + int i2 = 0; + switch( dimension ) { - case 0: { - index2 = index / 2; + case 0: + i1 = localIndicesOnRefLvl.at(1); + i2 = localIndicesOnRefLvl.at(2); break; - } - case 1: { - index2 = index - index / 2; + case 1: + i1 = localIndicesOnRefLvl.at(0); + i2 = localIndicesOnRefLvl.at(2); break; - } - case 2: { - index2 = index % 4; + case 2: + i1 = localIndicesOnRefLvl.at(0); + i2 = localIndicesOnRefLvl.at(1); break; } + + if( i1 > 1 || i2 > 1) { + std::cout << __FILE__ << " " << __LINE__ << " Something went wrong, i1 = " << i1 << ", i2 = " << i2 << std::endl; } - path.insert(path.begin(),index2); - id = parent; + + path.push_back(i1 + 2 * i2); } } @@ -836,7 +852,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const Realv dt, const uint popID) { - const bool printPencils = false; + const bool printPencils = true; + const bool printTargets = false; const bool printLines = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ @@ -1086,10 +1103,10 @@ bool trans_map_1d_amr(const dccrg::Dccrg& if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - if (printPencils) std::cout << "Target cells for pencil " << pencili << ", rank " << myRank << ": "; + if (printTargets) std::cout << "Target cells for pencil " << pencili << ", rank " << myRank << ": "; // sourcedata => targetdata[this pencil]) for (uint i = 0; i < targetLength; i++) { - if (printPencils) std::cout << targetCells[i + totalTargetLength]->parameters[CellParams::CELLID] << " "; + if (printTargets) std::cout << targetCells[i + totalTargetLength]->parameters[CellParams::CELLID] << " "; for (uint k=0; k& } } } - if (printPencils) std::cout << std::endl; + if (printTargets) std::cout << 
std::endl; totalTargetLength += targetLength; // dealloc source data -- Should be automatic since it's declared in this loop iteration? } @@ -1331,15 +1348,12 @@ void update_remote_mapping_contribution( for (auto nbr : n_nbrs) { - auto it = i_remote_cells.find(nbr); - if(it == i_remote_cells.end()) { - i_remote_cells[nbr] = 0; - } + // This is not necessary, the initialization initializes all remote cells + // auto it = i_remote_cells.find(nbr); + // if(it == i_remote_cells.end()) { + // i_remote_cells[nbr] = 0; + // } - if (i_remote_cells[nbr] >= MAX_FACE_NEIGHBORS_PER_DIM) { - std::cout << "Error: neighbor count is greater than 4"; - break; - } if (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && ccell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { @@ -1347,6 +1361,11 @@ void update_remote_mapping_contribution( //if 1) ncell is a valid source cell, 2) center cell is to be updated (normal cell) 3) ncell is remote //we will here allocate a receive buffer, since we need to aggregate values + if (i_remote_cells[nbr] >= MAX_FACE_NEIGHBORS_PER_DIM) { + std::cout << "Error: neighbor count is greater than 4"; + break; + } + SpatialCell *ncell = mpiGrid[nbr]; ncell->neighbor_number_of_blocks[i_remote_cells[nbr]] = ccell->get_number_of_velocity_blocks(popID); @@ -1363,12 +1382,6 @@ void update_remote_mapping_contribution( } } } - - std::vector a; - for( auto c : local_cells ) { - a.push_back(mpiGrid[c]->ioLocalCellId); - } - std::cout << a.back() << std::endl; MPI_Barrier(MPI_COMM_WORLD); if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << " " << direction << " " << dimension < b; - for( auto c : local_cells ) { - b.push_back(mpiGrid[c]->ioLocalCellId); - } - std::cout << b.back() << std::endl; + // std::vector b; + // for( auto c : local_cells ) { + // b.push_back(mpiGrid[c]->ioLocalCellId); + // } + // std::cout << b.back() << std::endl; MPI_Barrier(MPI_COMM_WORLD); @@ -1410,16 +1423,16 @@ void update_remote_mapping_contribution( SpatialCell* spatial_cell = mpiGrid[receive_cells[c]]; Realf *blockData = spatial_cell->get_data(popID); - Realf checksum = 0.0; + //Realf checksum = 0.0; int numReceiveCells = count(receive_cells.begin(), receive_cells.end(), receive_cells[c]); //#pragma omp for for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { blockData[vCell] += receiveBuffers[c][vCell] / numReceiveCells; - checksum += blockData[vCell]; + //checksum += blockData[vCell]; } - receive_cells_sums.push_back(checksum); + //receive_cells_sums.push_back(checksum); } // send cell data is set to zero. 
This is to avoid double copy if diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index ec44accea..0cdde29ba 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -105,26 +105,20 @@ void calculateSpatialTranslation( if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // ------------- SLICE - map dist function in X --------------- // - if(P::xcells_ini > 1 ){ + if(P::xcells_ini > 1){ trans_timer=phiprof::initializeTimer("transfer-stencil-data-x","MPI"); phiprof::start(trans_timer); SpatialCell::set_mpi_transfer_type(Transfer::VEL_BLOCK_DATA); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - mpiGrid.set_send_single_cells(false); mpiGrid.update_copies_of_remote_neighbors(VLASOV_SOLVER_X_NEIGHBORHOOD_ID); phiprof::stop(trans_timer); - - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::start("compute-mapping-x"); trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsx, 0,dt,popID); // map along x// phiprof::stop("compute-mapping-x"); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - trans_timer=phiprof::initializeTimer("update_remote-x","MPI"); phiprof::start("update_remote-x"); update_remote_mapping_contribution(mpiGrid, 0,+1,popID); @@ -137,38 +131,119 @@ void calculateSpatialTranslation( if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // ------------- SLICE - map dist function in Y --------------- // - if(P::ycells_ini > 1){ + if(P::ycells_ini > 1) { + + std::vector sum_local_initial;; + std::vector sum_local_before_trans; + std::vector sum_local_after_trans; + std::vector sum_local_after_update; + std::vector sum_remote_initial; + std::vector sum_remote_before_trans; + std::vector sum_remote_after_trans; + std::vector sum_remote_after_update; + trans_timer=phiprof::initializeTimer("transfer-stencil-data-y","MPI"); phiprof::start(trans_timer); SpatialCell::set_mpi_transfer_type(Transfer::VEL_BLOCK_DATA); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + // for (auto c : local_propagated_cells) { + // Realf sum = 0.0; + // SpatialCell* spatial_cell = mpiGrid[c]; + // Realf *blockData = spatial_cell->get_data(popID); + // for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { + // sum += blockData[vCell]; + // } + // sum_local_initial.push_back(sum); + // } + // for (auto c : remoteTargetCellsy) { + // Realf sum = 0.0; + // SpatialCell* spatial_cell = mpiGrid[c]; + // Realf *blockData = spatial_cell->get_data(popID); + // for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { + // sum += blockData[vCell]; + // } + // sum_remote_initial.push_back(sum); + // } mpiGrid.set_send_single_cells(false); mpiGrid.update_copies_of_remote_neighbors(VLASOV_SOLVER_Y_NEIGHBORHOOD_ID); phiprof::stop(trans_timer); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + // for (auto c : local_propagated_cells) { + // Realf sum = 0.0; + // SpatialCell* spatial_cell = mpiGrid[c]; + // Realf *blockData = spatial_cell->get_data(popID); + // for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * 
spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { + // sum += blockData[vCell]; + // } + // sum_local_before_trans.push_back(sum); + // } + // for (auto c : remoteTargetCellsy) { + // Realf sum = 0.0; + // SpatialCell* spatial_cell = mpiGrid[c]; + // Realf *blockData = spatial_cell->get_data(popID); + // for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { + // sum += blockData[vCell]; + // } + // sum_remote_before_trans.push_back(sum); + // } phiprof::start("compute-mapping-y"); trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsy, 1,dt,popID); // map along y// phiprof::stop("compute-mapping-y"); - MPI_Barrier(MPI_COMM_WORLD); + // for (auto c : local_propagated_cells) { + // Realf sum = 0.0; + // SpatialCell* spatial_cell = mpiGrid[c]; + // Realf *blockData = spatial_cell->get_data(popID); + // for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { + // sum += blockData[vCell]; + // } + // sum_local_after_trans.push_back(sum); + // } + // for (auto c : remoteTargetCellsy) { + // Realf sum = 0.0; + // SpatialCell* spatial_cell = mpiGrid[c]; + // Realf *blockData = spatial_cell->get_data(popID); + // for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { + // sum += blockData[vCell]; + // } + // sum_remote_after_trans.push_back(sum); + // } - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + MPI_Barrier(MPI_COMM_WORLD); trans_timer=phiprof::initializeTimer("update_remote-y","MPI"); phiprof::start("update_remote-y"); update_remote_mapping_contribution(mpiGrid, 1,+1,popID); update_remote_mapping_contribution(mpiGrid, 1,-1,popID); phiprof::stop("update_remote-y"); - } - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + // for (auto c : local_propagated_cells) { + // Realf sum = 0.0; + // SpatialCell* spatial_cell = mpiGrid[c]; + // Realf *blockData = spatial_cell->get_data(popID); + // for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { + // sum += blockData[vCell]; + // } + // sum_local_after_update.push_back(sum); + // } + // for (auto c : remoteTargetCellsy) { + // Realf sum = 0.0; + // SpatialCell* spatial_cell = mpiGrid[c]; + // Realf *blockData = spatial_cell->get_data(popID); + // for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { + // sum += blockData[vCell]; + // } + // sum_remote_after_update.push_back(sum); + // } + + + MPI_Barrier(MPI_COMM_WORLD); + } - // MPI_Barrier(MPI_COMM_WORLD); - // bailout(true, "", __FILE__, __LINE__); + MPI_Barrier(MPI_COMM_WORLD); + bailout(true, "", __FILE__, __LINE__); } /*! From 7e7d143080adc43eb044e3b67afa78c67211753d Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 4 Dec 2018 16:08:59 +0200 Subject: [PATCH 154/602] Display global sums when printing out. 
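The diagnostic added below reduces each rank's local density sum to a global total on the master rank. The MPI pattern in isolation, with rank 0 standing in for MASTER_RANK and a dummy local value in place of the rho*dx*dy*dz accumulation:

```
#include <iostream>
#include <mpi.h>

int main(int argc, char** argv) {
   MPI_Init(&argc, &argv);
   int myRank = 0;
   MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

   double nSum = 1.0 + myRank; // dummy local contribution
   double globalSum = 0.0;
   // Sum the per-rank values onto rank 0, which prints the total.
   MPI_Reduce(&nSum, &globalSum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
   if (myRank == 0) {
      std::cout << "Global sum = " << globalSum << std::endl;
   }

   MPI_Finalize();
   return 0;
}
```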
--- vlasiator.cpp | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index c64c62e4f..d0e11b993 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -747,7 +747,13 @@ int main(int argn,char* args[]) { nSum += rho*dx*dy*dz; if(printCells) cout << "Cell " << cell << " rho = " << rho << " x: " << x << " y: " << y << " z: " << z << endl; } - if(printSums) cout << "Rank " << myRank << ", nSum = " << nSum << endl; + if(printSums) { + cout << "Rank " << myRank << ", nSum = " << nSum << endl; + Real globalSum = 0.0; + MPI_Reduce(&nSum, &globalSum, 1, MPI_DOUBLE, MPI_SUM, MASTER_RANK, MPI_COMM_WORLD); + MPI_Barrier(MPI_COMM_WORLD); + if(myRank == MASTER_RANK) cout << " Global sum = " << globalSum << endl; + } while(P::tstep <= P::tstep_max && P::t-P::dt <= P::t_max+DT_EPSILON && @@ -1135,7 +1141,13 @@ int main(int argn,char* args[]) { nSum += rho*dx*dy*dz; if(printCells) cout << "Cell " << cell << " rho = " << rho << " x: " << x << " y: " << y << " z: " << z << endl; } - if(printSums) cout << "Rank " << myRank << ", nSum = " << nSum << endl; + if(printSums) { + cout << "Rank " << myRank << ", Local sum = " << nSum << endl; + Real globalSum = 0.0; + MPI_Reduce(&nSum, &globalSum, 1, MPI_DOUBLE, MPI_SUM, MASTER_RANK, MPI_COMM_WORLD); + MPI_Barrier(MPI_COMM_WORLD); + if(printSums && myRank == MASTER_RANK) cout << " Global sum = " << globalSum << endl; + } if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::stop("Spatial-space",computedCells,"Cells"); From eafdff42dc13f3bd7eae6b5b68afc1fe6a1c7783 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 4 Dec 2018 16:10:54 +0200 Subject: [PATCH 155/602] Removed bailout from vlasovmover and prints from cpu_trans_map_amr --- vlasovsolver/cpu_trans_map_amr.cpp | 2 +- vlasovsolver/vlasovmover.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 8c0c557d0..d328b4f86 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -852,7 +852,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const Realv dt, const uint popID) { - const bool printPencils = true; + const bool printPencils = false; const bool printTargets = false; const bool printLines = false; Realv dvz,vz_min; diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index 0cdde29ba..dae410643 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -242,8 +242,8 @@ void calculateSpatialTranslation( MPI_Barrier(MPI_COMM_WORLD); } - MPI_Barrier(MPI_COMM_WORLD); - bailout(true, "", __FILE__, __LINE__); + // MPI_Barrier(MPI_COMM_WORLD); + // bailout(true, "", __FILE__, __LINE__); } /*! From 97387d5caf11f787e1b915933e2a9b8d8c14d02f Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 5 Dec 2018 10:31:12 +0200 Subject: [PATCH 156/602] Removed barriers that were used for debugging. Some other code cleanup and preparation for runs. 
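Among the cleanups below, selectNeighbor gains braces but keeps its core rule: with a single face neighbor the pencil simply continues into it, while on a refined face the pencil's stored path step selects which finer neighbor to enter. The rule reduced to a toy sketch with made-up cell ids:

```
#include <cstdint>
#include <iostream>
#include <vector>

// With one face neighbor the choice is trivial; on a refined face
// (several neighbors) the pencil's path step picks the sub-neighbor.
uint64_t pickNeighbor(const std::vector<uint64_t>& myNeighbors, int path) {
   int neighborIndex = 0;
   if (myNeighbors.size() > 1) {
      neighborIndex = path;
   }
   return myNeighbors[neighborIndex];
}

int main() {
   std::cout << pickNeighbor({42}, 3) << std::endl;             // 42
   std::cout << pickNeighbor({10, 11, 12, 13}, 2) << std::endl; // 12
   return 0;
}
```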
--- vlasiator.cpp | 2 +- vlasovsolver/cpu_trans_map_amr.cpp | 16 +++++++++------- vlasovsolver/vlasovmover.cpp | 8 ++++---- 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index d0e11b993..5965f6108 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -271,7 +271,7 @@ int main(int argn,char* args[]) { const bool printLines = false; const bool printCells = false; - const bool printSums = true; + const bool printSums = false; // Init MPI: int required=MPI_THREAD_FUNNELED; diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index d328b4f86..35d8578a6 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -254,14 +254,16 @@ CellID selectNeighbor(const dccrg::Dccrg } int neighborIndex = 0; - if (myNeighbors.size() > 1) + if (myNeighbors.size() > 1) { neighborIndex = path; - if (grid.is_local(myNeighbors[neighborIndex])) + } + if (grid.is_local(myNeighbors[neighborIndex])) { neighbor = myNeighbors[neighborIndex]; + } - // std::cout << "selectNeighbor: path = " << path << " neighbors = "; - // for (auto nbr : myNeighbors) std::cout << neighbor << " "; + // std::cout << "selectNeighbor: id = " << id << " path = " << path << " neighbors = "; + // for (auto nbr : myNeighbors) std::cout << nbr << " "; // std::cout << ", returning " << neighbor << std::endl; return neighbor; @@ -1383,7 +1385,7 @@ void update_remote_mapping_contribution( } } - MPI_Barrier(MPI_COMM_WORLD); + // MPI_Barrier(MPI_COMM_WORLD); if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << " " << direction << " " << dimension < 1 && false){ + if(P::zcells_ini > 1){ trans_timer=phiprof::initializeTimer("transfer-stencil-data-z","MPI"); phiprof::start(trans_timer); SpatialCell::set_mpi_transfer_type(Transfer::VEL_BLOCK_DATA); @@ -211,7 +211,7 @@ void calculateSpatialTranslation( // sum_remote_after_trans.push_back(sum); // } - MPI_Barrier(MPI_COMM_WORLD); + // MPI_Barrier(MPI_COMM_WORLD); trans_timer=phiprof::initializeTimer("update_remote-y","MPI"); phiprof::start("update_remote-y"); @@ -239,11 +239,11 @@ void calculateSpatialTranslation( // } - MPI_Barrier(MPI_COMM_WORLD); + // MPI_Barrier(MPI_COMM_WORLD); } // MPI_Barrier(MPI_COMM_WORLD); - // bailout(true, "", __FILE__, __LINE__); + // bailout(true, "", __FILE__, __LINE__); } /*! From 436e2ee04ba6461882ed680a16aa030168d32094 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Wed, 5 Dec 2018 15:21:11 +0200 Subject: [PATCH 157/602] Added a config file that produces null pointers from dccrg when run on 24 ranks on sisu. 
--- projects/testAmr/nullPointersOnSisu.cfg | 95 +++++++++++++++++++++++++ 1 file changed, 95 insertions(+) create mode 100644 projects/testAmr/nullPointersOnSisu.cfg diff --git a/projects/testAmr/nullPointersOnSisu.cfg b/projects/testAmr/nullPointersOnSisu.cfg new file mode 100644 index 000000000..1a4aa0e63 --- /dev/null +++ b/projects/testAmr/nullPointersOnSisu.cfg @@ -0,0 +1,95 @@ +dynamic_timestep = 1 +project = testAmr +ParticlePopulations = proton +propagate_field = 0 +propagate_vlasov_acceleration = 0 +propagate_vlasov_translation = 1 + +[proton_properties] +mass = 1 +mass_units = PROTON +charge = 1 + +[io] +diagnostic_write_interval = -1 +write_initial_state = 0 + +system_write_t_interval = -0.01 +system_write_file_name = fullf +system_write_distribution_stride = 0 +system_write_distribution_xline_stride = 0 +system_write_distribution_yline_stride = 0 +system_write_distribution_zline_stride = 0 + + +[gridbuilder] +x_length = 15 +y_length = 15 +z_length = 10 +x_min = 0.0 +x_max = 1.0e6 +y_min = 0.0 +y_max = 1.0e6 +z_min = 0.0 +z_max = 1.0e6 +timestep_max = 100 + +[proton_vspace] +vx_min = -2.0e6 +vx_max = +2.0e6 +vy_min = -2.0e6 +vy_max = +2.0e6 +vz_min = -2.0e6 +vz_max = +2.0e6 +vx_length = 1 +vy_length = 1 +vz_length = 1 +max_refinement_level = 1 +[proton_sparse] +minValue = 1.0e-16 + +[boundaries] +periodic_x = yes +periodic_y = yes +periodic_z = yes + +[variables] +output = populations_Rho +output = B +output = Pressure +output = populations_V +output = E +output = MPIrank +output = populations_Blocks +#output = VelocitySubSteps + +diagnostic = populations_Blocks +#diagnostic = Pressure +#diagnostic = populations_Rho +#diagnostic = populations_RhoLossAdjust +#diagnostic = populations_RhoLossVelBoundary + +[testAmr] +#magnitude of 1.82206867e-10 gives a period of 360s, useful for testing... 
+Bx = 1.2e-10 +By = 0.8e-10 +Bz = 1.1135233442526334e-10 +magXPertAbsAmp = 0 +magYPertAbsAmp = 0 +magZPertAbsAmp = 0 +densityModel = testcase +nVelocitySamples = 3 + +[proton_testAmr] +n = 1 +Vx = 5e5 +Vy = 5e5 +Vz = 0.0 +Tx = 500000.0 +Ty = 500000.0 +Tz = 500000.0 +rho = 1.0e6 +rhoPertAbsAmp = 0.0 + +[loadBalance] +algorithm = RCB \ No newline at end of file From c2d685c2d26b32cee009a9250c27ef83a6f93452 Mon Sep 17 00:00:00 2001 From: Ilja Date: Mon, 10 Dec 2018 14:56:18 +0200 Subject: [PATCH 158/602] Fix ambiguous call to abs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes compilation error: ``` vlasovsolver/cpu_trans_map_amr.cpp:613:89: virhe: call of overloaded ”abs(std::array::value_type)” is ambiguous ``` --- vlasovsolver/cpu_trans_map_amr.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 35d8578a6..ab38f34e9 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -610,7 +610,7 @@ void getSeedIds(const dccrg::Dccrg& mpiGr // If a neighbor is non-local or across a periodic boundary, then we use this // cell as a seed for pencils - if ( abs ( myIndices[dimension] - nbrIndices[dimension] ) > + if ( abs ( (int64_t)(myIndices[dimension] - nbrIndices[dimension]) ) > pow(2,mpiGrid.get_maximum_refinement_level()) || !mpiGrid.is_local(nbrPair.first)) { From 6a374ed8a59848e7dde59773c3b3a4e2d9ccc7a8 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 11 Dec 2018 12:01:18 +0200 Subject: [PATCH 159/602] Added averaging of values from fsgrid to dccrg in getFieldDataFromFsGrid when fsgrid has higher resolution, similarly to what is done in get-functions in gridGlue.cpp. This one was missing by mistake. --- fieldsolver/gridGlue.hpp | 41 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 38 insertions(+), 3 deletions(-) diff --git a/fieldsolver/gridGlue.hpp b/fieldsolver/gridGlue.hpp index 026929793..373ca4194 100644 --- a/fieldsolver/gridGlue.hpp +++ b/fieldsolver/gridGlue.hpp @@ -171,18 +171,53 @@ template< unsigned int numFields > void getFieldDataFromFsGrid( const std::vector& cells, int index) { int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); + std::vector< std::array > transferBuffer(nCells); + std::vector< std::array*> transferBufferPointer; sourceGrid.setupForTransferOut(nCells); + int k = 0; for(CellID dccrgId : cells) { // TODO: This assumes that the field data are lying continuous in memory. // Check definition of CellParams in common.h if unsure. 
- std::array* cellDataPointer = reinterpret_cast*>( - &(mpiGrid[dccrgId]->get_cell_parameters()[index])); + + //std::array* cellDataPointer = reinterpret_cast*>( + // &(mpiGrid[dccrgId]->get_cell_parameters()[index])); + + transferBufferPointer.push_back(&transferBuffer[k]); + const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); for (auto fsgridId : fsgridIds) { - sourceGrid.transferDataIn(fsgridId, cellDataPointer); + std::array* cellDataPointer = &transferBuffer[k++]; + sourceGrid.transferDataOut(fsgridId, cellDataPointer); } } sourceGrid.finishTransfersOut(); + + // Average data in transferBuffer +#pragma omp parallel for + for(uint i = 0; i < cells.size(); ++i) { + + CellID dccrgId = cells[i]; + + // Calculate the number of fsgrid cells we need to average into the current dccrg cell + auto refLvl = mpiGrid.mapping.get_refinement_level(dccrgId); + int nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); + + for(int iCell = 0; iCell < nCells; ++iCell) { + + std::array* cellDataPointer = transferBufferPointer[i] + iCell; + + for (int iField = 0; iField < numFields; ++iField) { + mpiGrid[dccrgId]->get_cell_parameters()[index+iField] += cellDataPointer->at(iField); + } + + } + + for (int iField = 0; iField < numFields; ++iField) { + mpiGrid[dccrgId]->get_cell_parameters()[index+iField] /= nCells; + } + + } + } From 8bcf161c4c39e22ba3d3451f5db2e74c4d867a6e Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Tue, 11 Dec 2018 16:48:35 +0200 Subject: [PATCH 160/602] check_ghost_cells checks all available neighbors instead of trying to determine neighbors at a given offset. path is not accessed by cells that don't have it in the source and target calculations. --- vlasovsolver/cpu_trans_map_amr.cpp | 73 ++++++++++++++++-------------- 1 file changed, 40 insertions(+), 33 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 35d8578a6..2f7232168 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -107,12 +107,11 @@ void computeSpatialSourceCellsForPencil(const dccrg::Dccrg=-VLASOV_STENCIL_WIDTH;i--){ - if(sourceCells[i + VLASOV_STENCIL_WIDTH] == NULL) - sourceCells[i + VLASOV_STENCIL_WIDTH] = lastGoodCell; + for(int i = VLASOV_STENCIL_WIDTH - 1; i >= 0 ;i--){ + if(sourceCells[i] == NULL) + sourceCells[i] = lastGoodCell; else - lastGoodCell = sourceCells[i + VLASOV_STENCIL_WIDTH]; + lastGoodCell = sourceCells[i]; } /*loop to positive side and replace all invalid cells with the closest good cell*/ lastGoodCell = mpiGrid[ids.back()]; - for(int i = L + 1; i <= L + VLASOV_STENCIL_WIDTH; i++){ - if(sourceCells[i + VLASOV_STENCIL_WIDTH] == NULL) - sourceCells[i + VLASOV_STENCIL_WIDTH] = lastGoodCell; + for(int i = L + VLASOV_STENCIL_WIDTH; i < L + 2*VLASOV_STENCIL_WIDTH; i++){ + if(sourceCells[i] == NULL) + sourceCells[i] = lastGoodCell; else - lastGoodCell = sourceCells[i + VLASOV_STENCIL_WIDTH]; + lastGoodCell = sourceCells[i]; } } @@ -204,6 +202,10 @@ void computeSpatialTargetCellsForPencils(const dccrg::Dccrg backNeighborIds; for( auto nbrPair: *backNbrPairs ) { @@ -212,22 +214,24 @@ void computeSpatialTargetCellsForPencils(const dccrg::Dccrg& mpiGrid, setOfPencils& pencils, - uint dimension, - int offset) { + uint dimension) { const bool debug = false; - int neighborhoodId = getNeighborhood(dimension,2); + int neighborhoodId = getNeighborhood(dimension,VLASOV_STENCIL_WIDTH); int myRank; if(debug) { @@ -764,15 +767,15 @@ void 
check_ghost_cells(const dccrg::Dccrg const auto* backNeighbors = mpiGrid.get_neighbors_of(ids.back() ,neighborhoodId); for (auto nbrPair: *frontNeighbors) { - if(nbrPair.second[dimension] == -offset) { - maxNbrRefLvl = max(maxNbrRefLvl,mpiGrid.mapping.get_refinement_level(nbrPair.first)); - } + //if((nbrPair.second[dimension] + 1) / pow(2,mpiGrid.get_refinement_level(nbrPair.first)) == -offset) { + maxNbrRefLvl = max(maxNbrRefLvl,mpiGrid.mapping.get_refinement_level(nbrPair.first)); + //} } for (auto nbrPair: *backNeighbors) { - if(nbrPair.second[dimension] == offset) { - maxNbrRefLvl = max(maxNbrRefLvl,mpiGrid.get_refinement_level(nbrPair.first)); - } + //if((nbrPair.second[dimension] + 1) / pow(2,mpiGrid.get_refinement_level(nbrPair.first)) == offset) { + maxNbrRefLvl = max(maxNbrRefLvl,mpiGrid.get_refinement_level(nbrPair.first)); + //} } if (maxNbrRefLvl > maxPencilRefLvl) { @@ -953,9 +956,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Check refinement of two ghost cells on each end of each pencil - for (int offset = 1; offset <= VLASOV_STENCIL_WIDTH; ++offset) { - check_ghost_cells(mpiGrid,pencils,dimension,offset); - } + check_ghost_cells(mpiGrid,pencils,dimension); // **************************************************************************** if(printPencils) printPencilsFunc(pencils,dimension,myRank); @@ -1108,7 +1109,13 @@ bool trans_map_1d_amr(const dccrg::Dccrg& if (printTargets) std::cout << "Target cells for pencil " << pencili << ", rank " << myRank << ": "; // sourcedata => targetdata[this pencil]) for (uint i = 0; i < targetLength; i++) { - if (printTargets) std::cout << targetCells[i + totalTargetLength]->parameters[CellParams::CELLID] << " "; + if (printTargets) { + if( targetCells[i + totalTargetLength] != NULL) { + std::cout << targetCells[i + totalTargetLength]->parameters[CellParams::CELLID] << " "; + } else { + std::cout << "NULL" << " "; + } + } for (uint k=0; k Date: Wed, 12 Dec 2018 08:58:32 +0200 Subject: [PATCH 161/602] Added remove pencil function. --- vlasovsolver/cpu_trans_map_amr.hpp | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index e3600cca2..b96666001 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -56,6 +56,25 @@ struct setOfPencils { path.push_back(pathIn); } + void removePencil(const uint pencilId) { + + x.erase(x.begin() + pencilId); + y.erase(y.begin() + pencilId); + periodic.erase(periodic.begin() + pencilId); + path.erase(path.begin() + pencilId); + + CellID ibeg = 0; + for (uint i = 0; i < pencilId; ++i) { + ibeg += lengthOfPencils[i]; + } + ids.erase(ids.begin() + ibeg, ids.begin() + ibeg + lengthOfPencils[pencilId]); + + N--; + sumOfLengths -= lengthOfPencils[pencilId]; + lengthOfPencils.erase(lengthOfPencils.begin() + pencilId); + + } + std::vector getIds(const uint pencilId) const { std::vector idsOut; From c8c2c1c547fdb18ea98e613a2ed3db9f62fa947c Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 13 Dec 2018 15:02:53 +0200 Subject: [PATCH 162/602] Modifications to buildPencilsWithNeighbors: The list of seed ids is passed in as a list of "red" cells that stop the pencil builder. Simplifies logic at the end and avoids overlap problems in some corner cases. 
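The seed ids now double as stop markers: a pencil under construction halts before stepping onto another pencil's seed ("red") cell, so pencils cannot overlap at their ends. A toy sketch of that termination rule, where next[] is a stand-in for the neighbor lookup and 0 marks the end of the grid:

```
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Follow a chain of neighbors, stopping before another pencil's seed
// ("red") cell, so the two pencils never share cells.
std::vector<uint64_t> buildChain(uint64_t start,
                                 const std::vector<uint64_t>& next,
                                 const std::vector<uint64_t>& endIds) {
   std::vector<uint64_t> chain{start};
   uint64_t id = start;
   while (next[id] != 0) {
      id = next[id];
      if (std::find(endIds.begin(), endIds.end(), id) != endIds.end()) {
         break; // the next cell seeds another pencil
      }
      chain.push_back(id);
   }
   return chain;
}

int main() {
   // Cells 1 -> 2 -> 3 -> 4, where cell 3 seeds another pencil:
   // the chain built from 1 contains only cells 1 and 2.
   std::vector<uint64_t> next = {0, 2, 3, 4, 0};
   for (uint64_t id : buildChain(1, next, {3})) {
      std::cout << id << " ";
   }
   std::cout << std::endl;
   return 0;
}
```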
--- vlasovsolver/cpu_trans_map_amr.cpp | 106 ++++++++++++++++++++++------- 1 file changed, 82 insertions(+), 24 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 2f7232168..6f6c0a3b6 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -274,10 +274,69 @@ CellID selectNeighbor(const dccrg::Dccrg } +void removeDuplicates(setOfPencils &pencils) { + + vector duplicatePencilIds; + + // Loop over all pencils twice to do cross-comparisons + for (uint myPencilId = 0; myPencilId < pencils.N; ++myPencilId) { + + vector myCellIds = pencils.getIds(myPencilId); + + for (uint theirPencilId = 0; theirPencilId < pencils.N; ++theirPencilId) { + + // Do not compare with self + if (myPencilId == theirPencilId) { + continue; + } + + // we check if all cells of pencil b ("their") are included in pencil a ("my") + bool removeThisPencil = true; + + vector theirCellIds = pencils.getIds(theirPencilId); + + for (auto theirCellId : theirCellIds) { + bool matchFound = false; + for (auto myCellId : myCellIds) { + // Compare each "my" cell to all "their" cells, if any of them match + // update a logical value matchFound to true. + if (myCellId == theirCellId && pencils.path[myPencilId] == pencils.path[theirPencilId]) { + matchFound = true; + } + } + // If no match was found for this "my" cell, we can end the comparison, these pencils + // are not duplicates. + if(!matchFound) { + removeThisPencil = false; + continue; + } + } + + if(removeThisPencil) { + if(std::find(duplicatePencilIds.begin(), duplicatePencilIds.end(), myPencilId) == duplicatePencilIds.end() ) { + duplicatePencilIds.push_back(theirPencilId); + } + + } + + } + + } + + int myRank; + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + + for (auto id : duplicatePencilIds) { + //pencils.removePencil(id); + cout << "I am rank " << myRank << ", I would like to remove pencil number " << id << endl; + } + +} + setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg &grid, - setOfPencils &pencils, CellID startingId, - vector ids, uint dimension, - vector path) { + setOfPencils &pencils, const CellID startingId, + vector ids, const uint dimension, + vector path, const vector &endIds) { const bool debug = false; CellID nextNeighbor; @@ -403,7 +462,7 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg& mpiGr const uint dimension, vector &seedIds) { - const bool debug = false; + const bool debug = true; int myRank; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); @@ -950,7 +1005,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& for (const auto seedId : seedIds) { // Construct pencils from the seedIds into a set of pencils. - pencils = buildPencilsWithNeighbors(mpiGrid, pencils, seedId, ids, dimension, path); + pencils = buildPencilsWithNeighbors(mpiGrid, pencils, seedId, ids, dimension, path, seedIds); } if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; @@ -958,9 +1013,12 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Check refinement of two ghost cells on each end of each pencil check_ghost_cells(mpiGrid,pencils,dimension); // **************************************************************************** - + if(printPencils) printPencilsFunc(pencils,dimension,myRank); + // // Remove duplicates + // removeDuplicates(pencils); + // Add the final set of pencils to the pencilSets - vector. 
// Only one set is created for now but we retain support for multiple sets pencilSets.push_back(pencils); From 1982aa84e4cc3040f2e2bd2be97720b739bdf62d Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 18 Dec 2018 12:14:39 +0200 Subject: [PATCH 163/602] Overhaul of update_remote_mapping_contribution. The previous version broke down when neighbors were on more than 1 process. This version tries to take that into account by assigning the sent data pointer to the correct element of the neighbor_block_data and pointing the receiveBuffers to the corresponding element. Note: Something is wrong in this commit, occasionally uninitialized block data values are aggregated resulting in NaNs and other nastiness. --- vlasovsolver/cpu_trans_map_amr.cpp | 170 +++++++++++++++++++---------- 1 file changed, 110 insertions(+), 60 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 6f6c0a3b6..69b63eefe 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -646,7 +646,7 @@ void getSeedIds(const dccrg::Dccrg& mpiGr const uint dimension, vector &seedIds) { - const bool debug = true; + const bool debug = false; int myRank; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); @@ -872,6 +872,27 @@ void check_ghost_cells(const dccrg::Dccrg } } +bool checkPencils(const std::vector &cells, const setOfPencils& pencils) { + + + bool correct = true; + + for (auto id : cells) { + + int myCount = std::count(pencils.ids.begin(), pencils.ids.end(), id); + + if( myCount == 0 || (myCount != 1 && myCount % 4 != 0)) { + + std::cerr << "ERROR: Cell ID " << id << " Appears in pencils " << myCount << " times!"; + correct = false; + } + + } + + return correct; + +} + void printPencilsFunc(const setOfPencils& pencils, const uint dimension, const int myRank) { // Print out ids of pencils (if needed for debugging) @@ -1015,6 +1036,10 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // **************************************************************************** if(printPencils) printPencilsFunc(pencils,dimension,myRank); + + if(!checkPencils(localPropagatedCells, pencils)) { + throw; + } // // Remove duplicates // removeDuplicates(pencils); @@ -1325,9 +1350,6 @@ void update_remote_mapping_contribution( vector send_origin_cells; vector receive_origin_cells; - - - std::map i_remote_cells; int myRank; const bool printLines = false; @@ -1347,9 +1369,7 @@ void update_remote_mapping_contribution( ccell->neighbor_block_data[i] = ccell->get_data(popID); ccell->neighbor_number_of_blocks[i] = 0; } - - i_remote_cells[c] = 0; - } + } if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; @@ -1357,21 +1377,10 @@ void update_remote_mapping_contribution( for (uint c = 0; c < local_cells.size(); ++c) { SpatialCell *ccell = mpiGrid[local_cells[c]]; - - // Initialize number of blocks to 0 and block data to a default value - for (uint i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { - ccell->neighbor_block_data[i] = ccell->get_data(popID); - ccell->neighbor_number_of_blocks[i] = 0; - } int neighborhood = getNeighborhood(dimension,1); auto* nbrPairVector = mpiGrid.get_neighbors_of(local_cells[c], neighborhood); - if (all_of(nbrPairVector->begin(), nbrPairVector->end(), - [mpiGrid](pair > p){return mpiGrid.is_local(p.first);})) { - continue; - } - // Initialize to empty vectors, add default values at the end. 
vector n_nbrs; vector p_nbrs; @@ -1388,14 +1397,36 @@ void update_remote_mapping_contribution( } } - uint i_nbr = 0; + int maxNeighborSize = max(p_nbrs.size(),n_nbrs.size()); + + for (uint i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { + if(maxNeighborSize == 1) { + // Initialize number of blocks to 0 and block data to a default value + ccell->neighbor_block_data[i] = ccell->get_data(popID); + ccell->neighbor_number_of_blocks[i] = 0; + } else { + // Initialize number of blocks to the number of blocks in this cell (neighbors should have same number) + // and the block data to 0. We need to do this to make multi-process communications work. + ccell->neighbor_number_of_blocks[i] = ccell->get_number_of_velocity_blocks(popID); + ccell->neighbor_block_data[i] = + (Realf*) aligned_malloc(ccell->neighbor_number_of_blocks[i] * WID3 * sizeof(Realf), 64); + for (uint j = 0; j < ccell->neighbor_number_of_blocks[i] * WID3; ++j) { + ccell->neighbor_block_data[i][j] = 0.0; + } + } + } + + if (all_of(nbrPairVector->begin(), nbrPairVector->end(), + [mpiGrid](pair > p){return mpiGrid.is_local(p.first);})) { + // Only local neighbors, move on. + continue; + } + + // ccell adds a neighbor_block_data block for each neighbor in the positive direction to its local data - for (auto nbr : p_nbrs) { + for (uint i_nbr = 0; i_nbr < p_nbrs.size(); ++i_nbr) { - if (i_nbr >= MAX_FACE_NEIGHBORS_PER_DIM) { - std::cout << "Error: neighbor count is greater than 4"; - break; - } + CellID nbr = p_nbrs[i_nbr]; if (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && do_translate_cell(ccell)) { SpatialCell *pcell = mpiGrid[nbr]; @@ -1403,49 +1434,76 @@ void update_remote_mapping_contribution( //Send data in nbr target array that we just mapped to if 1) it is a valid target, //2) is remote cell, 3) the source cell in center was translated, 4) it is not a boundary cell? 
- ccell->neighbor_block_data[i_nbr] = pcell->get_data(popID); - ccell->neighbor_number_of_blocks[i_nbr] = pcell->get_number_of_velocity_blocks(popID); + ccell->neighbor_block_data.at(i_nbr) = pcell->get_data(popID); + ccell->neighbor_number_of_blocks.at(i_nbr) = pcell->get_number_of_velocity_blocks(popID); send_cells.push_back(nbr); send_origin_cells.push_back(local_cells[c]); - i_nbr++; } } } - for (auto nbr : n_nbrs) { + for (uint i_nbr = 0; i_nbr < n_nbrs.size(); ++i_nbr) { - // This is not necessary, the initialization initializes all remote cells - // auto it = i_remote_cells.find(nbr); - // if(it == i_remote_cells.end()) { - // i_remote_cells[nbr] = 0; - // } - + CellID nbr = n_nbrs[i_nbr]; if (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && - ccell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + ccell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { //Receive data that ncell mapped to this local cell data array, //if 1) ncell is a valid source cell, 2) center cell is to be updated (normal cell) 3) ncell is remote //we will here allocate a receive buffer, since we need to aggregate values - if (i_remote_cells[nbr] >= MAX_FACE_NEIGHBORS_PER_DIM) { - std::cout << "Error: neighbor count is greater than 4"; - break; + SpatialCell *ncell = mpiGrid[nbr]; + + // There are three possibilities for how we receive data + // 1) sibling of 1 receiving from sibling of 1 + // Receiving cell reads from 0th element of receiveBuffer + // 2) sibling of 4 receiving from sibling of 1 + // Receiving cell reads the element from receiveBuffer equal to their sibling index + // 3) sibling of 1 receiving from sibling of 4 + // Receiving cell reads all elements from receiveBuffer that have data from remote neighbors + + // Find out which cell in the list of siblings this cell is. That will determine which + // neighbor_block_data gets stored as the transferBuffer. + auto myIndices = mpiGrid.mapping.get_indices(local_cells[c]); + auto allSiblings = mpiGrid.get_all_children(mpiGrid.get_parent(local_cells[c])); + vector siblings; + + for (auto sibling : allSiblings) { + auto indices = mpiGrid.mapping.get_indices(sibling); + if(indices[dimension] == myIndices[dimension]) { + siblings.push_back(sibling); + } } - SpatialCell *ncell = mpiGrid[nbr]; - - ncell->neighbor_number_of_blocks[i_remote_cells[nbr]] = ccell->get_number_of_velocity_blocks(popID); - ncell->neighbor_block_data[i_remote_cells[nbr]] = - (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks[i_remote_cells[nbr]] * WID3 * sizeof(Realf), 64); + auto myLocation = std::find(siblings.begin(),siblings.end(),local_cells[c]); + uint nSiblings = 1; + uint recvBufferIndex = 0; + uint bufferSize = 1; + if(myLocation != siblings.end()) { + nSiblings = 4; + recvBufferIndex = std::distance(siblings.begin(), myLocation); + bufferSize = 4; + } + + if(nSiblings == 1 && n_nbrs.size() == 4) { + recvBufferIndex = i_nbr; + bufferSize = 4; + } + + // We have to allocate memory for each sibling to receive all the data sent by ncell. + // There should be either 1 or 4 siblings, if there is only 1 sibling, the other receive + // blocks will remain at number_of_blocks = 0 as initialized. 
+ for (uint i_buf = 0; i_buf < bufferSize; ++i_buf) { + ncell->neighbor_number_of_blocks.at(i_buf) = ccell->get_number_of_velocity_blocks(popID); + ncell->neighbor_block_data.at(i_buf) = + (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(i_buf) * WID3 * sizeof(Realf), 64); + } + receiveBuffers.push_back(ncell->neighbor_block_data.at(recvBufferIndex)); receive_cells.push_back(local_cells[c]); - receiveBuffers.push_back(ncell->neighbor_block_data[i_remote_cells[nbr]]); - i_remote_cells[nbr]++; - // For debugging - receive_origin_cells.push_back(nbr); - + receive_origin_cells.push_back(nbr); } } } @@ -1471,14 +1529,6 @@ void update_remote_mapping_contribution( break; } - // std::vector b; - // for( auto c : local_cells ) { - // b.push_back(mpiGrid[c]->ioLocalCellId); - // } - // std::cout << b.back() << std::endl; - - // MPI_Barrier(MPI_COMM_WORLD); - if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; //#pragma omp parallel @@ -1490,16 +1540,16 @@ void update_remote_mapping_contribution( SpatialCell* spatial_cell = mpiGrid[receive_cells[c]]; Realf *blockData = spatial_cell->get_data(popID); - //Realf checksum = 0.0; + Realf checksum = 0.0; int numReceiveCells = count(receive_cells.begin(), receive_cells.end(), receive_cells[c]); //#pragma omp for for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { blockData[vCell] += receiveBuffers[c][vCell] / numReceiveCells; - //checksum += blockData[vCell]; + checksum += blockData[vCell]; } - //receive_cells_sums.push_back(checksum); + receive_cells_sums.push_back(checksum); } // send cell data is set to zero. This is to avoid double copy if @@ -1507,7 +1557,7 @@ void update_remote_mapping_contribution( for (size_t c = 0; c < send_cells.size(); ++c) { SpatialCell* spatial_cell = mpiGrid[send_cells[c]]; Realf * blockData = spatial_cell->get_data(popID); - + //#pragma omp for nowait for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { // copy received target data to temporary array where target data is stored. From ccce97c64fad10ec97d4a6a86719c9104ad7bb7b Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 19 Dec 2018 10:52:49 +0200 Subject: [PATCH 164/602] Changed loop over local cells to c++11 format. Changed the condition for initializing neighbor_number_of_blocks to non-0. Same error still appears. 
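The sibling bookkeeping that this commit and the previous one iterate on is easier to see in isolation. Below is a minimal sketch under the same assumption as the patch, namely that a refined cell has exactly four siblings in the plane perpendicular to the translation dimension; the helper and its parameters are hypothetical stand-ins for the dccrg mapping calls used in the diffs:

#include <algorithm>
#include <array>
#include <cstdint>
#include <vector>

using CellID = uint64_t;

// Pick the neighbor_block_data slot a cell should read its data from.
// allSiblings: children of the cell's parent; indices[i]: (x,y,z) index
// triple of allSiblings[i]; selfIndices: index triple of the cell itself.
int receiveBufferSlot(const CellID self,
                      const std::vector<CellID>& allSiblings,
                      const std::vector<std::array<int,3>>& indices,
                      const std::array<int,3>& selfIndices,
                      const int dimension) {
   // Keep only the siblings that share this cell's index along the
   // translation dimension, i.e. the four cells facing the same neighbor.
   std::vector<CellID> siblings;
   for (size_t i = 0; i < allSiblings.size(); ++i) {
      if (indices[i][dimension] == selfIndices[dimension]) {
         siblings.push_back(allSiblings[i]);
      }
   }
   const auto it = std::find(siblings.begin(), siblings.end(), self);
   // An unrefined cell is not among its parent's children: one sibling,
   // one buffer, slot 0. Otherwise the sibling position selects the slot.
   return (it == siblings.end()) ? 0
                                 : static_cast<int>(std::distance(siblings.begin(), it));
}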
--- vlasovsolver/cpu_trans_map_amr.cpp | 51 +++++++++++++++++++----------- 1 file changed, 33 insertions(+), 18 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 69b63eefe..4df4757d9 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1365,42 +1365,57 @@ void update_remote_mapping_contribution( for (auto c : remote_cells) { SpatialCell *ccell = mpiGrid[c]; // Initialize number of blocks to 0 and block data to a default value + // We need the default for 1 to 1 communications for (uint i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { ccell->neighbor_block_data[i] = ccell->get_data(popID); ccell->neighbor_number_of_blocks[i] = 0; } - } - - - if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; + } - for (uint c = 0; c < local_cells.size(); ++c) { + for (auto c : local_cells) { - SpatialCell *ccell = mpiGrid[local_cells[c]]; + SpatialCell *ccell = mpiGrid[c]; - int neighborhood = getNeighborhood(dimension,1); - auto* nbrPairVector = mpiGrid.get_neighbors_of(local_cells[c], neighborhood); + int neighborhood = getNeighborhood(dimension,1); + auto* nbrPairVector = mpiGrid.get_neighbors_of(c, neighborhood); - // Initialize to empty vectors, add default values at the end. - vector n_nbrs; + // Initialize to empty vectors, add default values at the end. + + // neighbors in the positive direction vector p_nbrs; + // neighbors on the negative direction + vector n_nbrs; + + auto myRefLvl = mpiGrid.get_refinement_level(c); + bool sameRefinementAsNeighbors = true; // Collect neighbors on the positive and negative sides into separate lists for (auto nbrPair : *nbrPairVector) { if (nbrPair.second.at(dimension) == direction) { p_nbrs.push_back(nbrPair.first); + + if(mpiGrid.get_refinement_level(nbrPair.first) != myRefLvl) { + sameRefinementAsNeighbors = false; + } + } if (nbrPair.second.at(dimension) == -direction) { n_nbrs.push_back(nbrPair.first); + + // if(mpiGrid.get_refinement_level(nbrPair.first) != myRefLvl) { + // sameRefinementAsNeighbors = false; + // } + } } - - int maxNeighborSize = max(p_nbrs.size(),n_nbrs.size()); + + // int maxNeighborSize = max(p_nbrs.size(),n_nbrs.size()); for (uint i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { - if(maxNeighborSize == 1) { + if(sameRefinementAsNeighbors) { + //if(maxNeighborSize == 1) { // Initialize number of blocks to 0 and block data to a default value ccell->neighbor_block_data[i] = ccell->get_data(popID); ccell->neighbor_number_of_blocks[i] = 0; @@ -1437,7 +1452,7 @@ void update_remote_mapping_contribution( ccell->neighbor_block_data.at(i_nbr) = pcell->get_data(popID); ccell->neighbor_number_of_blocks.at(i_nbr) = pcell->get_number_of_velocity_blocks(popID); send_cells.push_back(nbr); - send_origin_cells.push_back(local_cells[c]); + send_origin_cells.push_back(c); } } } @@ -1465,8 +1480,8 @@ void update_remote_mapping_contribution( // Find out which cell in the list of siblings this cell is. That will determine which // neighbor_block_data gets stored as the transferBuffer. 
- auto myIndices = mpiGrid.mapping.get_indices(local_cells[c]); - auto allSiblings = mpiGrid.get_all_children(mpiGrid.get_parent(local_cells[c])); + auto myIndices = mpiGrid.mapping.get_indices(c); + auto allSiblings = mpiGrid.get_all_children(mpiGrid.get_parent(c)); vector siblings; for (auto sibling : allSiblings) { @@ -1476,7 +1491,7 @@ void update_remote_mapping_contribution( } } - auto myLocation = std::find(siblings.begin(),siblings.end(),local_cells[c]); + auto myLocation = std::find(siblings.begin(),siblings.end(),c); uint nSiblings = 1; uint recvBufferIndex = 0; uint bufferSize = 1; @@ -1501,7 +1516,7 @@ void update_remote_mapping_contribution( } receiveBuffers.push_back(ncell->neighbor_block_data.at(recvBufferIndex)); - receive_cells.push_back(local_cells[c]); + receive_cells.push_back(c); // For debugging receive_origin_cells.push_back(nbr); } From 5905414352a9da98032cdf020a010ae46332de22 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 20 Dec 2018 12:42:02 +0200 Subject: [PATCH 165/602] Changed hard-coded value 4 to MAX_FACE_NEIGHBORS_PER_DIM --- spatial_cell.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spatial_cell.hpp b/spatial_cell.hpp index 771385185..1a642cef4 100644 --- a/spatial_cell.hpp +++ b/spatial_cell.hpp @@ -333,9 +333,9 @@ namespace spatial_cell { //Realf* neighbor_block_data; /**< Pointers for translation operator. We can point to neighbor // * cell block data. We do not allocate memory for the pointer.*/ //vmesh::LocalID neighbor_number_of_blocks; - std::array neighbor_block_data; /**< Pointers for translation operator. We can point to neighbor + std::array neighbor_block_data; /**< Pointers for translation operator. We can point to neighbor * cell block data. We do not allocate memory for the pointer.*/ - std::array neighbor_number_of_blocks; + std::array neighbor_number_of_blocks; uint sysBoundaryFlag; /**< What type of system boundary does the cell belong to. * Enumerated in the sysboundarytype namespace's enum.*/ uint sysBoundaryLayer; /**< Layers counted from closest systemBoundary. If 0 then it has not From b5ad4bd5650f205d1946a998e7e273f996d8bbcc Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 20 Dec 2018 12:42:53 +0200 Subject: [PATCH 166/602] Fixed typo --- spatial_cell.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spatial_cell.hpp b/spatial_cell.hpp index 1a642cef4..671c19893 100644 --- a/spatial_cell.hpp +++ b/spatial_cell.hpp @@ -335,7 +335,7 @@ namespace spatial_cell { //vmesh::LocalID neighbor_number_of_blocks; std::array neighbor_block_data; /**< Pointers for translation operator. We can point to neighbor * cell block data. We do not allocate memory for the pointer.*/ - std::array neighbor_number_of_blocks; + std::array neighbor_number_of_blocks; uint sysBoundaryFlag; /**< What type of system boundary does the cell belong to. * Enumerated in the sysboundarytype namespace's enum.*/ uint sysBoundaryLayer; /**< Layers counted from closest systemBoundary. If 0 then it has not From f224ecdc63385938aa5e859e5e56dddedea416f7 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 20 Dec 2018 12:46:48 +0200 Subject: [PATCH 167/602] One more --- spatial_cell.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spatial_cell.hpp b/spatial_cell.hpp index 671c19893..5d8437d56 100644 --- a/spatial_cell.hpp +++ b/spatial_cell.hpp @@ -333,7 +333,7 @@ namespace spatial_cell { //Realf* neighbor_block_data; /**< Pointers for translation operator. We can point to neighbor // * cell block data. 
We do not allocate memory for the pointer.*/ //vmesh::LocalID neighbor_number_of_blocks; - std::array neighbor_block_data; /**< Pointers for translation operator. We can point to neighbor + std::array neighbor_block_data; /**< Pointers for translation operator. We can point to neighbor * cell block data. We do not allocate memory for the pointer.*/ std::array neighbor_number_of_blocks; uint sysBoundaryFlag; /**< What type of system boundary does the cell belong to. From 9c6715cb3a54f58aa417002f748ba96c8f530498 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 21 Dec 2018 15:19:52 +0200 Subject: [PATCH 168/602] Version for debugging MPI issues in update_remote_neighbors. Lots of print statements etc. --- MAKE/Makefile.appa | 2 +- projects/testAmr/testAmr.cfg | 19 +-- vlasiator.cpp | 2 +- vlasovsolver/cpu_trans_map_amr.cpp | 216 ++++++++++++++++------------- vlasovsolver/vlasovmover.cpp | 4 +- 5 files changed, 137 insertions(+), 106 deletions(-) diff --git a/MAKE/Makefile.appa b/MAKE/Makefile.appa index 670daf45c..4984ad871 100644 --- a/MAKE/Makefile.appa +++ b/MAKE/Makefile.appa @@ -48,7 +48,7 @@ FLAGS = #GNU flags: CC_BRAND = gcc CC_BRAND_VERSION = 5.4.0 -CXXFLAGS += -g -O2 -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2 +CXXFLAGS += -g -O0 -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2 #CXXFLAGS += -g -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2 testpackage: CXXFLAGS = -O2 -fopenmp -funroll-loops -std=c++0x -fabi-version=0 -mavx diff --git a/projects/testAmr/testAmr.cfg b/projects/testAmr/testAmr.cfg index 10915b372..aef397546 100644 --- a/projects/testAmr/testAmr.cfg +++ b/projects/testAmr/testAmr.cfg @@ -23,16 +23,16 @@ system_write_distribution_zline_stride = 0 [gridbuilder] -x_length = 5 -y_length = 5 -z_length = 3 +x_length = 7 +y_length = 7 +z_length = 7 x_min = 0.0 x_max = 1.0e6 y_min = 0.0 y_max = 1.0e6 z_min = 0.0 z_max = 1.0e6 -timestep_max = 100 +timestep_max = 5 [proton_vspace] vx_min = -2.0e6 @@ -41,9 +41,9 @@ vy_min = -2.0e6 vy_max = +2.0e6 vz_min = -2.0e6 vz_max = +2.0e6 -vx_length = 1 -vy_length = 1 -vz_length = 1 +vx_length = 2 +vy_length = 2 +vz_length = 2 max_refinement_level = 1 [proton_sparse] minValue = 1.0e-16 @@ -77,7 +77,7 @@ Bz = 1.1135233442526334e-10 magXPertAbsAmp = 0 magYPertAbsAmp = 0 magZPertAbsAmp = 0 -densityModel = testcase +densityModel = uniform nVelocitySamples = 3 [proton_testAmr] @@ -92,4 +92,5 @@ rho = 1.0e6 rhoPertAbsAmp = 0.0 [loadBalance] -algorithm = RCB \ No newline at end of file +algorithm = RCB +#algorithm = random \ No newline at end of file diff --git a/vlasiator.cpp b/vlasiator.cpp index 5965f6108..d0e11b993 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -271,7 +271,7 @@ int main(int argn,char* args[]) { const bool printLines = false; const bool printCells = false; - const bool printSums = false; + const bool printSums = true; // Init MPI: int required=MPI_THREAD_FUNNELED; diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 4df4757d9..556f2f77e 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1095,8 +1095,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& uint8_t vRefLevel; vmesh.getIndices(blockGID,vRefLevel, block_indices[0], block_indices[1], block_indices[2]); - - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Loop over sets of pencils // This loop only has one iteration for now @@ -1106,15 
+1104,11 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Allocate vectorized targetvecdata sum(lengths of pencils)*WID3 / VECL) // Add padding by 2 for each pencil Vec targetVecData[(pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL]; - - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Initialize targetvecdata to 0 for( uint i = 0; i < (pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL; i++ ) { targetVecData[i] = Vec(0.0); } - - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // TODO: There's probably a smarter way to keep track of where we are writing // in the target data array. @@ -1124,12 +1118,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // For targets we need the local cells, plus a padding of 1 cell at both ends std::vector targetCells(pencils.sumOfLengths + pencils.N * 2 ); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - computeSpatialTargetCellsForPencils(mpiGrid, pencils, dimension, targetCells.data()); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // Loop over pencils uint totalTargetLength = 0; for(uint pencili = 0; pencili < pencils.N; ++pencili){ @@ -1150,8 +1140,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // } // std::cout << std::endl; - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - Vec dz[sourceCells.size()]; uint i = 0; for(auto cell: sourceCells) { @@ -1169,26 +1157,18 @@ bool trans_map_1d_amr(const dccrg::Dccrg& i++; } - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // Allocate source data: sourcedata sourcedata) / (proper xy reconstruction in future) copy_trans_block_data_amr(sourceCells.data(), blockGID, L, sourceVecData, cellid_transpose, popID); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // Dz and sourceVecData are both padded by VLASOV_STENCIL_WIDTH // Dz has 1 value/cell, sourceVecData has WID3 values/cell propagatePencil(dz, sourceVecData, dimension, blockGID, dt, vmesh, L); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - if (printTargets) std::cout << "Target cells for pencil " << pencili << ", rank " << myRank << ": "; // sourcedata => targetdata[this pencil]) for (uint i = 0; i < targetLength; i++) { @@ -1233,8 +1213,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } } - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // store_data(target_data => targetCells) :Aggregate data for blockid to original location // Loop over pencils again totalTargetLength = 0; @@ -1248,14 +1226,9 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // All cells that are not on the max refinement level will be split // Into multiple pencils. This has to be taken into account when adding // up the contributions from each pencil. - int maxRefLvl = 0; - int minRefLvl = mpiGrid.get_maximum_refinement_level(); - for (auto id : pencilIds) { - //int refLvl = mpiGrid.get_refinement_level(id); - int refLvl = pencils.path[pencili].size(); - maxRefLvl = max(maxRefLvl,refLvl); - minRefLvl = min(minRefLvl,refLvl); - } + // The most convenient way is to just count how many refinement steps the + // pencil has taken on its path. 
+ int maxRefLvl = pencils.path[pencili].size(); // Unpack the vector data @@ -1346,15 +1319,16 @@ void update_remote_mapping_contribution( const vector remote_cells = mpiGrid.get_remote_cells_on_process_boundary(VLASOV_SOLVER_NEIGHBORHOOD_ID); vector receive_cells; vector send_cells; - vector receiveBuffers; + //vector receiveBuffers; vector send_origin_cells; vector receive_origin_cells; + vector receive_origin_index; int myRank; const bool printLines = false; - if(printLines) MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; //normalize @@ -1367,8 +1341,17 @@ void update_remote_mapping_contribution( // Initialize number of blocks to 0 and block data to a default value // We need the default for 1 to 1 communications for (uint i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { - ccell->neighbor_block_data[i] = ccell->get_data(popID); - ccell->neighbor_number_of_blocks[i] = 0; + //ccell->neighbor_block_data[i] = ccell->get_data(popID); + //ccell->neighbor_number_of_blocks[i] = 0; + + + ccell->neighbor_number_of_blocks[i] = ccell->get_number_of_velocity_blocks(popID); + ccell->neighbor_block_data[i] = + (Realf*) aligned_malloc(ccell->neighbor_number_of_blocks[i] * WID3 * sizeof(Realf), 64); + for (uint j = 0; j < ccell->neighbor_number_of_blocks[i] * WID3; ++j) { + ccell->neighbor_block_data[i][j] = 0.0; + } + } } @@ -1414,12 +1397,12 @@ void update_remote_mapping_contribution( // int maxNeighborSize = max(p_nbrs.size(),n_nbrs.size()); for (uint i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { - if(sameRefinementAsNeighbors) { - //if(maxNeighborSize == 1) { - // Initialize number of blocks to 0 and block data to a default value - ccell->neighbor_block_data[i] = ccell->get_data(popID); - ccell->neighbor_number_of_blocks[i] = 0; - } else { + // if(sameRefinementAsNeighbors) { + // //if(maxNeighborSize == 1) { + // // Initialize number of blocks to 0 and block data to a default value + // ccell->neighbor_block_data[i] = ccell->get_data(popID); + // ccell->neighbor_number_of_blocks[i] = 0; + // } else { // Initialize number of blocks to the number of blocks in this cell (neighbors should have same number) // and the block data to 0. We need to do this to make multi-process communications work. ccell->neighbor_number_of_blocks[i] = ccell->get_number_of_velocity_blocks(popID); @@ -1428,7 +1411,7 @@ void update_remote_mapping_contribution( for (uint j = 0; j < ccell->neighbor_number_of_blocks[i] * WID3; ++j) { ccell->neighbor_block_data[i][j] = 0.0; } - } + // } } if (all_of(nbrPairVector->begin(), nbrPairVector->end(), @@ -1437,7 +1420,34 @@ void update_remote_mapping_contribution( continue; } + + // Find out which cell in the list of siblings this cell is. That will determine which + // neighbor_block_data gets stored as the transferBuffer. 
+ auto myIndices = mpiGrid.mapping.get_indices(c); + auto allSiblings = mpiGrid.get_all_children(mpiGrid.get_parent(c)); + vector siblings; + + for (auto sibling : allSiblings) { + auto indices = mpiGrid.mapping.get_indices(sibling); + if(indices[dimension] == myIndices[dimension]) { + siblings.push_back(sibling); + } + } + + auto myLocation = std::find(siblings.begin(),siblings.end(),c); + + uint nSiblings = 1; + uint sendIndex = 0; + uint recvIndex = 0; + uint bufferSize = 1; + if(myLocation != siblings.end()) { + nSiblings = 4; + sendIndex = std::distance(siblings.begin(), myLocation); + recvIndex = std::distance(siblings.begin(), myLocation); + bufferSize = 4; + } + // MPI_Barrier(MPI_COMM_WORLD); // ccell adds a neighbor_block_data block for each neighbor in the positive direction to its local data for (uint i_nbr = 0; i_nbr < p_nbrs.size(); ++i_nbr) { @@ -1448,15 +1458,42 @@ void update_remote_mapping_contribution( if(pcell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { //Send data in nbr target array that we just mapped to if 1) it is a valid target, //2) is remote cell, 3) the source cell in center was translated, 4) it is not a boundary cell? + + if(nSiblings == 1 && p_nbrs.size() == 4) { + sendIndex = i_nbr; + } - ccell->neighbor_block_data.at(i_nbr) = pcell->get_data(popID); - ccell->neighbor_number_of_blocks.at(i_nbr) = pcell->get_number_of_velocity_blocks(popID); + ccell->neighbor_block_data.at(sendIndex) = pcell->get_data(popID); + ccell->neighbor_number_of_blocks.at(sendIndex) = pcell->get_number_of_velocity_blocks(popID); send_cells.push_back(nbr); send_origin_cells.push_back(c); + + Realf checksum1 = 0.0; + Realf checksum2 = 0.0; + Realf checksum3 = 0.0; + Realf checksum4 = 0.0; + for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * ccell->get_number_of_velocity_blocks(popID); ++vCell) { + checksum1 += ccell->neighbor_block_data[0][vCell]; + checksum2 += ccell->neighbor_block_data[1][vCell]; + checksum3 += ccell->neighbor_block_data[2][vCell]; + checksum4 += ccell->neighbor_block_data[3][vCell]; + } + + cout << "Rank " << myRank << ": Cell " << c << " sending to " << nbr << " index is " << sendIndex << " sums are "; + cout << checksum1 << ", "; + cout << checksum2 << ", "; + cout << checksum3 << ", "; + cout << checksum4 << ", "; + cout << "numbers of blocks are "; + cout << ccell->neighbor_number_of_blocks[0] << ", "; + cout << ccell->neighbor_number_of_blocks[1] << ", "; + cout << ccell->neighbor_number_of_blocks[2] << ", "; + cout << ccell->neighbor_number_of_blocks[3]; + cout << endl; } } } - + // MPI_Barrier(MPI_COMM_WORLD); for (uint i_nbr = 0; i_nbr < n_nbrs.size(); ++i_nbr) { @@ -1477,32 +1514,9 @@ void update_remote_mapping_contribution( // Receiving cell reads the element from receiveBuffer equal to their sibling index // 3) sibling of 1 receiving from sibling of 4 // Receiving cell reads all elements from receiveBuffer that have data from remote neighbors - - // Find out which cell in the list of siblings this cell is. That will determine which - // neighbor_block_data gets stored as the transferBuffer. 
- auto myIndices = mpiGrid.mapping.get_indices(c); - auto allSiblings = mpiGrid.get_all_children(mpiGrid.get_parent(c)); - vector siblings; - - for (auto sibling : allSiblings) { - auto indices = mpiGrid.mapping.get_indices(sibling); - if(indices[dimension] == myIndices[dimension]) { - siblings.push_back(sibling); - } - } - - auto myLocation = std::find(siblings.begin(),siblings.end(),c); - uint nSiblings = 1; - uint recvBufferIndex = 0; - uint bufferSize = 1; - if(myLocation != siblings.end()) { - nSiblings = 4; - recvBufferIndex = std::distance(siblings.begin(), myLocation); - bufferSize = 4; - } - + if(nSiblings == 1 && n_nbrs.size() == 4) { - recvBufferIndex = i_nbr; + recvIndex = i_nbr; bufferSize = 4; } @@ -1512,20 +1526,22 @@ void update_remote_mapping_contribution( for (uint i_buf = 0; i_buf < bufferSize; ++i_buf) { ncell->neighbor_number_of_blocks.at(i_buf) = ccell->get_number_of_velocity_blocks(popID); ncell->neighbor_block_data.at(i_buf) = - (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(i_buf) * WID3 * sizeof(Realf), 64); + (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(i_buf) * WID3 * sizeof(Realf), 64); + + // for (uint j = 0; j < ccell->neighbor_number_of_blocks[i_buf] * WID3; ++j) { + // ncell->neighbor_block_data[i_buf][j] = 0.0; + // } } - receiveBuffers.push_back(ncell->neighbor_block_data.at(recvBufferIndex)); + //receiveBuffers.push_back(ncell->neighbor_block_data.at(recvIndex)); receive_cells.push_back(c); // For debugging receive_origin_cells.push_back(nbr); + receive_origin_index.push_back(recvIndex); } } } - // MPI_Barrier(MPI_COMM_WORLD); - if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << " " << direction << " " << dimension < receive_cells_sums; + std::vector receive_cells_sums; + MPI_Barrier(MPI_COMM_WORLD); //reduce data: sum received data in the data array to // the target grid in the temporary block container for (size_t c = 0; c < receive_cells.size(); ++c) { - SpatialCell* spatial_cell = mpiGrid[receive_cells[c]]; - Realf *blockData = spatial_cell->get_data(popID); - - Realf checksum = 0.0; + SpatialCell* receive_cell = mpiGrid[receive_cells[c]]; + SpatialCell* origin_cell = mpiGrid[receive_origin_cells[c]]; + + Realf *blockData = receive_cell->get_data(popID); + Realf *neighborData = origin_cell->neighbor_block_data[receive_origin_index[c]]; int numReceiveCells = count(receive_cells.begin(), receive_cells.end(), receive_cells[c]); + Realf checksum = 0.0; + Realf checksum1 = 0.0; + Realf checksum2 = 0.0; + Realf checksum3 = 0.0; + Realf checksum4 = 0.0; //#pragma omp for - for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { - blockData[vCell] += receiveBuffers[c][vCell] / numReceiveCells; - checksum += blockData[vCell]; + for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * receive_cell->get_number_of_velocity_blocks(popID); ++vCell) { + checksum += neighborData[vCell]; + checksum1 += origin_cell->neighbor_block_data[0][vCell]; + checksum2 += origin_cell->neighbor_block_data[1][vCell]; + checksum3 += origin_cell->neighbor_block_data[2][vCell]; + checksum4 += origin_cell->neighbor_block_data[3][vCell]; + blockData[vCell] += neighborData[vCell] / numReceiveCells; } receive_cells_sums.push_back(checksum); + cout << "Rank " << myRank << ": cell " << receive_cells[c] << " receiving from " << receive_origin_cells[c] << ". 
Checksums: "; + cout << checksum1 << ", "; + cout << checksum2 << ", "; + cout << checksum3 << ", "; + cout << checksum4 << "."; + cout << " Index is " << receive_origin_index[c] << endl; } - + MPI_Barrier(MPI_COMM_WORLD); + cout << endl; // send cell data is set to zero. This is to avoid double copy if // one cell is the neighbor on bot + and - side to the same process for (size_t c = 0; c < send_cells.size(); ++c) { SpatialCell* spatial_cell = mpiGrid[send_cells[c]]; - Realf * blockData = spatial_cell->get_data(popID); - + Realf * blockData = spatial_cell->get_data(popID); //#pragma omp for nowait for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { // copy received target data to temporary array where target data is stored. @@ -1581,11 +1614,8 @@ void update_remote_mapping_contribution( } //} - // MPI_Barrier(MPI_COMM_WORLD); - if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; - - //and finally free temporary receive buffer - for (size_t c=0; c < receiveBuffers.size(); ++c) { - aligned_free(receiveBuffers[c]); - } + // //and finally free temporary receive buffer + // for (size_t c=0; c < receiveBuffers.size(); ++c) { + // aligned_free(receiveBuffers[c]); + // } } diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index 1a02b19ad..ee79dc82d 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -242,8 +242,8 @@ void calculateSpatialTranslation( // MPI_Barrier(MPI_COMM_WORLD); } - // MPI_Barrier(MPI_COMM_WORLD); - // bailout(true, "", __FILE__, __LINE__); + MPI_Barrier(MPI_COMM_WORLD); + bailout(true, "", __FILE__, __LINE__); } /*! From d029bbe0e70a09f2bfb1ac66295dd1e3dac79367 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 7 Jan 2019 10:25:04 +0200 Subject: [PATCH 169/602] Communication bug fixes. A double copy was being made when multiple neighbors on the same process were sending data to a cell on a different process. The use of areaRatio fixed this in some cases but caused problems elsewhere. Now areaRatio only compensates for cases where a pencil does not cover the whole cell area, and the double copy is avoided by keeping track of cells with the same neighbor. 
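The weighting rule this commit settles on is worth spelling out. A pencil whose path holds pathSize refinement steps has cross-section (dx / 2^pathSize)^2, while a cell at refinement level refLvl has cross-section (dx / 2^refLvl)^2; the ratio of the two is the fraction of the cell's face that one pencil covers. A sketch of the formula the diff below applies, with hypothetical standalone names:

#include <cmath>

// Weight of one pencil's contribution to a spatial cell it crosses.
// cellRefLvl: the cell's refinement level; pencilPathSize: the number of
// refinement steps on the pencil's path, i.e. pencils.path[pencili].size().
double areaRatio(const int cellRefLvl, const int pencilPathSize) {
   // Example: a level-0 cell crossed by depth-1 pencils gets ratio 1/4;
   // four such pencils cover its face and each contributes a quarter.
   return std::pow(std::pow(2.0, cellRefLvl - pencilPathSize), 2.0);
}

The double copy, in turn, is avoided in the diff by recording each remote neighbor in a set on the first send and skipping later sends to it.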
--- vlasovsolver/cpu_trans_map_amr.cpp | 277 ++++++++++++++--------------- 1 file changed, 132 insertions(+), 145 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 556f2f77e..62f6f693f 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -274,64 +274,63 @@ CellID selectNeighbor(const dccrg::Dccrg } -void removeDuplicates(setOfPencils &pencils) { +// void removeDuplicates(setOfPencils &pencils) { - vector duplicatePencilIds; +// vector duplicatePencilIds; - // Loop over all pencils twice to do cross-comparisons - for (uint myPencilId = 0; myPencilId < pencils.N; ++myPencilId) { +// // Loop over all pencils twice to do cross-comparisons +// for (uint myPencilId = 0; myPencilId < pencils.N; ++myPencilId) { - vector myCellIds = pencils.getIds(myPencilId); +// vector myCellIds = pencils.getIds(myPencilId); - for (uint theirPencilId = 0; theirPencilId < pencils.N; ++theirPencilId) { +// for (uint theirPencilId = 0; theirPencilId < pencils.N; ++theirPencilId) { - // Do not compare with self - if (myPencilId == theirPencilId) { - continue; - } +// // Do not compare with self +// if (myPencilId == theirPencilId) { +// continue; +// } - // we check if all cells of pencil b ("their") are included in pencil a ("my") - bool removeThisPencil = true; +// // we check if all cells of pencil b ("their") are included in pencil a ("my") +// bool removeThisPencil = true; - vector theirCellIds = pencils.getIds(theirPencilId); - - for (auto theirCellId : theirCellIds) { - bool matchFound = false; - for (auto myCellId : myCellIds) { - // Compare each "my" cell to all "their" cells, if any of them match - // update a logical value matchFound to true. - if (myCellId == theirCellId && pencils.path[myPencilId] == pencils.path[theirPencilId]) { - matchFound = true; - } - } - // If no match was found for this "my" cell, we can end the comparison, these pencils - // are not duplicates. - if(!matchFound) { - removeThisPencil = false; - continue; - } - } - - if(removeThisPencil) { - if(std::find(duplicatePencilIds.begin(), duplicatePencilIds.end(), myPencilId) == duplicatePencilIds.end() ) { - duplicatePencilIds.push_back(theirPencilId); - } +// vector theirCellIds = pencils.getIds(theirPencilId); + +// for (auto theirCellId : theirCellIds) { +// bool matchFound = false; +// for (auto myCellId : myCellIds) { +// // Compare each "my" cell to all "their" cells, if any of them match +// // update a logical value matchFound to true. +// if (myCellId == theirCellId && pencils.path[myPencilId] == pencils.path[theirPencilId]) { +// matchFound = true; +// } +// } +// // If no match was found for this "my" cell, we can end the comparison, these pencils +// // are not duplicates. 
+// if(!matchFound) { +// removeThisPencil = false; +// continue; +// } +// } + +// if(removeThisPencil) { +// if(std::find(duplicatePencilIds.begin(), duplicatePencilIds.end(), myPencilId) == duplicatePencilIds.end() ) { +// duplicatePencilIds.push_back(theirPencilId); +// } - } +// } - } +// } - } +// } - int myRank; - MPI_Comm_rank(MPI_COMM_WORLD,&myRank); +// int myRank; +// MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - for (auto id : duplicatePencilIds) { - //pencils.removePencil(id); - cout << "I am rank " << myRank << ", I would like to remove pencil number " << id << endl; - } - -} +// for (auto id : duplicatePencilIds) { +// //pencils.removePencil(id); +// cout << "I am rank " << myRank << ", I would like to remove pencil number " << id << endl; +// } +// } setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg &grid, setOfPencils &pencils, const CellID startingId, @@ -484,7 +483,7 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg& mpiGr const bool debug = false; int myRank; - MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + if (debug) MPI_Comm_rank(MPI_COMM_WORLD,&myRank); int neighborhood = getNeighborhood(dimension,1); @@ -924,6 +928,7 @@ void printPencilsFunc(const setOfPencils& pencils, const uint dimension, const i std::cout << std::endl; } + MPI_Barrier(MPI_COMM_WORLD); } bool trans_map_1d_amr(const dccrg::Dccrg& mpiGrid, @@ -935,7 +940,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const bool printPencils = false; const bool printTargets = false; - const bool printLines = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ unsigned char cellid_transpose[WID3]; /*< defines the transpose for the solver internal (transposed) id: i + j*WID + k*WID2 to actual one*/ @@ -947,9 +951,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } int myRank; - if(printLines || printPencils) MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; + if(printTargets || printPencils) MPI_Comm_rank(MPI_COMM_WORLD,&myRank); // Vector with all cell ids vector allCells(localPropagatedCells); @@ -1022,15 +1024,11 @@ bool trans_map_1d_amr(const dccrg::Dccrg& setOfPencils pencils; vector pencilSets; - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - for (const auto seedId : seedIds) { // Construct pencils from the seedIds into a set of pencils. 
pencils = buildPencilsWithNeighbors(mpiGrid, pencils, seedId, ids, dimension, path, seedIds); } - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // Check refinement of two ghost cells on each end of each pencil check_ghost_cells(mpiGrid,pencils,dimension); // **************************************************************************** @@ -1048,8 +1046,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Only one set is created for now but we retain support for multiple sets pencilSets.push_back(pencils); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - const uint8_t VMESH_REFLEVEL = 0; // Get a pointer to the velocity mesh of the first spatial cell @@ -1075,8 +1071,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& unionOfBlocks.push_back(blockGID); } // **************************************************************************** - - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; int t1 = phiprof::initializeTimer("mappingAndStore"); @@ -1139,7 +1133,9 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // std::cout << cell->parameters[CellParams::CELLID] << " "; // } // std::cout << std::endl; - + + + // dz is the cell size in the direction of the pencil Vec dz[sourceCells.size()]; uint i = 0; for(auto cell: sourceCells) { @@ -1154,6 +1150,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& dz[i] = cell->SpatialCell::parameters[CellParams::DZ]; break; } + i++; } @@ -1220,7 +1217,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& int L = pencils.lengthOfPencils[pencili]; uint targetLength = L + 2; - vector pencilIds = pencils.getIds(pencili); + //vector pencilIds = pencils.getIds(pencili); // Calculate the max and min refinement levels in this pencil. // All cells that are not on the max refinement level will be split // Into multiple pencils. This has to be taken into account when adding // up the contributions from each pencil. // The most convenient way is to just count how many refinement steps the // pencil has taken on its path. - int maxRefLvl = pencils.path[pencili].size(); + int pencilRefLvl = pencils.path[pencili].size(); // Unpack the vector data // Loop over cells in pencil +- 1 padded cell for ( uint celli = 0; celli < targetLength; ++celli ) { - - // // If the pencil is periodic, we do not write the ghost cells because - // // They are copies of cells that are already in the pencil - // // - It seems that doing this was wrong. Investigate! - if(pencils.periodic[pencili] && (celli == 0 || celli == targetLength - 1)) - continue; Realv vector[VECL]; // Loop over 1st vspace dimension @@ -1277,14 +1268,24 @@ bool trans_map_1d_amr(const dccrg::Dccrg& Realf* blockData = spatial_cell->get_data(blockLID, popID); - Realf areaRatio = 1.0; - if (spatial_cell->parameters[CellParams::REFINEMENT_LEVEL] < maxRefLvl) { - areaRatio = 1.0 / pow(pow(2, maxRefLvl - spatial_cell->parameters[CellParams::REFINEMENT_LEVEL]), 2); - } - + // areaRatio is the ratio of the cross-section of the spatial cell to the cross-section of the pencil.
+ Realf areaRatio = pow(pow(2,spatial_cell->SpatialCell::parameters[CellParams::REFINEMENT_LEVEL] - pencils.path[pencili].size()),2);; + + // Realf checksum = 0.0; for(int i = 0; i < WID3 ; i++) { blockData[i] += targetBlockData[GID * WID3 + i] * areaRatio; + // checksum += targetBlockData[GID * WID3 + i] * areaRatio; } + + + // cout << "Rank " << myRank; + // cout << ", pencil " << pencili; + // cout << ", cell " << spatial_cell->parameters[CellParams::CELLID]; + // cout << ", dimension " << dimension; + // cout << ", areaRatio " << areaRatio; + // cout << ", checksum = " << checksum; + // cout << endl; + } totalTargetLength += targetLength; @@ -1293,9 +1294,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } } } - } + } - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; return true; } @@ -1319,18 +1319,10 @@ void update_remote_mapping_contribution( const vector remote_cells = mpiGrid.get_remote_cells_on_process_boundary(VLASOV_SOLVER_NEIGHBORHOOD_ID); vector receive_cells; vector send_cells; - //vector receiveBuffers; - vector send_origin_cells; vector receive_origin_cells; vector receive_origin_index; - - int myRank; - const bool printLines = false; - - MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - + //normalize if(direction > 0) direction = 1; if(direction < 0) direction = -1; @@ -1351,9 +1343,10 @@ void update_remote_mapping_contribution( for (uint j = 0; j < ccell->neighbor_number_of_blocks[i] * WID3; ++j) { ccell->neighbor_block_data[i][j] = 0.0; } - } } + + set allNeighbors; for (auto c : local_cells) { @@ -1447,18 +1440,23 @@ void update_remote_mapping_contribution( bufferSize = 4; } - // MPI_Barrier(MPI_COMM_WORLD); // ccell adds a neighbor_block_data block for each neighbor in the positive direction to its local data for (uint i_nbr = 0; i_nbr < p_nbrs.size(); ++i_nbr) { CellID nbr = p_nbrs[i_nbr]; - if (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && do_translate_cell(ccell)) { + if (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && do_translate_cell(ccell) + && allNeighbors.find(nbr) == allNeighbors.end()) { + SpatialCell *pcell = mpiGrid[nbr]; + if(pcell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { //Send data in nbr target array that we just mapped to if 1) it is a valid target, //2) is remote cell, 3) the source cell in center was translated, 4) it is not a boundary cell? + //5) We have not already sent data from this rank to this cell. 
+ allNeighbors.insert(nbr); + if(nSiblings == 1 && p_nbrs.size() == 4) { sendIndex = i_nbr; } @@ -1466,34 +1464,31 @@ void update_remote_mapping_contribution( ccell->neighbor_block_data.at(sendIndex) = pcell->get_data(popID); ccell->neighbor_number_of_blocks.at(sendIndex) = pcell->get_number_of_velocity_blocks(popID); send_cells.push_back(nbr); - send_origin_cells.push_back(c); - - Realf checksum1 = 0.0; - Realf checksum2 = 0.0; - Realf checksum3 = 0.0; - Realf checksum4 = 0.0; - for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * ccell->get_number_of_velocity_blocks(popID); ++vCell) { - checksum1 += ccell->neighbor_block_data[0][vCell]; - checksum2 += ccell->neighbor_block_data[1][vCell]; - checksum3 += ccell->neighbor_block_data[2][vCell]; - checksum4 += ccell->neighbor_block_data[3][vCell]; - } - cout << "Rank " << myRank << ": Cell " << c << " sending to " << nbr << " index is " << sendIndex << " sums are "; - cout << checksum1 << ", "; - cout << checksum2 << ", "; - cout << checksum3 << ", "; - cout << checksum4 << ", "; - cout << "numbers of blocks are "; - cout << ccell->neighbor_number_of_blocks[0] << ", "; - cout << ccell->neighbor_number_of_blocks[1] << ", "; - cout << ccell->neighbor_number_of_blocks[2] << ", "; - cout << ccell->neighbor_number_of_blocks[3]; - cout << endl; + // Realf checksum1 = 0.0; + // Realf checksum2 = 0.0; + // Realf checksum3 = 0.0; + // Realf checksum4 = 0.0; + // for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * ccell->get_number_of_velocity_blocks(popID); ++vCell) { + // checksum1 += ccell->neighbor_block_data[0][vCell]; + // checksum2 += ccell->neighbor_block_data[1][vCell]; + // checksum3 += ccell->neighbor_block_data[2][vCell]; + // checksum4 += ccell->neighbor_block_data[3][vCell]; + // } + + // cout << "Rank " << myRank; + // cout << ", dimension " << dimension; + // cout << ", direction " << direction; + // cout << ": Cell " << c; + // cout << " sending to " << nbr << " index is " << sendIndex << " sums are "; + // cout << checksum1 << ", "; + // cout << checksum2 << ", "; + // cout << checksum3 << ", "; + // cout << checksum4 << ", "; + // cout << endl; } } } - // MPI_Barrier(MPI_COMM_WORLD); for (uint i_nbr = 0; i_nbr < n_nbrs.size(); ++i_nbr) { @@ -1533,9 +1528,7 @@ void update_remote_mapping_contribution( // } } - //receiveBuffers.push_back(ncell->neighbor_block_data.at(recvIndex)); receive_cells.push_back(c); - // For debugging receive_origin_cells.push_back(nbr); receive_origin_index.push_back(recvIndex); } @@ -1560,12 +1553,9 @@ void update_remote_mapping_contribution( break; } - if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; - //#pragma omp parallel //{ std::vector receive_cells_sums; - MPI_Barrier(MPI_COMM_WORLD); //reduce data: sum received data in the data array to // the target grid in the temporary block container for (size_t c = 0; c < receive_cells.size(); ++c) { @@ -1576,31 +1566,33 @@ void update_remote_mapping_contribution( Realf *neighborData = origin_cell->neighbor_block_data[receive_origin_index[c]]; int numReceiveCells = count(receive_cells.begin(), receive_cells.end(), receive_cells[c]); - Realf checksum = 0.0; - Realf checksum1 = 0.0; - Realf checksum2 = 0.0; - Realf checksum3 = 0.0; - Realf checksum4 = 0.0; + // Realf checksum = 0.0; + // Realf checksum1 = 0.0; + // Realf checksum2 = 0.0; + // Realf checksum3 = 0.0; + // Realf checksum4 = 0.0; //#pragma omp for for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * 
receive_cell->get_number_of_velocity_blocks(popID); ++vCell) { - checksum += neighborData[vCell]; - checksum1 += origin_cell->neighbor_block_data[0][vCell]; - checksum2 += origin_cell->neighbor_block_data[1][vCell]; - checksum3 += origin_cell->neighbor_block_data[2][vCell]; - checksum4 += origin_cell->neighbor_block_data[3][vCell]; - blockData[vCell] += neighborData[vCell] / numReceiveCells; + // checksum += neighborData[vCell]; + // checksum1 += origin_cell->neighbor_block_data[0][vCell]; + // checksum2 += origin_cell->neighbor_block_data[1][vCell]; + // checksum3 += origin_cell->neighbor_block_data[2][vCell]; + // checksum4 += origin_cell->neighbor_block_data[3][vCell]; + blockData[vCell] += neighborData[vCell]; } - receive_cells_sums.push_back(checksum); - cout << "Rank " << myRank << ": cell " << receive_cells[c] << " receiving from " << receive_origin_cells[c] << ". Checksums: "; - cout << checksum1 << ", "; - cout << checksum2 << ", "; - cout << checksum3 << ", "; - cout << checksum4 << "."; - cout << " Index is " << receive_origin_index[c] << endl; + // receive_cells_sums.push_back(checksum); + // cout << "Rank " << myRank << ": cell " << receive_cells[c] << " receiving from " << receive_origin_cells[c] << ". Checksums: "; + // cout << checksum1 << ", "; + // cout << checksum2 << ", "; + // cout << checksum3 << ", "; + // cout << checksum4 << "."; + // cout << " Index is " << receive_origin_index[c] << endl; } - MPI_Barrier(MPI_COMM_WORLD); - cout << endl; + + // MPI_Barrier(MPI_COMM_WORLD); + // cout << endl; + // send cell data is set to zero. This is to avoid double copy if // one cell is the neighbor on bot + and - side to the same process for (size_t c = 0; c < send_cells.size(); ++c) { @@ -1613,9 +1605,4 @@ void update_remote_mapping_contribution( } } //} - - // //and finally free temporary receive buffer - // for (size_t c=0; c < receiveBuffers.size(); ++c) { - // aligned_free(receiveBuffers[c]); - // } } From 65572c8f622505c4fd4e417733d14d40f6599007 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 7 Jan 2019 10:26:33 +0200 Subject: [PATCH 170/602] Fixed coordinates of split pencils --- vlasovsolver/cpu_trans_map_amr.hpp | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index b96666001..2b3fdd97a 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -100,16 +100,12 @@ struct setOfPencils { // dx and dy are the dimensions of the original pencil. void split(const uint myPencilId, const Realv dx, const Realv dy) { - auto ids = getIds(myPencilId); - - x[myPencilId] -= 0.25 * dx; - y[myPencilId] += 0.25 * dy; - + auto ids = getIds(myPencilId); // Find paths that members of this pencil may have in other pencils (can happen) // so that we don't add duplicates. 
std::vector existingSteps; - for (int theirPencilId = 0; theirPencilId < N; ++theirPencilId) { + for (uint theirPencilId = 0; theirPencilId < N; ++theirPencilId) { if(theirPencilId == myPencilId) continue; auto theirIds = getIds(theirPencilId); for (auto theirId : theirIds) { @@ -126,6 +122,8 @@ struct setOfPencils { bool firstPencil = true; const auto copy_of_path = path.at(myPencilId); + const auto copy_of_x = x.at(myPencilId); + const auto copy_of_y = y.at(myPencilId); // Add those pencils whose steps dont already exist in the pencils struct for (int step = 0; step < 4; ++step) { @@ -133,14 +131,30 @@ struct setOfPencils { continue; } + Realv signX = 1.0; + Realv signY = 1.0; + + if(step < 2) { + signY = -1.0; + } + + if(step % 2 == 0) { + signX = -1.0; + } + + auto myX = copy_of_x + signX * 0.25 * dx; + auto myY = copy_of_y + signY * 0.25 * dy; + if(firstPencil) { //TODO: set x and y correctly. Right now they are not used anywhere. path.at(myPencilId).push_back(step); + x.at(myPencilId) = myX; + y.at(myPencilId) = myY; firstPencil = false; } else { auto myPath = copy_of_path; myPath.push_back(step); - addPencil(ids, x.at(myPencilId) + 0.25 * dx, y.at(myPencilId) + 0.25 * dy, periodic.at(myPencilId), myPath); + addPencil(ids, myX, myY, periodic.at(myPencilId), myPath); } } } From 6958321bbd8f5742f9b5c8f81a2775b58d7e9faa Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 7 Jan 2019 10:27:32 +0200 Subject: [PATCH 171/602] Cleaned up commented debugging code --- vlasovsolver/vlasovmover.cpp | 95 ++---------------------------------- 1 file changed, 3 insertions(+), 92 deletions(-) diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index ee79dc82d..9563f4af5 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -132,118 +132,29 @@ void calculateSpatialTranslation( // ------------- SLICE - map dist function in Y --------------- // if(P::ycells_ini > 1) { - - std::vector sum_local_initial;; - std::vector sum_local_before_trans; - std::vector sum_local_after_trans; - std::vector sum_local_after_update; - std::vector sum_remote_initial; - std::vector sum_remote_before_trans; - std::vector sum_remote_after_trans; - std::vector sum_remote_after_update; trans_timer=phiprof::initializeTimer("transfer-stencil-data-y","MPI"); phiprof::start(trans_timer); SpatialCell::set_mpi_transfer_type(Transfer::VEL_BLOCK_DATA); - - // for (auto c : local_propagated_cells) { - // Realf sum = 0.0; - // SpatialCell* spatial_cell = mpiGrid[c]; - // Realf *blockData = spatial_cell->get_data(popID); - // for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { - // sum += blockData[vCell]; - // } - // sum_local_initial.push_back(sum); - // } - // for (auto c : remoteTargetCellsy) { - // Realf sum = 0.0; - // SpatialCell* spatial_cell = mpiGrid[c]; - // Realf *blockData = spatial_cell->get_data(popID); - // for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { - // sum += blockData[vCell]; - // } - // sum_remote_initial.push_back(sum); - // } mpiGrid.set_send_single_cells(false); mpiGrid.update_copies_of_remote_neighbors(VLASOV_SOLVER_Y_NEIGHBORHOOD_ID); phiprof::stop(trans_timer); - - // for (auto c : local_propagated_cells) { - // Realf sum = 0.0; - // SpatialCell* spatial_cell = mpiGrid[c]; - // Realf *blockData = spatial_cell->get_data(popID); - // for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * 
spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { - // sum += blockData[vCell]; - // } - // sum_local_before_trans.push_back(sum); - // } - // for (auto c : remoteTargetCellsy) { - // Realf sum = 0.0; - // SpatialCell* spatial_cell = mpiGrid[c]; - // Realf *blockData = spatial_cell->get_data(popID); - // for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { - // sum += blockData[vCell]; - // } - // sum_remote_before_trans.push_back(sum); - // } phiprof::start("compute-mapping-y"); trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsy, 1,dt,popID); // map along y// phiprof::stop("compute-mapping-y"); - - // for (auto c : local_propagated_cells) { - // Realf sum = 0.0; - // SpatialCell* spatial_cell = mpiGrid[c]; - // Realf *blockData = spatial_cell->get_data(popID); - // for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { - // sum += blockData[vCell]; - // } - // sum_local_after_trans.push_back(sum); - // } - // for (auto c : remoteTargetCellsy) { - // Realf sum = 0.0; - // SpatialCell* spatial_cell = mpiGrid[c]; - // Realf *blockData = spatial_cell->get_data(popID); - // for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { - // sum += blockData[vCell]; - // } - // sum_remote_after_trans.push_back(sum); - // } - - // MPI_Barrier(MPI_COMM_WORLD); trans_timer=phiprof::initializeTimer("update_remote-y","MPI"); phiprof::start("update_remote-y"); update_remote_mapping_contribution(mpiGrid, 1,+1,popID); update_remote_mapping_contribution(mpiGrid, 1,-1,popID); phiprof::stop("update_remote-y"); - - // for (auto c : local_propagated_cells) { - // Realf sum = 0.0; - // SpatialCell* spatial_cell = mpiGrid[c]; - // Realf *blockData = spatial_cell->get_data(popID); - // for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { - // sum += blockData[vCell]; - // } - // sum_local_after_update.push_back(sum); - // } - // for (auto c : remoteTargetCellsy) { - // Realf sum = 0.0; - // SpatialCell* spatial_cell = mpiGrid[c]; - // Realf *blockData = spatial_cell->get_data(popID); - // for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { - // sum += blockData[vCell]; - // } - // sum_remote_after_update.push_back(sum); - // } - - - // MPI_Barrier(MPI_COMM_WORLD); + } - MPI_Barrier(MPI_COMM_WORLD); - bailout(true, "", __FILE__, __LINE__); + // MPI_Barrier(MPI_COMM_WORLD); + // bailout(true, "", __FILE__, __LINE__); } /*! From 9751480882b07aa3274745ffb2368392c7aed3a3 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Wed, 9 Jan 2019 15:12:29 +0200 Subject: [PATCH 172/602] Added config file parameter to override maximum refinement level. Cleaned up printouts. 
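For illustration, the new parameter is meant to be set from the project's
config file; assuming the usual section.key naming implied by the RP::add
call below (the value here is only an example, not taken from a real run):

    [testAmr]
    maxSpatialRefinementLevel = 1

The value is clamped with std::min against the maximum refinement level
supported by the dccrg mapping before it is applied.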
--- projects/testAmr/testAmr.cpp | 60 +++++++++++++----------------------- projects/testAmr/testAmr.h | 1 + 2 files changed, 22 insertions(+), 39 deletions(-) diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index cc574af8e..7ebe5c3e3 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -62,6 +62,7 @@ namespace projects { RP::add("testAmr.lambda", "B cosine perturbation wavelength (m)", 1.0); RP::add("testAmr.nVelocitySamples", "Number of sampling points per velocity dimension", 2); RP::add("testAmr.densityModel","Which spatial density model is used?",string("uniform")); + RP::add("testAmr.maxSpatialRefinementLevel", "Maximum level for spatial refinement", 1.0); // Per-population parameters for(uint i=0; i< getObjectWrapper().particleSpecies.size(); i++) { @@ -92,6 +93,7 @@ namespace projects { RP::get("testAmr.dBy", this->dBy); RP::get("testAmr.dBz", this->dBz); RP::get("testAmr.lambda", this->lambda); + RP::get("testAmr.maxSpatialRefinementLevel", this->maxSpatialRefinementLevel); RP::get("testAmr.nVelocitySamples", this->nVelocitySamples); // Per-population parameters @@ -253,9 +255,14 @@ namespace projects { } bool testAmr::refineSpatialCells( dccrg::Dccrg& mpiGrid ) const { + + int myRank; + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + + mpiGrid.set_maximum_refinement_level(std::min(this->maxSpatialRefinementLevel, mpiGrid.mapping.get_maximum_refinement_level())); // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; + if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; std::array xyz_mid; xyz_mid[0] = (P::xmax - P::xmin) / 2.0; @@ -272,51 +279,26 @@ namespace projects { xyz[0] = x; xyz[1] = y; //std::cout << "Trying to refine at " << xyz[0] << ", " << xyz[1] << ", " << xyz[2] << std::endl; - //CellID myCell = mpiGrid.get_existing_cell(xyz); - //std::cout << "Got cell ID " << myCell << std::endl; + CellID myCell = mpiGrid.get_existing_cell(xyz); + if (mpiGrid.is_local(myCell)) { + std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; + } //refineSuccess.push_back(mpiGrid.refine_completely(myCell)); refineSuccess.push_back(mpiGrid.refine_completely_at(xyz)); } - } + } std::vector refinedCells = mpiGrid.stop_refining(true); - cout << "Finished first level of refinement" << endl; - cout << "Refined Cells are: "; - for (auto cellid : refinedCells) { - cout << cellid << " "; + if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << endl; + if(refinedCells.size() > 0) { + std::cout << "Refined cells produced by rank " << myRank << " are: "; + for (auto cellid : refinedCells) { + std::cout << cellid << " "; + } + std::cout << endl; } - cout << endl; - - // auto xyz = xyz_mid; - // xyz[0] = 1.4 * xyz[0]; - // xyz[1] = 1.4 * xyz[1]; - // std::cout << "Trying to refine at " << xyz[0] << ", " << xyz[1] << ", " << xyz[2] << std::endl; - // CellID myCell = mpiGrid.get_existing_cell(xyz); - // std::cout << "Got cell ID " << myCell << std::endl; - // int refLvl = mpiGrid.get_refinement_level(myCell); - // std::cout << "Refinement level is " << refLvl << std::endl; - // //mpiGrid.refine_completely_at(xyz); - // mpiGrid.refine_completely(myCell); - // refinedCells.clear(); - // refinedCells = mpiGrid.stop_refining(true); - // cout << "Finished second level of refinement" << endl; - // cout << "Refined 
Cells are: "; - // for (auto cellid : refinedCells) { - // cout << cellid << " "; - // } - // cout << endl; - - // mpiGrid.refine_completely_at(xyz_mid); - // mpiGrid.stop_refining(); - // mpiGrid.refine_completely_at(xyz_mid); - // mpiGrid.stop_refining(); - // mpiGrid.unrefine_completely_at(xyz_mid); - // mpiGrid.stop_refining(); - + mpiGrid.balance_load(); - //cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; - - // return std::all_of(refineSuccess.begin(), refineSuccess.end(), [](bool v) { return v; }); return true; } diff --git a/projects/testAmr/testAmr.h b/projects/testAmr/testAmr.h index 7ebaf59d8..fe5db8ca2 100644 --- a/projects/testAmr/testAmr.h +++ b/projects/testAmr/testAmr.h @@ -94,6 +94,7 @@ namespace projects { Real magYPertAbsAmp; Real magZPertAbsAmp; Real lambda; + int maxSpatialRefinementLevel; uint nVelocitySamples; std::vector speciesParams; From 96890e39bc5f1264c4c8a6b94179fc7fa1590b45 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Wed, 9 Jan 2019 15:16:05 +0200 Subject: [PATCH 173/602] Added check for null pointers from dccrg --- vlasovsolver/cpu_trans_map_amr.cpp | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 62f6f693f..a11bfaf6c 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1327,23 +1327,25 @@ void update_remote_mapping_contribution( if(direction > 0) direction = 1; if(direction < 0) direction = -1; - for (auto c : remote_cells) { + SpatialCell *ccell = mpiGrid[c]; + // Initialize number of blocks to 0 and block data to a default value // We need the default for 1 to 1 communications - for (uint i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { - //ccell->neighbor_block_data[i] = ccell->get_data(popID); - //ccell->neighbor_number_of_blocks[i] = 0; - - - ccell->neighbor_number_of_blocks[i] = ccell->get_number_of_velocity_blocks(popID); - ccell->neighbor_block_data[i] = + if(ccell) { + for (uint i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { + //ccell->neighbor_block_data[i] = ccell->get_data(popID); + //ccell->neighbor_number_of_blocks[i] = 0; + + ccell->neighbor_number_of_blocks[i] = ccell->get_number_of_velocity_blocks(popID); + ccell->neighbor_block_data[i] = (Realf*) aligned_malloc(ccell->neighbor_number_of_blocks[i] * WID3 * sizeof(Realf), 64); - for (uint j = 0; j < ccell->neighbor_number_of_blocks[i] * WID3; ++j) { + for (uint j = 0; j < ccell->neighbor_number_of_blocks[i] * WID3; ++j) { ccell->neighbor_block_data[i][j] = 0.0; - } - } + } + } + } } set allNeighbors; From c19f3a6e8ecb7612b227597c8658a7fce2719761 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Wed, 9 Jan 2019 15:16:44 +0200 Subject: [PATCH 174/602] Changed data type to 64 bit int --- spatial_cell.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/spatial_cell.cpp b/spatial_cell.cpp index 683507a69..134f4c7a8 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -1411,8 +1411,10 @@ namespace spatial_cell { * @return True on success.*/ bool SpatialCell::shrink_to_fit() { bool success = true; + return success; + for (size_t p=0; p Date: Wed, 9 Jan 2019 15:17:59 +0200 Subject: [PATCH 175/602] Fixed indentation --- vlasovsolver/cpu_trans_map_amr.cpp | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index a11bfaf6c..62e2f6b03 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ 
b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1334,24 +1334,24 @@ void update_remote_mapping_contribution( // Initialize number of blocks to 0 and block data to a default value // We need the default for 1 to 1 communications if(ccell) { - for (uint i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { - //ccell->neighbor_block_data[i] = ccell->get_data(popID); - //ccell->neighbor_number_of_blocks[i] = 0; - - ccell->neighbor_number_of_blocks[i] = ccell->get_number_of_velocity_blocks(popID); - ccell->neighbor_block_data[i] = - (Realf*) aligned_malloc(ccell->neighbor_number_of_blocks[i] * WID3 * sizeof(Realf), 64); - for (uint j = 0; j < ccell->neighbor_number_of_blocks[i] * WID3; ++j) { - ccell->neighbor_block_data[i][j] = 0.0; - } - } + for (uint i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { + //ccell->neighbor_block_data[i] = ccell->get_data(popID); + //ccell->neighbor_number_of_blocks[i] = 0; + + ccell->neighbor_number_of_blocks[i] = ccell->get_number_of_velocity_blocks(popID); + ccell->neighbor_block_data[i] = + (Realf*) aligned_malloc(ccell->neighbor_number_of_blocks[i] * WID3 * sizeof(Realf), 64); + for (uint j = 0; j < ccell->neighbor_number_of_blocks[i] * WID3; ++j) { + ccell->neighbor_block_data[i][j] = 0.0; + } + } } } set allNeighbors; for (auto c : local_cells) { - + SpatialCell *ccell = mpiGrid[c]; int neighborhood = getNeighborhood(dimension,1); From 24c87f8017e60fcc465f82ae3de38f218e193192 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Fri, 11 Jan 2019 15:49:09 +0200 Subject: [PATCH 176/602] Ilja's patch to dccrg calls in grid.cpp to avoid errors from null pointers. --- grid.cpp | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/grid.cpp b/grid.cpp index ae9d236f2..9ce59ca80 100644 --- a/grid.cpp +++ b/grid.cpp @@ -656,17 +656,28 @@ void updateRemoteVelocityBlockLists(dccrg::Dccrg incoming_cells = mpiGrid.get_remote_cells_on_process_boundary(DIST_FUNC_NEIGHBORHOOD_ID); #pragma omp parallel for + for (unsigned int i=0; iprepare_to_receive_blocks(popID); - } + uint64_t cell_id = incoming_cells[i]; + SpatialCell* cell = mpiGrid[cell_id]; + if (cell == NULL) { + for (const auto& cell: mpiGrid.local_cells) { + if (cell.id == cell_id) { + cerr << __FILE__ << ":" << __LINE__ << std::endl; + abort(); + } + for (const auto& neighbor: cell.neighbors_of) { + if (neighbor.id == cell_id) { + cerr << __FILE__ << ":" << __LINE__ << std::endl; + abort(); + } + } + } + continue; + } + cell->prepare_to_receive_blocks(popID); + } + phiprof::stop("Preparing receives", incoming_cells.size(), "SpatialCells"); } From d8057272d472bc44c3eedae5f94ed6a24071fe8b Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Fri, 11 Jan 2019 15:53:46 +0200 Subject: [PATCH 177/602] Print out the LB after refinement call. Commented out. --- projects/testAmr/testAmr.cpp | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index 7ebe5c3e3..bbcbb3fcc 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -299,6 +299,17 @@ namespace projects { mpiGrid.balance_load(); +// auto cells = mpiGrid.get_cells(); +// if(cells.empty()) { +// std::cout << "Rank " << myRank << " has no cells!" 
<< std::endl; +// } else { +// std::cout << "Cells on rank " << myRank << ": "; +// for (auto c : cells) { +// std::cout << c << " "; +// } +// std::cout << std::endl; +// } + return true; } From 71e53278fb993743f5f4ac7d34bebb5008f57290 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 11 Jan 2019 17:10:58 +0200 Subject: [PATCH 178/602] Changed - to + in refine function --- projects/testAmr/testAmr.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index bbcbb3fcc..ed14852f2 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -265,9 +265,9 @@ namespace projects { if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; std::array xyz_mid; - xyz_mid[0] = (P::xmax - P::xmin) / 2.0; - xyz_mid[1] = (P::ymax - P::ymin) / 2.0; - xyz_mid[2] = (P::zmax - P::zmin) / 2.0; + xyz_mid[0] = (P::xmax + P::xmin) / 2.0; + xyz_mid[1] = (P::ymax + P::ymin) / 2.0; + xyz_mid[2] = (P::zmax + P::zmin) / 2.0; std::vector refineSuccess; From b92a2b5bfc1f760860f1141d2749d252b5e78d45 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 11 Jan 2019 17:12:02 +0200 Subject: [PATCH 179/602] Added refine function to test_fp project for fieldsolver debugging. --- projects/test_fp/test_fp.cfg | 7 ++-- projects/test_fp/test_fp.cpp | 63 ++++++++++++++++++++++++++++++++++++ projects/test_fp/test_fp.h | 1 + 3 files changed, 69 insertions(+), 2 deletions(-) diff --git a/projects/test_fp/test_fp.cfg b/projects/test_fp/test_fp.cfg index 8e45f666a..7d0ed5b06 100644 --- a/projects/test_fp/test_fp.cfg +++ b/projects/test_fp/test_fp.cfg @@ -8,6 +8,9 @@ propagate_vlasov_translation = 0 project = test_fp dynamic_timestep = 1 +[AMR] +max_spatial_level = 0 + [io] diagnostic_write_interval = 1 write_initial_state = 1 @@ -62,9 +65,9 @@ output = B output = BackgroundB output = PerturbedB output = RhoV -output = Blocks +output = populations_Blocks diagnostic = FluxB -diagnostic = Blocks +diagnostic = populations_Blocks [sparse] minValue = 1e-15 diff --git a/projects/test_fp/test_fp.cpp b/projects/test_fp/test_fp.cpp index 3f3402ca5..6008d69e1 100644 --- a/projects/test_fp/test_fp.cpp +++ b/projects/test_fp/test_fp.cpp @@ -250,4 +250,67 @@ namespace projects { return this->getV0(x,y,z,dx,dy,dz,popID); } + + bool test_fp::refineSpatialCells( dccrg::Dccrg& mpiGrid ) const { + + int myRank; + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + + // mpiGrid.set_maximum_refinement_level(std::min(this->maxSpatialRefinementLevel, mpiGrid.mapping.get_maximum_refinement_level())); + + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; + + std::array xyz_mid; + xyz_mid[0] = (P::xmax + P::xmin) / 2.0; + xyz_mid[1] = (P::ymax + P::ymin) / 2.0; + xyz_mid[2] = (P::zmax + P::zmin) / 2.0; + + std::vector refineSuccess; + + int boxHalfWidth = 1; + + for (double x = xyz_mid[0] - boxHalfWidth * P::dx_ini; x <= xyz_mid[0] + boxHalfWidth * P::dx_ini; x += P::dx_ini) { + for (double y = xyz_mid[1] - boxHalfWidth * P::dy_ini; y <= xyz_mid[1] + boxHalfWidth * P::dy_ini; y += P::dy_ini) { + for (double z = xyz_mid[2] - boxHalfWidth * P::dz_ini; z <= xyz_mid[2] + boxHalfWidth * P::dz_ini; z += P::dz_ini) { + auto xyz = xyz_mid; + xyz[0] = x; + xyz[1] = y; + xyz[2] = z; + std::cout << "Trying to refine at " << xyz[0] << ", " << xyz[1] << ", " 
<< xyz[2] << std::endl; + CellID myCell = mpiGrid.get_existing_cell(xyz); + if (mpiGrid.is_local(myCell)) { + std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; + } + //refineSuccess.push_back(mpiGrid.refine_completely(myCell)); + refineSuccess.push_back(mpiGrid.refine_completely_at(xyz)); + } + } + } + std::vector refinedCells = mpiGrid.stop_refining(true); + if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << endl; + if(refinedCells.size() > 0) { + std::cout << "Refined cells produced by rank " << myRank << " are: "; + for (auto cellid : refinedCells) { + std::cout << cellid << " "; + } + std::cout << endl; + } + + mpiGrid.balance_load(); + +// auto cells = mpiGrid.get_cells(); +// if(cells.empty()) { +// std::cout << "Rank " << myRank << " has no cells!" << std::endl; +// } else { +// std::cout << "Cells on rank " << myRank << ": "; +// for (auto c : cells) { +// std::cout << c << " "; +// } +// std::cout << std::endl; +// } + + return true; + } + }// namespace projects diff --git a/projects/test_fp/test_fp.h b/projects/test_fp/test_fp.h index d715f90d6..8388ab6ee 100644 --- a/projects/test_fp/test_fp.h +++ b/projects/test_fp/test_fp.h @@ -42,6 +42,7 @@ namespace projects { protected: Real sign(creal value) const; Real getDistribValue(creal& vx, creal& vy, creal& vz); + bool refineSpatialCells( dccrg::Dccrg& mpiGrid ) const; virtual void calcCellParameters(spatial_cell::SpatialCell* cell,creal& t); virtual Real calcPhaseSpaceDensity( creal& x, creal& y, creal& z, From db34b808a61d31738fdbcc16b2807b5795a716d6 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 11 Jan 2019 17:12:29 +0200 Subject: [PATCH 180/602] Reading maxSpatialRefLvl from config file into parameters. --- parameters.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/parameters.cpp b/parameters.cpp index e9eb53095..8c0e7accd 100644 --- a/parameters.cpp +++ b/parameters.cpp @@ -224,6 +224,7 @@ bool Parameters::addParameters(){ Readparameters::add("AMR.max_velocity_level","Maximum velocity mesh refinement level",(uint)0); Readparameters::add("AMR.refine_limit","If the refinement criterion function returns a larger value than this, block is refined",(Realf)1.0); Readparameters::add("AMR.coarsen_limit","If the refinement criterion function returns a smaller value than this, block can be coarsened",(Realf)0.5); + Readparameters::add("AMR.max_spatial_level","Maximum spatial mesh refinement level",(uint)1); return true; } @@ -377,6 +378,7 @@ bool Parameters::getParameters(){ P::zmax = 1; } Readparameters::get("AMR.max_velocity_level",P::amrMaxVelocityRefLevel); + Readparameters::get("AMR.max_spatial_level",P::amrMaxSpatialRefLevel); Readparameters::get("AMR.vel_refinement_criterion",P::amrVelRefCriterion); Readparameters::get("AMR.refine_limit",P::amrRefineLimit); Readparameters::get("AMR.coarsen_limit",P::amrCoarsenLimit); From 872481cd20ad739d93dfebb556a99217e816522a Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 15 Jan 2019 11:55:36 +0200 Subject: [PATCH 181/602] Added initialization to getFieldDataFromFsGrid --- fieldsolver/gridGlue.hpp | 52 +++++++--------------------------------- 1 file changed, 8 insertions(+), 44 deletions(-) diff --git a/fieldsolver/gridGlue.hpp b/fieldsolver/gridGlue.hpp index 373ca4194..a3c41a35c 100644 --- a/fieldsolver/gridGlue.hpp +++ b/fieldsolver/gridGlue.hpp @@ -89,35 +89,16 @@ int getNumberOfCellsOnMaxRefLvl(dccrg::Dccrg void feedFieldDataIntoFsGrid( -// dccrg::Dccrg& mpiGrid, -// const std::vector& cells, int index, -// FsGrid< 
std::array, 2>& targetGrid) { - -// targetGrid.setupForTransferIn(cells.size()); - -// for(CellID i : cells) { -// // TODO: This assumes that the field data are lying continuous in memory. -// // Check definition of CellParams in common.h if unsure. -// std::array* cellDataPointer = reinterpret_cast*>( -// &(mpiGrid[i]->get_cell_parameters()[index])); -// targetGrid.transferDataIn(i - 1, cellDataPointer); -// } - -// targetGrid.finishTransfersIn(); -// } - template< unsigned int numFields > void feedFieldDataIntoFsGrid( dccrg::Dccrg& mpiGrid, const std::vector& cells, int cellParamsIndex, FsGrid< std::array, 2>& targetGrid) { - + int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); targetGrid.setupForTransferIn(nCells); int myRank; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - //std::cout << "Process rank " << myRank << " send tags: "; for(CellID dccrgId : cells) { const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); @@ -130,9 +111,6 @@ template< unsigned int numFields > void feedFieldDataIntoFsGrid( } } - //std::cout << std::endl; - //std::cout << std::endl; - targetGrid.finishTransfersIn(); } @@ -148,28 +126,11 @@ template< unsigned int numFields > void feedFieldDataIntoFsGrid( * * This function assumes that proper grid coupling has been set up. */ -// template< unsigned int numFields > void getFieldDataFromFsGrid( -// FsGrid< std::array, 2>& sourceGrid, -// dccrg::Dccrg& mpiGrid, -// const std::vector& cells, int index) { - -// sourceGrid.setupForTransferOut(cells.size()); - -// for(CellID i : cells) { -// // TODO: This assumes that the field data are lying continuous in memory. -// // Check definition of CellParams in common.h if unsure. -// std::array* cellDataPointer = reinterpret_cast*>( -// &(mpiGrid[i]->get_cell_parameters()[index])); -// sourceGrid.transferDataOut(i - 1, cellDataPointer); -// } - -// sourceGrid.finishTransfersOut(); -// } template< unsigned int numFields > void getFieldDataFromFsGrid( FsGrid< std::array, 2>& sourceGrid, dccrg::Dccrg& mpiGrid, const std::vector& cells, int index) { - + int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); std::vector< std::array > transferBuffer(nCells); std::vector< std::array*> transferBufferPointer; @@ -200,6 +161,11 @@ template< unsigned int numFields > void getFieldDataFromFsGrid( CellID dccrgId = cells[i]; + // Set cell data to 0 + for (int iField = 0; iField < numFields; ++iField) { + mpiGrid[dccrgId]->get_cell_parameters()[index+iField] = 0.0; + } + // Calculate the number of fsgrid cells we need to average into the current dccrg cell auto refLvl = mpiGrid.mapping.get_refinement_level(dccrgId); int nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); @@ -211,13 +177,11 @@ template< unsigned int numFields > void getFieldDataFromFsGrid( for (int iField = 0; iField < numFields; ++iField) { mpiGrid[dccrgId]->get_cell_parameters()[index+iField] += cellDataPointer->at(iField); } - } for (int iField = 0; iField < numFields; ++iField) { mpiGrid[dccrgId]->get_cell_parameters()[index+iField] /= nCells; - } - + } } } From 28f357906365f704b219aa0d258555642da19b7a Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 15 Jan 2019 15:13:52 +0200 Subject: [PATCH 182/602] Clarified messages from refine function --- projects/testAmr/testAmr.cpp | 9 ++--- projects/test_fp/test_fp.cpp | 73 ++++++++++++++++++++---------------- 2 files changed, 43 insertions(+), 39 deletions(-) diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index 
ed14852f2..39e5a4e07 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -269,8 +269,6 @@ namespace projects { xyz_mid[1] = (P::ymax + P::ymin) / 2.0; xyz_mid[2] = (P::zmax + P::zmin) / 2.0; - std::vector refineSuccess; - int boxHalfWidth = 1; for (double x = xyz_mid[0] - boxHalfWidth * P::dx_ini; x <= xyz_mid[0] + boxHalfWidth * P::dx_ini; x += P::dx_ini) { @@ -279,14 +277,13 @@ namespace projects { xyz[0] = x; xyz[1] = y; //std::cout << "Trying to refine at " << xyz[0] << ", " << xyz[1] << ", " << xyz[2] << std::endl; - CellID myCell = mpiGrid.get_existing_cell(xyz); - if (mpiGrid.is_local(myCell)) { + CellID myCell = mpiGrid.get_existing_cell(xyz); + if (mpiGrid.refine_completely_at(xyz)) { std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; } - //refineSuccess.push_back(mpiGrid.refine_completely(myCell)); - refineSuccess.push_back(mpiGrid.refine_completely_at(xyz)); } } + std::vector refinedCells = mpiGrid.stop_refining(true); if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << endl; if(refinedCells.size() > 0) { diff --git a/projects/test_fp/test_fp.cpp b/projects/test_fp/test_fp.cpp index 6008d69e1..a00aa2567 100644 --- a/projects/test_fp/test_fp.cpp +++ b/projects/test_fp/test_fp.cpp @@ -119,43 +119,53 @@ namespace projects { cellParams[CellParams::PERBZ ] = 0.0; typedef Parameters P; - creal dx = cellParams[CellParams::DX]; - creal x = cellParams[CellParams::XCRD] + 0.5 * dx; + creal dx = P::dx_ini * 3.5; + creal dy = P::dy_ini * 3.5; + creal dz = P::dz_ini * 3.5; + creal x = cellParams[CellParams::XCRD] + 0.5 * cellParams[CellParams::DX]; creal y = cellParams[CellParams::YCRD] + 0.5 * cellParams[CellParams::DY]; creal z = cellParams[CellParams::ZCRD] + 0.5 * cellParams[CellParams::DZ]; - + + Real areaFactor = 1.0; + switch (this->CASE) { - case BXCASE: - cellParams[CellParams::PERBX] = 0.1 * this->B0; - if (y >= -3.5 * dx && y <= 3.5 * dx) - if (z >= -3.5 * dx && z <= 3.5 * dx) - cellParams[CellParams::PERBX] = this->B0; + case BXCASE: + cellParams[CellParams::PERBX] = 0.1 * this->B0 * areaFactor; + //areaFactor = (CellParams::DY * CellParams::DZ) / (dy * dz); + if (y >= -dy && y <= dy) + if (z >= -dz && z <= dz) + cellParams[CellParams::PERBX] = this->B0 * areaFactor; break; case BYCASE: - cellParams[CellParams::PERBY] = 0.1 * this->B0; - if (x >= -3.5 * dx && x <= 3.5 * dx) - if (z >= -3.5 * dx && z <= 3.5 * dx) - cellParams[CellParams::PERBY] = this->B0; + cellParams[CellParams::PERBY] = 0.1 * this->B0 * areaFactor; + //areaFactor = (CellParams::DX * CellParams::DZ) / (dx * dz); + if (x >= -dx && x <= dx) + if (z >= -dz && z <= dz) + cellParams[CellParams::PERBY] = this->B0 * areaFactor; break; case BZCASE: - cellParams[CellParams::PERBZ] = 0.1 * this->B0; - if (x >= -3.5 * dx && x <= 3.5 * dx) - if (y >= -3.5 * dx && y <= 3.5 * dx) - cellParams[CellParams::PERBZ] = this->B0; + cellParams[CellParams::PERBZ] = 0.1 * this->B0 * areaFactor; + //areaFactor = (CellParams::DX * CellParams::DY) / (dx * dy); + if (x >= -dx && x <= dx) + if (y >= -dy && y <= dy) + cellParams[CellParams::PERBZ] = this->B0 * areaFactor; break; - case BALLCASE: - cellParams[CellParams::PERBX] = 0.1 * this->B0; - cellParams[CellParams::PERBY] = 0.1 * this->B0; - cellParams[CellParams::PERBZ] = 0.1 * this->B0; - if (y >= -3.5 * dx && y <= 3.5 * dx) - if (z >= -3.5 * dx && z <= 3.5 * dx) - cellParams[CellParams::PERBX] = this->B0; - if (x >= -3.5 * dx && x <= 3.5 * dx) - if (z >= -3.5 * dx && z <= 3.5 * dx) - 
cellParams[CellParams::PERBY] = this->B0; - if (x >= -3.5 * dx && x <= 3.5 * dx) - if (y >= -3.5 * dx && y <= 3.5 * dx) - cellParams[CellParams::PERBZ] = this->B0; + case BALLCASE: + cellParams[CellParams::PERBX] = 0.1 * this->B0 * areaFactor; + cellParams[CellParams::PERBY] = 0.1 * this->B0 * areaFactor; + cellParams[CellParams::PERBZ] = 0.1 * this->B0 * areaFactor; + + //areaFactor = (CellParams::DX * CellParams::DY) / (dx * dy); + + if (y >= -dy && y <= dy) + if (z >= -dz && z <= dz) + cellParams[CellParams::PERBX] = this->B0 * areaFactor; + if (x >= -dx && x <= dx) + if (z >= -dz && z <= dz) + cellParams[CellParams::PERBY] = this->B0 * areaFactor; + if (x >= -dx && x <= dx) + if (y >= -dy && y <= dy) + cellParams[CellParams::PERBZ] = this->B0 * areaFactor; break; } } @@ -277,13 +287,10 @@ namespace projects { xyz[0] = x; xyz[1] = y; xyz[2] = z; - std::cout << "Trying to refine at " << xyz[0] << ", " << xyz[1] << ", " << xyz[2] << std::endl; CellID myCell = mpiGrid.get_existing_cell(xyz); - if (mpiGrid.is_local(myCell)) { + if (mpiGrid.refine_completely_at(xyz)) { std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; } - //refineSuccess.push_back(mpiGrid.refine_completely(myCell)); - refineSuccess.push_back(mpiGrid.refine_completely_at(xyz)); } } } From c65662ffbfddd5bdf5f575ccf223f6e5a864c097 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 16 Jan 2019 11:18:09 +0200 Subject: [PATCH 183/602] Added refine call to Magnetosphere project --- projects/Magnetosphere/Magnetosphere.cpp | 48 ++++++++++++++++++++++++ projects/Magnetosphere/Magnetosphere.h | 1 + 2 files changed, 49 insertions(+) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index a19a983f5..a0a6c4165 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -449,6 +449,54 @@ namespace projects { centerPoints.push_back(V0); return centerPoints; } + + bool Magnetosphere::refineSpatialCells( dccrg::Dccrg& mpiGrid ) const { + + int myRank; + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + + // mpiGrid.set_maximum_refinement_level(std::min(this->maxSpatialRefinementLevel, mpiGrid.mapping.get_maximum_refinement_level())); + + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; + + std::array xyz_mid; + xyz_mid[0] = (P::xmax + P::xmin) / 2.0; + xyz_mid[1] = (P::ymax + P::ymin) / 2.0; + xyz_mid[2] = (P::zmax + P::zmin) / 2.0; + + std::vector refineSuccess; + + int boxHalfWidth = 5; + + for (double x = xyz_mid[0] - boxHalfWidth * P::dx_ini; x <= xyz_mid[0] + boxHalfWidth * P::dx_ini; x += P::dx_ini) { + for (double y = xyz_mid[1] - boxHalfWidth * P::dy_ini; y <= xyz_mid[1] + boxHalfWidth * P::dy_ini; y += P::dy_ini) { + for (double z = xyz_mid[2] - boxHalfWidth * P::dz_ini; z <= xyz_mid[2] + boxHalfWidth * P::dz_ini; z += P::dz_ini) { + auto xyz = xyz_mid; + xyz[0] = x; + xyz[1] = y; + xyz[2] = z; + CellID myCell = mpiGrid.get_existing_cell(xyz); + if (mpiGrid.refine_completely_at(xyz)) { + std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; + } + } + } + } + std::vector refinedCells = mpiGrid.stop_refining(true); + if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << endl; + if(refinedCells.size() > 0) { + std::cout << "Refined cells produced by rank " << myRank << " are: "; + for (auto cellid : refinedCells) { + 
std::cout << cellid << " "; + } + std::cout << endl; + } + + mpiGrid.balance_load(); + + return true; + } } // namespace projects diff --git a/projects/Magnetosphere/Magnetosphere.h b/projects/Magnetosphere/Magnetosphere.h index c5cd5d322..3259e5d64 100644 --- a/projects/Magnetosphere/Magnetosphere.h +++ b/projects/Magnetosphere/Magnetosphere.h @@ -63,6 +63,7 @@ namespace projects { creal& dvx, creal& dvy, creal& dvz, const uint popID ) const; + bool refineSpatialCells( dccrg::Dccrg& mpiGrid ) const; virtual void calcCellParameters(spatial_cell::SpatialCell* cell,creal& t); virtual std::vector > getV0( creal x, From 9ab7bd174d63ef55c89df9b95ad32c25bac26f84 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 18 Jan 2019 14:38:17 +0200 Subject: [PATCH 184/602] When mpiGrid is refined, the fsgrid boundary layer has a width greater than 2. In this case, the boundary cells that do not find a non-boundary neighbor just keep their original value; we don't care what happens in them since they have no effect on the Vlasov solver. --- sysboundary/sysboundarycondition.cpp | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/sysboundary/sysboundarycondition.cpp b/sysboundary/sysboundarycondition.cpp index 18cbafd09..714a7e5d4 100644 --- a/sysboundary/sysboundarycondition.cpp +++ b/sysboundary/sysboundarycondition.cpp @@ -800,6 +800,17 @@ namespace SBC { #ifndef NDEBUG const std::array gid = technicalGrid.getGlobalIndices(i, j, k); const std::array ngid = technicalGrid.getGlobalIndices(closestCell[0], closestCell[1], closestCell[2]); + + if (closestCell[0] == std::numeric_limits::min()) { + //cerr << "(" << gid[0] << "," << gid[1] << "," << gid[2] << ")" << __FILE__ << ":" << __LINE__ << ": No closest cell found!" << endl; + //abort(); + + // When mpiGrid is refined, the fsgrid boundary layer has a width greater than 2. In this case, + // the boundary cells that do not find a non-boundary neighbor just keep their original value; + // we don't care what happens in them since they have no effect on the Vlasov solver. + return perBGrid.get(i,j,k)->at(fsgrids::bfield::PERBX+component); + } + if ( technicalGrid.get(closestCell[0], closestCell[1], closestCell[2]) == nullptr ) { stringstream ss; ss << "ERROR, cell (" << gid[0] << "," << gid[1] << "," << gid[2] << ") tries to access invalid sysboundary nbr (" << ngid[0] << "," << ngid[1] << "," << ngid[2] << ") in " << __FILE__ << ":" << __LINE__ << endl; @@ -814,10 +825,6 @@ namespace SBC { exit(1); } - if (closestCell[0] == std::numeric_limits::min()) { - cerr << "(" << gid[0] << "," << gid[1] << "," << gid[2] << ")" << __FILE__ << ":" << __LINE__ << ": No closest cell found!"
<< endl; - abort(); - } #endif return perBGrid.get(closestCell[0], closestCell[1], closestCell[2])->at(fsgrids::bfield::PERBX+component); From f88e0e2a6692ef0b90f81749cef0f2ee8b4450d9 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Fri, 18 Jan 2019 14:55:38 +0200 Subject: [PATCH 185/602] Removed debug arguments from fsgrid calls --- vlasiator.cpp | 137 ++++++++++++++++++++------------------------------ 1 file changed, 55 insertions(+), 82 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index d0e11b993..6933607df 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -440,37 +440,20 @@ int main(int argn,char* args[]) { phiprof::stop("Init fieldsolver grids"); phiprof::start("Initial fsgrid coupling"); const std::vector& cells = getLocalCells(); - - // for (auto cell: cells) { - // creal dx = mpiGrid[cell]->parameters[CellParams::DX]; - // creal dy = mpiGrid[cell]->parameters[CellParams::DY]; - // creal dz = mpiGrid[cell]->parameters[CellParams::DZ]; - - // std::cout << "cell " << cell << " dx,dy,dz = " << dx << ", " << dy << ", " << dz << std::endl; - // } - - // cout << "Local cells are: "; - // for(auto id : cells) cout << id << " "; - // cout << endl; - - // Couple FSGrids to mpiGrid - // TODO: Do we really need to couple *all* of these fields? - - bool debugFsgrid = false; - perBGrid. setupForGridCoupling(debugFsgrid); - perBDt2Grid. setupForGridCoupling(debugFsgrid); - EGrid. setupForGridCoupling(debugFsgrid); - EDt2Grid. setupForGridCoupling(debugFsgrid); - EHallGrid. setupForGridCoupling(debugFsgrid); - EGradPeGrid. setupForGridCoupling(debugFsgrid); - momentsGrid. setupForGridCoupling(debugFsgrid); - momentsDt2Grid.setupForGridCoupling(debugFsgrid); - dPerBGrid. setupForGridCoupling(debugFsgrid); - dMomentsGrid. setupForGridCoupling(debugFsgrid); - BgBGrid. setupForGridCoupling(debugFsgrid); - volGrid. setupForGridCoupling(debugFsgrid); - technicalGrid. setupForGridCoupling(debugFsgrid); + perBGrid. setupForGridCoupling(); + perBDt2Grid. setupForGridCoupling(); + EGrid. setupForGridCoupling(); + EDt2Grid. setupForGridCoupling(); + EHallGrid. setupForGridCoupling(); + EGradPeGrid. setupForGridCoupling(); + momentsGrid. setupForGridCoupling(); + momentsDt2Grid.setupForGridCoupling(); + dPerBGrid. setupForGridCoupling(); + dMomentsGrid. setupForGridCoupling(); + BgBGrid. setupForGridCoupling(); + volGrid. setupForGridCoupling(); + technicalGrid. setupForGridCoupling(); // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. @@ -478,22 +461,20 @@ int main(int argn,char* args[]) { const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); for (auto fsgridId : fsgridIds) { - - debugFsgrid = false; - perBGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); - perBDt2Grid. setGridCoupling(fsgridId, myRank, debugFsgrid); - EGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); - EDt2Grid. setGridCoupling(fsgridId, myRank, debugFsgrid); - EHallGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); - EGradPeGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); - momentsGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); - momentsDt2Grid.setGridCoupling(fsgridId, myRank, debugFsgrid); - dPerBGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); - dMomentsGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); - BgBGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); - volGrid. 
setGridCoupling(fsgridId, myRank, debugFsgrid); - technicalGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + perBGrid. setGridCoupling(fsgridId, myRank); + perBDt2Grid. setGridCoupling(fsgridId, myRank); + EGrid. setGridCoupling(fsgridId, myRank); + EDt2Grid. setGridCoupling(fsgridId, myRank); + EHallGrid. setGridCoupling(fsgridId, myRank); + EGradPeGrid. setGridCoupling(fsgridId, myRank); + momentsGrid. setGridCoupling(fsgridId, myRank); + momentsDt2Grid.setGridCoupling(fsgridId, myRank); + dPerBGrid. setGridCoupling(fsgridId, myRank); + dMomentsGrid. setGridCoupling(fsgridId, myRank); + BgBGrid. setGridCoupling(fsgridId, myRank); + volGrid. setGridCoupling(fsgridId, myRank); + technicalGrid. setGridCoupling(fsgridId, myRank); } } @@ -985,54 +966,46 @@ int main(int argn,char* args[]) { const vector& cells = getLocalCells(); - cout << "Reloadbalance: Local cells are: "; - for(auto id : cells) cout << id << " "; - cout << endl; - - debugFsgrid = false; +// cout << "Reloadbalance: Local cells are: "; +// for(auto id : cells) cout << id << " "; +// cout << endl; - perBGrid. setupForGridCoupling(debugFsgrid); - perBDt2Grid. setupForGridCoupling(debugFsgrid); - EGrid. setupForGridCoupling(debugFsgrid); - EDt2Grid. setupForGridCoupling(debugFsgrid); - EHallGrid. setupForGridCoupling(debugFsgrid); - EGradPeGrid. setupForGridCoupling(debugFsgrid); - momentsGrid. setupForGridCoupling(debugFsgrid); - momentsDt2Grid.setupForGridCoupling(debugFsgrid); - dPerBGrid. setupForGridCoupling(debugFsgrid); - dMomentsGrid. setupForGridCoupling(debugFsgrid); - BgBGrid. setupForGridCoupling(debugFsgrid); - volGrid. setupForGridCoupling(debugFsgrid); - technicalGrid. setupForGridCoupling(debugFsgrid); + perBGrid. setupForGridCoupling(); + perBDt2Grid. setupForGridCoupling(); + EGrid. setupForGridCoupling(); + EDt2Grid. setupForGridCoupling(); + EHallGrid. setupForGridCoupling(); + EGradPeGrid. setupForGridCoupling(); + momentsGrid. setupForGridCoupling(); + momentsDt2Grid.setupForGridCoupling(); + dPerBGrid. setupForGridCoupling(); + dMomentsGrid. setupForGridCoupling(); + BgBGrid. setupForGridCoupling(); + volGrid. setupForGridCoupling(); + technicalGrid. setupForGridCoupling(); if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. - // cout << "send tags are: " << endl; for(auto& dccrgId : cells) { const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); - //cout << "Fsgrid ids for cell " << dccrgId << " are: "; for (auto& fsgridId : fsgridIds) { - //cout << fsgridId << " "; - - debugFsgrid = false; - perBGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); - perBDt2Grid. setGridCoupling(fsgridId, myRank, debugFsgrid); - EGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); - EDt2Grid. setGridCoupling(fsgridId, myRank, debugFsgrid); - EHallGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); - EGradPeGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); - momentsGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); - momentsDt2Grid.setGridCoupling(fsgridId, myRank, debugFsgrid); - dPerBGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); - dMomentsGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); - BgBGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); - volGrid. 
setGridCoupling(fsgridId, myRank, debugFsgrid); - technicalGrid. setGridCoupling(fsgridId, myRank, debugFsgrid); + perBGrid. setGridCoupling(fsgridId, myRank); + perBDt2Grid. setGridCoupling(fsgridId, myRank); + EGrid. setGridCoupling(fsgridId, myRank); + EDt2Grid. setGridCoupling(fsgridId, myRank); + EHallGrid. setGridCoupling(fsgridId, myRank); + EGradPeGrid. setGridCoupling(fsgridId, myRank); + momentsGrid. setGridCoupling(fsgridId, myRank); + momentsDt2Grid.setGridCoupling(fsgridId, myRank); + dPerBGrid. setGridCoupling(fsgridId, myRank); + dMomentsGrid. setGridCoupling(fsgridId, myRank); + BgBGrid. setGridCoupling(fsgridId, myRank); + volGrid. setGridCoupling(fsgridId, myRank); + technicalGrid. setGridCoupling(fsgridId, myRank); } - //cout << endl; } cout << endl; if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; From fbd1f10a3814da4b0d30f1f7b049d4de8126695d Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 23 Jan 2019 12:09:53 +0200 Subject: [PATCH 186/602] Fixed issues with non-periodic boundaries. - Seed ids get assigned when the previous neighbor is in boundary layer 2 with sysBoundaryFlag != NOT_SYSBOUNDARY, or has sysBoundaryFlag == DO_NOT_COMPUTE. - Pencil building stops on cells with the above conditions.
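Distilled into code, the new stopping/seeding test is roughly the following
(a sketch using the names from the hunks below; the helper variable
stopsPencil is only for illustration):

    SpatialCell* nbr = grid[nextNeighbor];
    bool stopsPencil =
       nbr->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE ||
       (nbr->sysBoundaryLayer == 2 &&
        nbr->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY);

When stopsPencil holds, pencil building ends at the previous cell, and the
same condition on a cell's neighbor marks that cell as a pencil seed.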
for ( const auto nbrPair : *(mpiGrid.get_neighbors_of(celli, neighborhood)) ) { @@ -675,15 +670,18 @@ void getSeedIds(const dccrg::Dccrg& mpiGr // cell as a seed for pencils if ( abs ( myIndices[dimension] - nbrIndices[dimension] ) > pow(2,mpiGrid.get_maximum_refinement_level()) || - !mpiGrid.is_local(nbrPair.first)) { + !mpiGrid.is_local(nbrPair.first) || + mpiGrid[nbrPair.first]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE || + ( mpiGrid[nbrPair.first]->sysBoundaryLayer == 2 && + mpiGrid[nbrPair.first]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) ) { - remoteNeighborExists = true; + addToSeedIds = true; } } } - if (remoteNeighborExists) { + if (addToSeedIds) { seedIds.push_back(celli); } @@ -878,7 +876,6 @@ void check_ghost_cells(const dccrg::Dccrg bool checkPencils(const std::vector &cells, const setOfPencils& pencils) { - bool correct = true; for (auto id : cells) { @@ -887,7 +884,7 @@ bool checkPencils(const std::vector &cells, const setOfPencils& pencils) if( myCount == 0 || (myCount != 1 && myCount % 4 != 0)) { - std::cerr << "ERROR: Cell ID " << id << " Appears in pencils " << myCount << " times!"; + std::cerr << "ERROR: Cell ID " << id << " Appears in pencils " << myCount << " times!"<< std::endl; correct = false; } @@ -1036,7 +1033,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& if(printPencils) printPencilsFunc(pencils,dimension,myRank); if(!checkPencils(localPropagatedCells, pencils)) { - throw; + abort(); } // // Remove duplicates From 3df10951978bd5f63023120862e0cb33fddfc51c Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 24 Jan 2019 12:38:16 +0200 Subject: [PATCH 187/602] Removed extra declarations from the header. Adjusted pencil build to not include any boundary cells. This is in line with how the target and source cells are defined. Removed the possibility of writing into boundary cells. --- vlasovsolver/cpu_trans_map_amr.cpp | 84 +++++++++++++++--------------- vlasovsolver/cpu_trans_map_amr.hpp | 12 ----- 2 files changed, 41 insertions(+), 55 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 08a5ee00b..9d7901478 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -483,9 +483,12 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::DccrgsysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE || - ( grid[nextNeighbor]->sysBoundaryLayer == 2 && - grid[nextNeighbor]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) ) { + grid[nextNeighbor]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) { + //!do_translate_cell(grid[nextNeighbor])) { + // grid[nextNeighbor]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE || + // ( grid[nextNeighbor]->sysBoundaryLayer == 2 && + // grid[nextNeighbor]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) ) { + nextNeighbor = INVALID_CELLID; } else { ids.push_back(nextNeighbor); @@ -536,7 +539,8 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg &vmesh, const uint lengthOfPencil) { // Get velocity data from vmesh that we need later to calculate the translation @@ -559,9 +563,10 @@ void propagatePencil(Vec* dz, Vec* values, const uint dimension, const uint bloc targetValues[i] = Vec(0.0); } + // Go from 0 to length here to propagate all the cells in the pencil - for (uint i = 0; i < lengthOfPencil; i++){ - + for (uint i = 0; i < lengthOfPencil; i++){ + // The source array is padded by VLASOV_STENCIL_WIDTH on both sides. 
uint i_source = i + VLASOV_STENCIL_WIDTH; @@ -645,7 +650,7 @@ void getSeedIds(const dccrg::Dccrg& mpiGr const uint dimension, vector &seedIds) { - const bool debug = false; + const bool debug = true; int myRank; if (debug) MPI_Comm_rank(MPI_COMM_WORLD,&myRank); @@ -671,9 +676,14 @@ void getSeedIds(const dccrg::Dccrg& mpiGr if ( abs ( myIndices[dimension] - nbrIndices[dimension] ) > pow(2,mpiGrid.get_maximum_refinement_level()) || !mpiGrid.is_local(nbrPair.first) || - mpiGrid[nbrPair.first]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE || - ( mpiGrid[nbrPair.first]->sysBoundaryLayer == 2 && - mpiGrid[nbrPair.first]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) ) { + ( mpiGrid[nbrPair.first]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && + mpiGrid[nbrPair.first]->sysBoundaryLayer == 1 ) ) { + + // !do_translate_cell(mpiGrid[nbrPair.first])) { + + // mpiGrid[nbrPair.first]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE || + // ( mpiGrid[nbrPair.first]->sysBoundaryLayer == 2 && + // mpiGrid[nbrPair.first]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) ) { addToSeedIds = true; @@ -874,18 +884,23 @@ void check_ghost_cells(const dccrg::Dccrg } } -bool checkPencils(const std::vector &cells, const setOfPencils& pencils) { +bool checkPencils(const dccrg::Dccrg& mpiGrid, + const std::vector &cells, const setOfPencils& pencils) { bool correct = true; for (auto id : cells) { - int myCount = std::count(pencils.ids.begin(), pencils.ids.end(), id); - - if( myCount == 0 || (myCount != 1 && myCount % 4 != 0)) { + if (mpiGrid[id]->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY ) { + + int myCount = std::count(pencils.ids.begin(), pencils.ids.end(), id); + + if( myCount == 0 || (myCount != 1 && myCount % 4 != 0)) { + + std::cerr << "ERROR: Cell ID " << id << " Appears in pencils " << myCount << " times!"<< std::endl; + correct = false; + } - std::cerr << "ERROR: Cell ID " << id << " Appears in pencils " << myCount << " times!"<< std::endl; - correct = false; } } @@ -935,7 +950,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const Realv dt, const uint popID) { - const bool printPencils = false; + const bool printPencils = true; const bool printTargets = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ @@ -1032,7 +1047,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& if(printPencils) printPencilsFunc(pencils,dimension,myRank); - if(!checkPencils(localPropagatedCells, pencils)) { + if(!checkPencils(mpiGrid, localPropagatedCells, pencils)) { abort(); } @@ -1108,7 +1123,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Compute spatial neighbors for target cells. 
// For targets we need the local cells, plus a padding of 1 cell at both ends std::vector targetCells(pencils.sumOfLengths + pencils.N * 2 ); - + //std::vector targetsValid(pencils.sumOfLengths + 2 * pencils.N) = false; + computeSpatialTargetCellsForPencils(mpiGrid, pencils, dimension, targetCells.data()); // Loop over pencils @@ -1125,13 +1141,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& computeSpatialSourceCellsForPencil(mpiGrid, pencils, pencili, dimension, sourceCells.data()); - // std::cout << "Source cells for pencil " << pencili << ", rank " << myRank << ": "; - // for (auto cell : sourceCells) { - // std::cout << cell->parameters[CellParams::CELLID] << " "; - // } - // std::cout << std::endl; - - // dz is the cell size in the direction of the pencil Vec dz[sourceCells.size()]; uint i = 0; @@ -1251,18 +1260,16 @@ bool trans_map_1d_amr(const dccrg::Dccrg& uint GID = celli + totalTargetLength; SpatialCell* spatial_cell = targetCells[GID]; + + const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); - if(spatial_cell == NULL) { + if(spatial_cell == NULL || + spatial_cell->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY || + blockLID == vmesh::VelocityMesh::invalidLocalID()) { // Invalid target spatial cell continue; } - const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); - if (blockLID == vmesh::VelocityMesh::invalidLocalID()) { - // Invalid local id. - continue; - } - Realf* blockData = spatial_cell->get_data(blockLID, popID); // areaRatio is the reatio of the cross-section of the spatial cell to the cross-section of the pencil. @@ -1273,16 +1280,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& blockData[i] += targetBlockData[GID * WID3 + i] * areaRatio; // checksum += targetBlockData[GID * WID3 + i] * areaRatio; } - - - // cout << "Rank " << myRank; - // cout << ", pencil " << pencili; - // cout << ", cell " << spatial_cell->parameters[CellParams::CELLID]; - // cout << ", dimension " << dimension; - // cout << ", areaRatio " << areaRatio; - // cout << ", checksum = " << checksum; - // cout << endl; - + } totalTargetLength += targetLength; diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index 2b3fdd97a..a85b0d9c1 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -160,18 +160,6 @@ struct setOfPencils { } }; -void compute_spatial_source_cells_for_pencil(const dccrg::Dccrg& mpiGrid, - setOfPencils pencils, - const uint iPencil, - const uint dimension, - SpatialCell **sourceCells); - - -void compute_spatial_target_cells_for_pencils(const dccrg::Dccrg& mpiGrid, - setOfPencils& pencils, - const uint dimension, - SpatialCell **targetCells); - CellID selectNeighbor(const dccrg::Dccrg &grid, CellID id, int dimension, uint path); From b40d9a2002a2d76419d4f4100033f940b5e978f8 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 24 Jan 2019 12:45:51 +0200 Subject: [PATCH 188/602] Set debug flags to false --- vlasovsolver/cpu_trans_map_amr.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 9d7901478..7ae07e289 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -650,7 +650,7 @@ void getSeedIds(const dccrg::Dccrg& mpiGr const uint dimension, vector &seedIds) { - const bool debug = true; + const bool debug = false; int myRank; if (debug) MPI_Comm_rank(MPI_COMM_WORLD,&myRank); @@ -950,7 +950,7 @@ bool 
trans_map_1d_amr(const dccrg::Dccrg& const Realv dt, const uint popID) { - const bool printPencils = true; + const bool printPencils = false; const bool printTargets = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ From befa3ec3bdb0504e37e2d51ebc03d1d6cd03dd51 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 24 Jan 2019 14:28:14 +0200 Subject: [PATCH 189/602] Added constraints to pencil builder. Do not seed pencils in the boundary layer. --- vlasovsolver/cpu_trans_map_amr.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 7ae07e289..dfa6925be 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -650,7 +650,7 @@ void getSeedIds(const dccrg::Dccrg& mpiGr const uint dimension, vector &seedIds) { - const bool debug = false; + const bool debug = true; int myRank; if (debug) MPI_Comm_rank(MPI_COMM_WORLD,&myRank); @@ -671,8 +671,8 @@ void getSeedIds(const dccrg::Dccrg& mpiGr // the distance in indices between this cell and its neighbor. auto nbrIndices = mpiGrid.mapping.get_indices(nbrPair.first); - // If a neighbor is non-local or across a periodic boundary, then we use this - // cell as a seed for pencils + // If a neighbor is non-local, across a periodic boundary, or in non-periodic boundary layer 1 + // then we use this cell as a seed for pencils if ( abs ( myIndices[dimension] - nbrIndices[dimension] ) > pow(2,mpiGrid.get_maximum_refinement_level()) || !mpiGrid.is_local(nbrPair.first) || @@ -691,7 +691,7 @@ void getSeedIds(const dccrg::Dccrg& mpiGr } } - if (addToSeedIds) { + if ( addToSeedIds && mpiGrid[celli]->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY ) { seedIds.push_back(celli); } @@ -950,7 +950,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const Realv dt, const uint popID) { - const bool printPencils = false; + const bool printPencils = true; const bool printTargets = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ From 376f9bfd24c156fc96b7add31e37cb463df6c797 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 24 Jan 2019 15:50:09 +0200 Subject: [PATCH 190/602] Pencils back to including layer 1 boundary cells. Target cell pointers set to NULL for boundary cells. Added null pointer checks in places where target cells are handled. 
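
The guard pattern this introduces can be sketched in isolation as follows.
This is a minimal illustration, not the real types: the Cell struct stands in
for SpatialCell and the flag value is assumed for the example only.

    #include <vector>

    struct Cell { int sysBoundaryFlag = 0; };
    constexpr int NOT_SYSBOUNDARY = 0;   // assumed enumerator value

    void accumulateTargets(std::vector<Cell*>& targetCells) {
       for (Cell* cell : targetCells) {
          // Boundary targets were stored as NULL when the target list was
          // built, so a single test skips invalid and boundary cells alike.
          if (cell == nullptr || cell->sysBoundaryFlag != NOT_SYSBOUNDARY) {
             continue;
          }
          // ... accumulate the mapped contribution into cell ...
       }
    }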
--- vlasovsolver/cpu_trans_map_amr.cpp | 83 ++++++++++++++++-------------- 1 file changed, 45 insertions(+), 38 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index dfa6925be..1bf270aa8 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -225,7 +225,7 @@ void computeSpatialTargetCellsForPencils(const dccrg::DccrgsysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) { + targetCells[i] = NULL; + } + } + } CellID selectNeighbor(const dccrg::Dccrg &grid, @@ -483,11 +491,11 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::DccrgsysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) { - //!do_translate_cell(grid[nextNeighbor])) { - // grid[nextNeighbor]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE || - // ( grid[nextNeighbor]->sysBoundaryLayer == 2 && - // grid[nextNeighbor]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) ) { + !do_translate_cell(grid[nextNeighbor])) { + // grid[nextNeighbor]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) { + // grid[nextNeighbor]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE || + // ( grid[nextNeighbor]->sysBoundaryLayer == 2 && + // grid[nextNeighbor]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) ) { nextNeighbor = INVALID_CELLID; } else { @@ -676,22 +684,21 @@ void getSeedIds(const dccrg::Dccrg& mpiGr if ( abs ( myIndices[dimension] - nbrIndices[dimension] ) > pow(2,mpiGrid.get_maximum_refinement_level()) || !mpiGrid.is_local(nbrPair.first) || - ( mpiGrid[nbrPair.first]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && - mpiGrid[nbrPair.first]->sysBoundaryLayer == 1 ) ) { - - // !do_translate_cell(mpiGrid[nbrPair.first])) { - - // mpiGrid[nbrPair.first]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE || - // ( mpiGrid[nbrPair.first]->sysBoundaryLayer == 2 && - // mpiGrid[nbrPair.first]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) ) { - + !do_translate_cell(mpiGrid[nbrPair.first]) ) { + + // ( mpiGrid[nbrPair.first]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && + // mpiGrid[nbrPair.first]->sysBoundaryLayer == 1 ) ) { + + // mpiGrid[nbrPair.first]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE || + // ( mpiGrid[nbrPair.first]->sysBoundaryLayer == 2 && + // mpiGrid[nbrPair.first]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) ) { + addToSeedIds = true; - } } } - if ( addToSeedIds && mpiGrid[celli]->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY ) { + if ( addToSeedIds ) { seedIds.push_back(celli); } @@ -1200,8 +1207,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // At this point the data is saved in targetVecData so we can reset the spatial cells for (auto *spatial_cell: targetCells) { - // Check for system boundary - if(spatial_cell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + // Check for null and system boundary + if (spatial_cell && spatial_cell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { // Get local velocity block id const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); // Check for invalid id @@ -1259,28 +1266,28 @@ bool trans_map_1d_amr(const dccrg::Dccrg& for ( uint celli = 0; celli < targetLength; celli++ ) { uint GID = celli + totalTargetLength; - SpatialCell* spatial_cell = targetCells[GID]; + SpatialCell* targetCell = targetCells[GID]; - const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); - - if(spatial_cell == NULL || - spatial_cell->sysBoundaryFlag != 
sysboundarytype::NOT_SYSBOUNDARY || - blockLID == vmesh::VelocityMesh::invalidLocalID()) { - // Invalid target spatial cell - continue; - } + if(targetCell) { - Realf* blockData = spatial_cell->get_data(blockLID, popID); + const vmesh::LocalID blockLID = targetCell->get_velocity_block_local_id(blockGID, popID); - // areaRatio is the reatio of the cross-section of the spatial cell to the cross-section of the pencil. - Realf areaRatio = pow(pow(2,spatial_cell->SpatialCell::parameters[CellParams::REFINEMENT_LEVEL] - pencils.path[pencili].size()),2);; - - // Realf checksum = 0.0; - for(int i = 0; i < WID3 ; i++) { - blockData[i] += targetBlockData[GID * WID3 + i] * areaRatio; - // checksum += targetBlockData[GID * WID3 + i] * areaRatio; + if( blockLID == vmesh::VelocityMesh::invalidLocalID() ) { + // Invalid target spatial cell + continue; + } + + Realf* blockData = targetCell->get_data(blockLID, popID); + + // areaRatio is the reatio of the cross-section of the spatial cell to the cross-section of the pencil. + Realf areaRatio = pow(pow(2,targetCell->SpatialCell::parameters[CellParams::REFINEMENT_LEVEL] - pencils.path[pencili].size()),2);; + + // Realf checksum = 0.0; + for(int i = 0; i < WID3 ; i++) { + blockData[i] += targetBlockData[GID * WID3 + i] * areaRatio; + // checksum += targetBlockData[GID * WID3 + i] * areaRatio; + } } - } totalTargetLength += targetLength; From f5b2d1473a832deb445fd78e621c358417bec32f Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 28 Jan 2019 15:52:20 +0200 Subject: [PATCH 191/602] Debugging remote neighbor update with neighbor number of blocks initialized to 0 to avoid unnecessary communications. --- vlasovsolver/cpu_trans_map_amr.cpp | 254 ++++++++++++++--------------- 1 file changed, 123 insertions(+), 131 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 1bf270aa8..d733ec194 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1333,167 +1333,137 @@ void update_remote_mapping_contribution( SpatialCell *ccell = mpiGrid[c]; - // Initialize number of blocks to 0 and block data to a default value + // Initialize number of blocks to 0 and block data to a default value. // We need the default for 1 to 1 communications if(ccell) { for (uint i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { - //ccell->neighbor_block_data[i] = ccell->get_data(popID); - //ccell->neighbor_number_of_blocks[i] = 0; - - ccell->neighbor_number_of_blocks[i] = ccell->get_number_of_velocity_blocks(popID); - ccell->neighbor_block_data[i] = - (Realf*) aligned_malloc(ccell->neighbor_number_of_blocks[i] * WID3 * sizeof(Realf), 64); - for (uint j = 0; j < ccell->neighbor_number_of_blocks[i] * WID3; ++j) { - ccell->neighbor_block_data[i][j] = 0.0; - } + ccell->neighbor_block_data[i] = ccell->get_data(popID); + ccell->neighbor_number_of_blocks[i] = 0; } - } + } } + for (auto c : local_cells) { + + SpatialCell *ccell = mpiGrid[c]; + + if(ccell) { + + // Initialize number of blocks to 0 and block data to a default value. + for (uint i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { + ccell->neighbor_block_data[i] = ccell->get_data(popID); + ccell->neighbor_number_of_blocks[i] = 0; + } + } + } + set allNeighbors; + int neighborhood = getNeighborhood(dimension,1); for (auto c : local_cells) { SpatialCell *ccell = mpiGrid[c]; - - int neighborhood = getNeighborhood(dimension,1); - auto* nbrPairVector = mpiGrid.get_neighbors_of(c, neighborhood); - // Initialize to empty vectors, add default values at the end. 
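// Editor's sketch of the neighbor classification built below: face
// neighbors arrive as (id, offset) pairs, and only the sign of the offset
// along the mapped dimension decides the side. Standalone form with
// simplified stand-in types:

#include <array>
#include <cstdint>
#include <utility>
#include <vector>

using ExampleCellID = std::uint64_t;
using ExampleOffset = std::array<int, 3>;

void splitBySide(const std::vector<std::pair<ExampleCellID, ExampleOffset>>& nbrPairs,
                 int dimension, int direction,
                 std::vector<ExampleCellID>& p_nbrs,
                 std::vector<ExampleCellID>& n_nbrs) {
   for (const auto& nbr : nbrPairs) {
      if (nbr.second.at(dimension) == direction)  p_nbrs.push_back(nbr.first);
      if (nbr.second.at(dimension) == -direction) n_nbrs.push_back(nbr.first);
   }
}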
+ if (!ccell) { + continue; + } + + auto* nbrPairVector = mpiGrid.get_neighbors_of(c, neighborhood); // neighbors in the positive direction vector p_nbrs; // neighbors on the negative direction vector n_nbrs; - - auto myRefLvl = mpiGrid.get_refinement_level(c); - bool sameRefinementAsNeighbors = true; // Collect neighbors on the positive and negative sides into separate lists for (auto nbrPair : *nbrPairVector) { if (nbrPair.second.at(dimension) == direction) { - p_nbrs.push_back(nbrPair.first); - - if(mpiGrid.get_refinement_level(nbrPair.first) != myRefLvl) { - sameRefinementAsNeighbors = false; - } - + p_nbrs.push_back(nbrPair.first); } - + if (nbrPair.second.at(dimension) == -direction) { n_nbrs.push_back(nbrPair.first); - - // if(mpiGrid.get_refinement_level(nbrPair.first) != myRefLvl) { - // sameRefinementAsNeighbors = false; - // } - } - } - - // int maxNeighborSize = max(p_nbrs.size(),n_nbrs.size()); - - for (uint i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { - // if(sameRefinementAsNeighbors) { - // //if(maxNeighborSize == 1) { - // // Initialize number of blocks to 0 and block data to a default value - // ccell->neighbor_block_data[i] = ccell->get_data(popID); - // ccell->neighbor_number_of_blocks[i] = 0; - // } else { - // Initialize number of blocks to the number of blocks in this cell (neighbors should have same number) - // and the block data to 0. We need to do this to make multi-process communications work. - ccell->neighbor_number_of_blocks[i] = ccell->get_number_of_velocity_blocks(popID); - ccell->neighbor_block_data[i] = - (Realf*) aligned_malloc(ccell->neighbor_number_of_blocks[i] * WID3 * sizeof(Realf), 64); - for (uint j = 0; j < ccell->neighbor_number_of_blocks[i] * WID3; ++j) { - ccell->neighbor_block_data[i][j] = 0.0; - } - // } - } + } if (all_of(nbrPairVector->begin(), nbrPairVector->end(), [mpiGrid](pair > p){return mpiGrid.is_local(p.first);})) { // Only local neighbors, move on. continue; - } + } + + uint nSiblings = 1; + uint sendIndex = 0; + uint recvIndex = 0; + auto myIndices = mpiGrid.mapping.get_indices(c); + auto myParent = mpiGrid.get_parent(c); // Find out which cell in the list of siblings this cell is. That will determine which - // neighbor_block_data gets stored as the transferBuffer. - auto myIndices = mpiGrid.mapping.get_indices(c); - auto allSiblings = mpiGrid.get_all_children(mpiGrid.get_parent(c)); - vector siblings; + // neighbor_block_data element gets allocated and read after the communication. 
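// Editor's sketch of the sibling bookkeeping that follows: a refined cell's
// send/receive slot is its position among the (up to four) siblings that
// share its index along the mapped dimension. Standalone form; indicesOf is
// a stand-in for mpiGrid.mapping.get_indices():

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

std::size_t siblingIndex(std::uint64_t c,
                         const std::vector<std::uint64_t>& allSiblings,
                         int dimension,
                         const std::function<std::array<std::uint64_t, 3>(std::uint64_t)>& indicesOf) {
   const auto myIndices = indicesOf(c);
   std::vector<std::uint64_t> sameSlice;
   for (auto s : allSiblings) {
      if (indicesOf(s)[dimension] == myIndices[dimension]) sameSlice.push_back(s);
   }
   const auto it = std::find(sameSlice.begin(), sameSlice.end(), c);
   // Position 0 is the fallback when c is not among the siblings.
   return it == sameSlice.end() ? 0 : static_cast<std::size_t>(it - sameSlice.begin());
}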
+ if( c != myParent) { + auto allSiblings = mpiGrid.get_all_children(myParent); + vector siblings; - for (auto sibling : allSiblings) { - auto indices = mpiGrid.mapping.get_indices(sibling); - if(indices[dimension] == myIndices[dimension]) { - siblings.push_back(sibling); + for (auto sibling : allSiblings) { + auto indices = mpiGrid.mapping.get_indices(sibling); + if(indices[dimension] == myIndices[dimension]) { + siblings.push_back(sibling); + } } - } - auto myLocation = std::find(siblings.begin(),siblings.end(),c); + auto myLocation = std::find(siblings.begin(),siblings.end(),c); - uint nSiblings = 1; - uint sendIndex = 0; - uint recvIndex = 0; - uint bufferSize = 1; - if(myLocation != siblings.end()) { - nSiblings = 4; - sendIndex = std::distance(siblings.begin(), myLocation); - recvIndex = std::distance(siblings.begin(), myLocation); - bufferSize = 4; + if(myLocation != siblings.end()) { + nSiblings = siblings.size(); + sendIndex = std::distance(siblings.begin(), myLocation); + recvIndex = std::distance(siblings.begin(), myLocation); + } } // ccell adds a neighbor_block_data block for each neighbor in the positive direction to its local data for (uint i_nbr = 0; i_nbr < p_nbrs.size(); ++i_nbr) { - + CellID nbr = p_nbrs[i_nbr]; + //Send data in nbr target array that we just mapped to, if + // 1) it is a valid target, + // 2) the source cell in center was translated, + if(nbr != INVALID_CELLID && do_translate_cell(ccell)) { - if (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && do_translate_cell(ccell) - && allNeighbors.find(nbr) == allNeighbors.end()) { - SpatialCell *pcell = mpiGrid[nbr]; - - if(pcell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { - //Send data in nbr target array that we just mapped to if 1) it is a valid target, - //2) is remote cell, 3) the source cell in center was translated, 4) it is not a boundary cell? - //5) We have not already sent data from this rank to this cell. - allNeighbors.insert(nbr); - + // 3) it is not a boundary cell, + if(pcell && pcell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + if(nSiblings == 1 && p_nbrs.size() == 4) { sendIndex = i_nbr; } - - ccell->neighbor_block_data.at(sendIndex) = pcell->get_data(popID); + ccell->neighbor_number_of_blocks.at(sendIndex) = pcell->get_number_of_velocity_blocks(popID); - send_cells.push_back(nbr); - - // Realf checksum1 = 0.0; - // Realf checksum2 = 0.0; - // Realf checksum3 = 0.0; - // Realf checksum4 = 0.0; - // for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * ccell->get_number_of_velocity_blocks(popID); ++vCell) { - // checksum1 += ccell->neighbor_block_data[0][vCell]; - // checksum2 += ccell->neighbor_block_data[1][vCell]; - // checksum3 += ccell->neighbor_block_data[2][vCell]; - // checksum4 += ccell->neighbor_block_data[3][vCell]; - // } - - // cout << "Rank " << myRank; - // cout << ", dimension " << dimension; - // cout << ", direction " << direction; - // cout << ": Cell " << c; - // cout << " sending to " << nbr << " index is " << sendIndex << " sums are "; - // cout << checksum1 << ", "; - // cout << checksum2 << ", "; - // cout << checksum3 << ", "; - // cout << checksum4 << ", "; - // cout << endl; + + if(!mpiGrid.is_local(nbr) && allNeighbors.find(nbr) == allNeighbors.end()) { + // 4a) Cell is remote, + // 5a) We have not already sent data from this rank to this cell. 
+ + allNeighbors.insert(nbr); + + ccell->neighbor_block_data.at(sendIndex) = pcell->get_data(popID); + send_cells.push_back(nbr); + } else { + // 4b) Set neighbor_number_of_blocks for local cells with remote siblings + // 5b) Set neighbor_number_of_blocks for cells whose local siblings have already sent data + + ccell->neighbor_block_data.at(sendIndex) = + (Realf*) aligned_malloc(ccell->neighbor_number_of_blocks.at(sendIndex) * WID3 * sizeof(Realf), 64); + for (uint j = 0; j < ccell->neighbor_number_of_blocks.at(sendIndex) * WID3; ++j) { + ccell->neighbor_block_data[sendIndex][j] = 0.0; + } + } } } } - + for (uint i_nbr = 0; i_nbr < n_nbrs.size(); ++i_nbr) { CellID nbr = n_nbrs[i_nbr]; @@ -1506,35 +1476,54 @@ void update_remote_mapping_contribution( SpatialCell *ncell = mpiGrid[nbr]; - // There are three possibilities for how we receive data + // Check for null pointer + if(!ncell) { + continue; + } + + // There are four possibilities for how we receive data // 1) sibling of 1 receiving from sibling of 1 - // Receiving cell reads from 0th element of receiveBuffer + // Receiving cell reads from 0th element of neighbor_block_data // 2) sibling of 4 receiving from sibling of 1 - // Receiving cell reads the element from receiveBuffer equal to their sibling index + // Receiving cell reads from recveIndex'th element of neighbor_block_data // 3) sibling of 1 receiving from sibling of 4 - // Receiving cell reads all elements from receiveBuffer that have data from remote neighbors - - if(nSiblings == 1 && n_nbrs.size() == 4) { - recvIndex = i_nbr; - bufferSize = 4; - } + // Receiving cell reads all elements from neighbor_block_data that have data from remote neighbors + // 4) sibling of 4 receiving from sibling of 4 + // Receiving cell reads from recvIndex'th element of neighbor_block_data - // We have to allocate memory for each sibling to receive all the data sent by ncell. - // There should be either 1 or 4 siblings, if there is only 1 sibling, the other receive - // blocks will remain at number_of_blocks = 0 as initialized. - for (uint i_buf = 0; i_buf < bufferSize; ++i_buf) { - ncell->neighbor_number_of_blocks.at(i_buf) = ccell->get_number_of_velocity_blocks(popID); - ncell->neighbor_block_data.at(i_buf) = - (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(i_buf) * WID3 * sizeof(Realf), 64); - - // for (uint j = 0; j < ccell->neighbor_number_of_blocks[i_buf] * WID3; ++j) { - // ncell->neighbor_block_data[i_buf][j] = 0.0; - // } - } + // This covers options 1 & 4 + if(mpiGrid.get_refinement_level(nbr) == mpiGrid.get_refinement_level(c)) { + + // Allocate memory for one sibling. Each cell will send/receive with the previously calculated recvIndex. + ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); + ncell->neighbor_block_data.at(recvIndex) = + (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(recvIndex) * WID3 * sizeof(Realf), 64); + + } else if(nSiblings == 4 && n_nbrs.size() == 1) { + + // Allocate memory for each sibling to receive all the data sent by ncell. 
Use the + // recvIndex that was previously calculated + + for (uint i_buf = 0; i_buf < MAX_FACE_NEIGHBORS_PER_DIM; ++i_buf) { + ncell->neighbor_number_of_blocks.at(i_buf) = ccell->get_number_of_velocity_blocks(popID); + ncell->neighbor_block_data.at(i_buf) = + (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(i_buf) * WID3 * sizeof(Realf), 64); + } + + } else if(nSiblings == 1 && n_nbrs.size() == 4) { + + // Each remote neighbor will allocate memory for the data it's about to receive at recvIndex = i_nbr. + + recvIndex = i_nbr; + ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); + ncell->neighbor_block_data.at(recvIndex) = + (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(recvIndex) * WID3 * sizeof(Realf), 64); + } receive_cells.push_back(c); receive_origin_cells.push_back(nbr); receive_origin_index.push_back(recvIndex); + } } } @@ -1565,11 +1554,14 @@ void update_remote_mapping_contribution( for (size_t c = 0; c < receive_cells.size(); ++c) { SpatialCell* receive_cell = mpiGrid[receive_cells[c]]; SpatialCell* origin_cell = mpiGrid[receive_origin_cells[c]]; + + if(!receive_cell || !origin_cell) { + continue; + } Realf *blockData = receive_cell->get_data(popID); Realf *neighborData = origin_cell->neighbor_block_data[receive_origin_index[c]]; - - int numReceiveCells = count(receive_cells.begin(), receive_cells.end(), receive_cells[c]); + // Realf checksum = 0.0; // Realf checksum1 = 0.0; // Realf checksum2 = 0.0; From 3972d00b00c56e14e9694bcc033f38f9a22ea316 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 29 Jan 2019 13:51:41 +0200 Subject: [PATCH 192/602] Various fixes to how sends and receives are initialized and set up. Now transtest passes, flowthrough runs 16 steps but eventually fails. Debugging continues. --- vlasovsolver/cpu_trans_map_amr.cpp | 200 ++++++++++++++++------------- 1 file changed, 111 insertions(+), 89 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index d733ec194..fce78bcc8 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1323,12 +1323,16 @@ void update_remote_mapping_contribution( vector send_cells; vector receive_origin_cells; - vector receive_origin_index; - + vector receive_origin_index; + //normalize if(direction > 0) direction = 1; if(direction < 0) direction = -1; + MPI_Barrier(MPI_COMM_WORLD); + cout << "Updating remote neighbors, direction = " << direction << endl; + MPI_Barrier(MPI_COMM_WORLD); + for (auto c : remote_cells) { SpatialCell *ccell = mpiGrid[c]; @@ -1387,11 +1391,11 @@ void update_remote_mapping_contribution( } } - if (all_of(nbrPairVector->begin(), nbrPairVector->end(), - [mpiGrid](pair > p){return mpiGrid.is_local(p.first);})) { - // Only local neighbors, move on. - continue; - } + // if (all_of(nbrPairVector->begin(), nbrPairVector->end(), + // [mpiGrid](pair > p){return mpiGrid.is_local(p.first);})) { + // // Only local neighbors, move on. 
+ // continue; + // } uint nSiblings = 1; uint sendIndex = 0; @@ -1421,109 +1425,126 @@ void update_remote_mapping_contribution( recvIndex = std::distance(siblings.begin(), myLocation); } } - - // ccell adds a neighbor_block_data block for each neighbor in the positive direction to its local data - for (uint i_nbr = 0; i_nbr < p_nbrs.size(); ++i_nbr) { - - CellID nbr = p_nbrs[i_nbr]; - //Send data in nbr target array that we just mapped to, if - // 1) it is a valid target, - // 2) the source cell in center was translated, - if(nbr != INVALID_CELLID && do_translate_cell(ccell)) { - - SpatialCell *pcell = mpiGrid[nbr]; - // 3) it is not a boundary cell, - if(pcell && pcell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + if (!all_of(p_nbrs.begin(), p_nbrs.end(),[mpiGrid](CellID i){return mpiGrid.is_local(i);})) { - if(nSiblings == 1 && p_nbrs.size() == 4) { - sendIndex = i_nbr; - } + // ccell adds a neighbor_block_data block for each neighbor in the positive direction to its local data + for (uint i_nbr = 0; i_nbr < p_nbrs.size(); ++i_nbr) { + + bool initBlocksForEmptySiblings = false; + CellID nbr = p_nbrs[i_nbr]; - ccell->neighbor_number_of_blocks.at(sendIndex) = pcell->get_number_of_velocity_blocks(popID); + if(nSiblings == 1 && p_nbrs.size() == 4) { + sendIndex = i_nbr; + } - if(!mpiGrid.is_local(nbr) && allNeighbors.find(nbr) == allNeighbors.end()) { - // 4a) Cell is remote, - // 5a) We have not already sent data from this rank to this cell. - - allNeighbors.insert(nbr); + //Send data in nbr target array that we just mapped to, if + // 1) it is a valid target, + // 2) the source cell in center was translated, + if(nbr != INVALID_CELLID && do_translate_cell(ccell)) { + + // 3) Cell is remote. + if (!mpiGrid.is_local(nbr)) { + + SpatialCell *pcell = mpiGrid[nbr]; + + // 4) it is not a boundary cell, + if(pcell && pcell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { - ccell->neighbor_block_data.at(sendIndex) = pcell->get_data(popID); - send_cells.push_back(nbr); + if(allNeighbors.find(nbr) == allNeighbors.end()) { + // 5a) We have not already sent data from this rank to this cell. + + allNeighbors.insert(nbr); + + ccell->neighbor_number_of_blocks.at(sendIndex) = pcell->get_number_of_velocity_blocks(popID); + ccell->neighbor_block_data.at(sendIndex) = pcell->get_data(popID); + send_cells.push_back(nbr); + } else { + initBlocksForEmptySiblings = true; + } + } } else { - // 4b) Set neighbor_number_of_blocks for local cells with remote siblings - // 5b) Set neighbor_number_of_blocks for cells whose local siblings have already sent data - - ccell->neighbor_block_data.at(sendIndex) = - (Realf*) aligned_malloc(ccell->neighbor_number_of_blocks.at(sendIndex) * WID3 * sizeof(Realf), 64); - for (uint j = 0; j < ccell->neighbor_number_of_blocks.at(sendIndex) * WID3; ++j) { - ccell->neighbor_block_data[sendIndex][j] = 0.0; - } - } + // If some but not all neighbors are local, we need to initialize their number of blocks + // and block data + // for communication because we don't check at receive how many siblings are remote. 
+ initBlocksForEmptySiblings = true; + } + } + + if(initBlocksForEmptySiblings) { + + ccell->neighbor_number_of_blocks.at(sendIndex) = mpiGrid[nbr]->get_number_of_velocity_blocks(popID); + ccell->neighbor_block_data.at(sendIndex) = + (Realf*) aligned_malloc(ccell->neighbor_number_of_blocks.at(sendIndex) * WID3 * sizeof(Realf), 64); + for (uint j = 0; j < ccell->neighbor_number_of_blocks.at(sendIndex) * WID3; ++j) { + ccell->neighbor_block_data[sendIndex][j] = 0.0; + } } } } + + if (!all_of(n_nbrs.begin(), n_nbrs.end(),[mpiGrid](CellID i){return mpiGrid.is_local(i);})) { - for (uint i_nbr = 0; i_nbr < n_nbrs.size(); ++i_nbr) { + for (uint i_nbr = 0; i_nbr < n_nbrs.size(); ++i_nbr) { - CellID nbr = n_nbrs[i_nbr]; + CellID nbr = n_nbrs[i_nbr]; - if (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && - ccell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { - //Receive data that ncell mapped to this local cell data array, - //if 1) ncell is a valid source cell, 2) center cell is to be updated (normal cell) 3) ncell is remote - //we will here allocate a receive buffer, since we need to aggregate values + if (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && + ccell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + //Receive data that ncell mapped to this local cell data array, + //if 1) ncell is a valid source cell, 2) center cell is to be updated (normal cell) 3) ncell is remote + //we will here allocate a receive buffer, since we need to aggregate values - SpatialCell *ncell = mpiGrid[nbr]; + SpatialCell *ncell = mpiGrid[nbr]; - // Check for null pointer - if(!ncell) { - continue; - } + // Check for null pointer + if(!ncell) { + continue; + } - // There are four possibilities for how we receive data - // 1) sibling of 1 receiving from sibling of 1 - // Receiving cell reads from 0th element of neighbor_block_data - // 2) sibling of 4 receiving from sibling of 1 - // Receiving cell reads from recveIndex'th element of neighbor_block_data - // 3) sibling of 1 receiving from sibling of 4 - // Receiving cell reads all elements from neighbor_block_data that have data from remote neighbors - // 4) sibling of 4 receiving from sibling of 4 - // Receiving cell reads from recvIndex'th element of neighbor_block_data - - // This covers options 1 & 4 - if(mpiGrid.get_refinement_level(nbr) == mpiGrid.get_refinement_level(c)) { + // There are four possibilities for how we receive data + // 1) sibling of 1 receiving from sibling of 1 + // Receiving cell reads from 0th element of neighbor_block_data + // 2) sibling of 4 receiving from sibling of 1 + // Receiving cell reads from recveIndex'th element of neighbor_block_data + // 3) sibling of 1 receiving from sibling of 4 + // Receiving cell reads all elements from neighbor_block_data that have data from remote neighbors + // 4) sibling of 4 receiving from sibling of 4 + // Receiving cell reads from recvIndex'th element of neighbor_block_data + + // This covers options 1 & 4 + if(mpiGrid.get_refinement_level(nbr) == mpiGrid.get_refinement_level(c)) { - // Allocate memory for one sibling. Each cell will send/receive with the previously calculated recvIndex. - ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); - ncell->neighbor_block_data.at(recvIndex) = - (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(recvIndex) * WID3 * sizeof(Realf), 64); + // Allocate memory for one sibling. Each cell will send/receive with the previously calculated recvIndex. 
+ ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); + ncell->neighbor_block_data.at(recvIndex) = + (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(recvIndex) * WID3 * sizeof(Realf), 64); - } else if(nSiblings == 4 && n_nbrs.size() == 1) { + } else if(nSiblings == 4 && n_nbrs.size() == 1) { - // Allocate memory for each sibling to receive all the data sent by ncell. Use the - // recvIndex that was previously calculated + // Allocate memory for each sibling to receive all the data sent by ncell. - for (uint i_buf = 0; i_buf < MAX_FACE_NEIGHBORS_PER_DIM; ++i_buf) { - ncell->neighbor_number_of_blocks.at(i_buf) = ccell->get_number_of_velocity_blocks(popID); - ncell->neighbor_block_data.at(i_buf) = - (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(i_buf) * WID3 * sizeof(Realf), 64); - } + for (uint i_buf = 0; i_buf < MAX_FACE_NEIGHBORS_PER_DIM; ++i_buf) { + ncell->neighbor_number_of_blocks.at(i_buf) = ccell->get_number_of_velocity_blocks(popID); + ncell->neighbor_block_data.at(i_buf) = + (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(i_buf) * WID3 * sizeof(Realf), 64); + } - } else if(nSiblings == 1 && n_nbrs.size() == 4) { + } else if(nSiblings == 1 && n_nbrs.size() == 4) { - // Each remote neighbor will allocate memory for the data it's about to receive at recvIndex = i_nbr. + // Each remote neighbor will allocate memory for the data it's about to receive at recvIndex = i_nbr. - recvIndex = i_nbr; - ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); - ncell->neighbor_block_data.at(recvIndex) = - (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(recvIndex) * WID3 * sizeof(Realf), 64); - } + recvIndex = i_nbr; + ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); + ncell->neighbor_block_data.at(recvIndex) = + (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(recvIndex) * WID3 * sizeof(Realf), 64); + } - receive_cells.push_back(c); - receive_origin_cells.push_back(nbr); - receive_origin_index.push_back(recvIndex); + receive_cells.push_back(c); + receive_origin_cells.push_back(nbr); + receive_origin_index.push_back(recvIndex); + } } } } @@ -1546,6 +1567,9 @@ void update_remote_mapping_contribution( break; } + int myRank; + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + //#pragma omp parallel //{ std::vector receive_cells_sums; @@ -1562,7 +1586,6 @@ void update_remote_mapping_contribution( Realf *blockData = receive_cell->get_data(popID); Realf *neighborData = origin_cell->neighbor_block_data[receive_origin_index[c]]; - // Realf checksum = 0.0; // Realf checksum1 = 0.0; // Realf checksum2 = 0.0; // Realf checksum3 = 0.0; @@ -1570,14 +1593,13 @@ void update_remote_mapping_contribution( //#pragma omp for for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * receive_cell->get_number_of_velocity_blocks(popID); ++vCell) { - // checksum += neighborData[vCell]; // checksum1 += origin_cell->neighbor_block_data[0][vCell]; // checksum2 += origin_cell->neighbor_block_data[1][vCell]; // checksum3 += origin_cell->neighbor_block_data[2][vCell]; // checksum4 += origin_cell->neighbor_block_data[3][vCell]; blockData[vCell] += neighborData[vCell]; } - // receive_cells_sums.push_back(checksum); + // // receive_cells_sums.push_back(checksum); // cout << "Rank " << myRank << ": cell " << receive_cells[c] << " receiving from " << receive_origin_cells[c] << ". 
Checksums: "; // cout << checksum1 << ", "; // cout << checksum2 << ", "; From 21b7da254b1344a1082540b503d0dba99a37e4cf Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 29 Jan 2019 15:13:21 +0200 Subject: [PATCH 193/602] Reading the numbers of blocks of siblings when receiving to set up the buffer to correct size. No way around this. Could in the same way check if they are local or remote. --- vlasovsolver/cpu_trans_map_amr.cpp | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index fce78bcc8..2d9b8a0f9 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -658,7 +658,7 @@ void getSeedIds(const dccrg::Dccrg& mpiGr const uint dimension, vector &seedIds) { - const bool debug = true; + const bool debug = false; int myRank; if (debug) MPI_Comm_rank(MPI_COMM_WORLD,&myRank); @@ -957,7 +957,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const Realv dt, const uint popID) { - const bool printPencils = true; + const bool printPencils = false; const bool printTargets = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ @@ -1400,15 +1400,16 @@ void update_remote_mapping_contribution( uint nSiblings = 1; uint sendIndex = 0; uint recvIndex = 0; + vector siblings; auto myIndices = mpiGrid.mapping.get_indices(c); auto myParent = mpiGrid.get_parent(c); // Find out which cell in the list of siblings this cell is. That will determine which // neighbor_block_data element gets allocated and read after the communication. + if( c != myParent) { auto allSiblings = mpiGrid.get_all_children(myParent); - vector siblings; for (auto sibling : allSiblings) { auto indices = mpiGrid.mapping.get_indices(sibling); @@ -1524,10 +1525,13 @@ void update_remote_mapping_contribution( // Allocate memory for each sibling to receive all the data sent by ncell. - for (uint i_buf = 0; i_buf < MAX_FACE_NEIGHBORS_PER_DIM; ++i_buf) { - ncell->neighbor_number_of_blocks.at(i_buf) = ccell->get_number_of_velocity_blocks(popID); - ncell->neighbor_block_data.at(i_buf) = - (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(i_buf) * WID3 * sizeof(Realf), 64); + for (uint i_sib = 0; i_sib < MAX_FACE_NEIGHBORS_PER_DIM; ++i_sib) { + + auto* scell = mpiGrid[siblings.at(i_sib)]; + + ncell->neighbor_number_of_blocks.at(i_sib) = scell->get_number_of_velocity_blocks(popID); + ncell->neighbor_block_data.at(i_sib) = + (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(i_sib) * WID3 * sizeof(Realf), 64); } } else if(nSiblings == 1 && n_nbrs.size() == 4) { From f0a8c47e5016dcabb4425dcf627372fbc54e5b02 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 29 Jan 2019 15:43:34 +0200 Subject: [PATCH 194/602] Initializing neighbor blocks to 0. 
--- spatial_cell.cpp | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/spatial_cell.cpp b/spatial_cell.cpp index 134f4c7a8..a789456a5 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -57,6 +57,12 @@ namespace spatial_cell { for (unsigned int i = 0; i < bvolderivatives::N_BVOL_DERIVATIVES; i++) { this->derivativesBVOL[i]=0; } + + for (unsigned int i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { + this->neighbor_number_of_blocks[i] = 0; + this->neighbor_block_data[i] = NULL; + } + //is transferred by default this->mpiTransferEnabled=true; @@ -825,7 +831,11 @@ namespace spatial_cell { int myRank; MPI_Type_size(datatype,&mpiSize); MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - cout << myRank << " get_mpi_datatype: " << cellID << " " << sender_rank << " " << receiver_rank << " " << mpiSize << endl; + cout << myRank << " get_mpi_datatype: " << cellID << " " << sender_rank << " " << receiver_rank << " " << mpiSize << ", Nblocks = " << populations[activePopID].N_blocks << ", nbr Nblocks ="; + for (uint i = 0; i < 4; ++i) { + cout << " " << this->neighbor_number_of_blocks[i]; + } + cout << endl; } return std::make_tuple(address,count,datatype); From 8923fe6e88b7be598afa74afe012cc853f83d1a4 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 29 Jan 2019 15:44:09 +0200 Subject: [PATCH 195/602] Comments detailing future work. --- fieldsolver/gridGlue.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index cba3b1242..6d9e08561 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -367,6 +367,7 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m fsgrids::technical* thisCellData = &transferBuffer[i]; // Data needs to be collected from some different places for this grid. thisCellData->sysBoundaryFlag = mpiGrid[cells[i]]->sysBoundaryFlag; + // Remove boundary layer copy here thisCellData->sysBoundaryLayer = mpiGrid[cells[i]]->sysBoundaryLayer; thisCellData->maxFsDt = std::numeric_limits::max(); } @@ -384,8 +385,9 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m } } - technicalGrid.finishTransfersIn(); + + // Add layer calculation here. Include diagonals +-1. } void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid, From 1dc8f23c273d965ed2826850987bdf8b9745cefc Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 29 Jan 2019 15:44:36 +0200 Subject: [PATCH 196/602] Cleanup from debugging, enable openmp pragmas in remote update. 
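
The threading pattern enabled here is: all threads enter one parallel region,
the short outer loop over receive cells is executed redundantly by every
thread, and the expensive per-velocity-cell sum is shared out with an omp
for, whose implicit barrier keeps the cells in step. A standalone sketch
under these assumptions (simplified types; compile with -fopenmp):

    #include <cstddef>
    #include <vector>

    void reduceReceived(const std::vector<float*>& dst,
                        const std::vector<const float*>& src,
                        const std::vector<std::size_t>& nVCells) {
    #pragma omp parallel
       {
          for (std::size_t c = 0; c < dst.size(); ++c) {
    #pragma omp for
             for (std::size_t v = 0; v < nVCells[c]; ++v) {
                dst[c][v] += src[c][v];
             }
          }
       }
    }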
--- vlasovsolver/cpu_trans_map_amr.cpp | 39 ++++++------------------------ 1 file changed, 7 insertions(+), 32 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 2d9b8a0f9..d7dd7a474 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -664,8 +664,7 @@ void getSeedIds(const dccrg::Dccrg& mpiGr int neighborhood = getNeighborhood(dimension,1); - //#pragma omp parallel for - for(auto celli: localPropagatedCells) { + for(auto celli : localPropagatedCells) { auto myIndices = mpiGrid.mapping.get_indices(celli); @@ -1328,10 +1327,6 @@ void update_remote_mapping_contribution( //normalize if(direction > 0) direction = 1; if(direction < 0) direction = -1; - - MPI_Barrier(MPI_COMM_WORLD); - cout << "Updating remote neighbors, direction = " << direction << endl; - MPI_Barrier(MPI_COMM_WORLD); for (auto c : remote_cells) { @@ -1574,10 +1569,9 @@ void update_remote_mapping_contribution( int myRank; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - //#pragma omp parallel - //{ - std::vector receive_cells_sums; - //reduce data: sum received data in the data array to +#pragma omp parallel + { + // Reduce data: sum received data in the data array to // the target grid in the temporary block container for (size_t c = 0; c < receive_cells.size(); ++c) { SpatialCell* receive_cell = mpiGrid[receive_cells[c]]; @@ -1590,41 +1584,22 @@ void update_remote_mapping_contribution( Realf *blockData = receive_cell->get_data(popID); Realf *neighborData = origin_cell->neighbor_block_data[receive_origin_index[c]]; - // Realf checksum1 = 0.0; - // Realf checksum2 = 0.0; - // Realf checksum3 = 0.0; - // Realf checksum4 = 0.0; - - //#pragma omp for +#pragma omp for for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * receive_cell->get_number_of_velocity_blocks(popID); ++vCell) { - // checksum1 += origin_cell->neighbor_block_data[0][vCell]; - // checksum2 += origin_cell->neighbor_block_data[1][vCell]; - // checksum3 += origin_cell->neighbor_block_data[2][vCell]; - // checksum4 += origin_cell->neighbor_block_data[3][vCell]; blockData[vCell] += neighborData[vCell]; } - // // receive_cells_sums.push_back(checksum); - // cout << "Rank " << myRank << ": cell " << receive_cells[c] << " receiving from " << receive_origin_cells[c] << ". Checksums: "; - // cout << checksum1 << ", "; - // cout << checksum2 << ", "; - // cout << checksum3 << ", "; - // cout << checksum4 << "."; - // cout << " Index is " << receive_origin_index[c] << endl; } - // MPI_Barrier(MPI_COMM_WORLD); - // cout << endl; - // send cell data is set to zero. This is to avoid double copy if // one cell is the neighbor on bot + and - side to the same process for (size_t c = 0; c < send_cells.size(); ++c) { SpatialCell* spatial_cell = mpiGrid[send_cells[c]]; Realf * blockData = spatial_cell->get_data(popID); - //#pragma omp for nowait +#pragma omp for nowait for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { // copy received target data to temporary array where target data is stored. 
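// Editor's note: despite the comment above, this loop does not copy
// anything; it zeroes the send cells' own block data so a contribution is
// not counted twice when the same cell borders the remote process on both
// the + and - side.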
            blockData[vCell] = 0;
         }
      }
-   //}
+   }
 }

From b70edce8fe185fdde4c450a05dd6d2198f9222af Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Tue, 29 Jan 2019 15:45:06 +0200
Subject: [PATCH 197/602] Use -O2

---
 MAKE/Makefile.appa | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/MAKE/Makefile.appa b/MAKE/Makefile.appa
index 4984ad871..670daf45c 100644
--- a/MAKE/Makefile.appa
+++ b/MAKE/Makefile.appa
@@ -48,7 +48,7 @@ FLAGS =
 #GNU flags:
 CC_BRAND = gcc
 CC_BRAND_VERSION = 5.4.0
-CXXFLAGS += -g -O0 -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2
+CXXFLAGS += -g -O2 -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2
 #CXXFLAGS += -g -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2

 testpackage: CXXFLAGS = -O2 -fopenmp -funroll-loops -std=c++0x -fabi-version=0 -mavx

From 6d1ced130d8dc56d8c7bcd771b1a6261ffd6b21e Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Wed, 30 Jan 2019 09:12:07 +0200
Subject: [PATCH 198/602] Enabled re-load balancing.

---
 vlasiator.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/vlasiator.cpp b/vlasiator.cpp
index 6933607df..e8c615bce 100644
--- a/vlasiator.cpp
+++ b/vlasiator.cpp
@@ -948,7 +948,7 @@ int main(int argn,char* args[]) {
       //Re-loadbalance if needed
       //TODO - add LB measure and do LB if it exceeds threshold
 #warning Re-loadbalance has been disabled temporarily for amr debugging
-      if(((P::tstep % P::rebalanceInterval == 0 && P::tstep > P::tstep_min) || overrideRebalanceNow) && false) {
+      if(((P::tstep % P::rebalanceInterval == 0 && P::tstep > P::tstep_min) || overrideRebalanceNow)) {
          logFile << "(LB): Start load balance, tstep = " << P::tstep << " t = " << P::t << endl << writeVerbose;
          balanceLoad(mpiGrid, sysBoundaries);
          addTimedBarrier("barrier-end-load-balance");
@@ -1007,7 +1007,7 @@ int main(int argn,char* args[]) {
          technicalGrid. setGridCoupling(fsgridId, myRank);
       }
    }
-   cout << endl;
+   // cout << endl;

    if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl;

    perBGrid. finishGridCoupling();

From 6473994ab6a979d186ad21483fe55b287cb43de2 Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Wed, 30 Jan 2019 13:03:32 +0200
Subject: [PATCH 199/602] Removed printouts of local/global sums. Added
 phiprof timing regions to trans_map_1d_amr.

---
 vlasiator.cpp                      |  2 +-
 vlasovsolver/cpu_trans_map_amr.cpp | 16 +++++++++++-----
 2 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/vlasiator.cpp b/vlasiator.cpp
index e8c615bce..eacc6a756 100644
--- a/vlasiator.cpp
+++ b/vlasiator.cpp
@@ -271,7 +271,7 @@ int main(int argn,char* args[]) {
    const bool printLines = false;
    const bool printCells = false;
-   const bool printSums = true;
+   const bool printSums = false;

    // Init MPI:
    int required=MPI_THREAD_FUNNELED;
diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp
index d7dd7a474..9de3c7f17 100644
--- a/vlasovsolver/cpu_trans_map_amr.cpp
+++ b/vlasovsolver/cpu_trans_map_amr.cpp
@@ -1090,15 +1090,14 @@ bool trans_map_1d_amr(const dccrg::Dccrg&
    }
    // ****************************************************************************

-   int t1 = phiprof::initializeTimer("mappingAndStore");
+   int t1 = phiprof::initializeTimer("mapping");
+   int t2 = phiprof::initializeTimer("store");

 #pragma omp parallel
    {
       // Loop over velocity space blocks. Thread this loop (over vspace blocks) with OpenMP.
#pragma omp for schedule(guided) - for(uint blocki = 0; blocki < unionOfBlocks.size(); blocki++){ - - phiprof::start(t1); + for(uint blocki = 0; blocki < unionOfBlocks.size(); blocki++) { // Get global id of the velocity block vmesh::GlobalID blockGID = unionOfBlocks[blocki]; @@ -1106,12 +1105,14 @@ bool trans_map_1d_amr(const dccrg::Dccrg& velocity_block_indices_t block_indices; uint8_t vRefLevel; vmesh.getIndices(blockGID,vRefLevel, block_indices[0], - block_indices[1], block_indices[2]); + block_indices[1], block_indices[2]); // Loop over sets of pencils // This loop only has one iteration for now for ( auto pencils: pencilSets ) { + phiprof::start(t1); + std::vector targetBlockData((pencils.sumOfLengths + 2 * pencils.N) * WID3); // Allocate vectorized targetvecdata sum(lengths of pencils)*WID3 / VECL) // Add padding by 2 for each pencil @@ -1202,6 +1203,9 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // dealloc source data -- Should be automatic since it's declared in this loop iteration? } + phiprof::stop(t1); + phiprof::start(t2); + // reset blocks in all non-sysboundary neighbor spatial cells for this block id // At this point the data is saved in targetVecData so we can reset the spatial cells @@ -1293,6 +1297,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // dealloc target data -- Should be automatic again? } + + phiprof::stop(t2); } } } From 53a373bf2b377c045200bb514db4a39a9b96e323 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 30 Jan 2019 13:42:52 +0200 Subject: [PATCH 200/602] Passing mpiGrid to lambdas by reference. Improves performance significantly. --- vlasovsolver/cpu_trans_map_amr.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 9de3c7f17..eee1e8393 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1393,7 +1393,7 @@ void update_remote_mapping_contribution( } // if (all_of(nbrPairVector->begin(), nbrPairVector->end(), - // [mpiGrid](pair > p){return mpiGrid.is_local(p.first);})) { + // [&mpiGrid](pair > p){return mpiGrid.is_local(p.first);})) { // // Only local neighbors, move on. // continue; // } @@ -1428,7 +1428,8 @@ void update_remote_mapping_contribution( } } - if (!all_of(p_nbrs.begin(), p_nbrs.end(),[mpiGrid](CellID i){return mpiGrid.is_local(i);})) { + // Set up sends if any neighbor cells in p_nbrs are non-local. + if (!all_of(p_nbrs.begin(), p_nbrs.end(),[&mpiGrid](CellID i){return mpiGrid.is_local(i);})) { // ccell adds a neighbor_block_data block for each neighbor in the positive direction to its local data for (uint i_nbr = 0; i_nbr < p_nbrs.size(); ++i_nbr) { @@ -1485,7 +1486,8 @@ void update_remote_mapping_contribution( } } - if (!all_of(n_nbrs.begin(), n_nbrs.end(),[mpiGrid](CellID i){return mpiGrid.is_local(i);})) { + // Set up receives if any neighbor cells in n_nbrs are non-local. 
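// Editor's note on the capture list just below: dccrg grid objects are
// heavy, so a lambda that captures the grid by value copies it every time
// the lambda is constructed, once per cell in this loop. Capturing by
// reference makes the predicate effectively free. A standalone sketch with
// a stand-in type:

#include <algorithm>
#include <vector>

struct HeavyGrid {                  // stand-in for the dccrg grid object
   std::vector<int> localCells;     // imagine much more state here
   bool is_local(int id) const {
      return std::find(localCells.begin(), localCells.end(), id) != localCells.end();
   }
};

bool allLocal(const HeavyGrid& grid, const std::vector<int>& nbrs) {
   // [&grid] borrows; [grid] would deep-copy the whole object first.
   return std::all_of(nbrs.begin(), nbrs.end(),
                      [&grid](int id) { return grid.is_local(id); });
}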
+      if (!all_of(n_nbrs.begin(), n_nbrs.end(),[&mpiGrid](CellID i){return mpiGrid.is_local(i);})) {

From 6fb2141a5c7efa1e4fa03ee506c42c5c7e15a522 Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Wed, 30 Jan 2019 14:03:02 +0200
Subject: [PATCH 201/602] Disabled call to refinement if maximum refinement
 level is 0

---
 grid.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/grid.cpp b/grid.cpp
index 9ce59ca80..7c94957bb 100644
--- a/grid.cpp
+++ b/grid.cpp
@@ -124,7 +124,7 @@ void initializeGrid(

    MPI_Barrier(comm);

-   if(project.refineSpatialCells(mpiGrid)) {
+   if(P::amrMaxSpatialRefLevel > 0 && project.refineSpatialCells(mpiGrid)) {
       recalculateLocalCellsCache();
    }
    MPI_Barrier(comm);

From b1ebda43adcfa7163bb80b0419e89da26cb5f01b Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Thu, 31 Jan 2019 14:12:38 +0200
Subject: [PATCH 202/602] Moved boxHalfWidth from the refineSpatialCells
 functions in projects to amrBoxHalfWidth in the config file. Default value
 is 5.

---
 parameters.cpp                           |  4 ++-
 parameters.h                             |  1 +
 projects/Flowthrough/Flowthrough.cpp     | 46 ++++++++++++++++++++++++
 projects/Flowthrough/Flowthrough.h       |  1 +
 projects/Magnetosphere/Magnetosphere.cpp |  8 ++---
 projects/testAmr/testAmr.cpp             |  6 ++--
 6 files changed, 56 insertions(+), 10 deletions(-)

diff --git a/parameters.cpp b/parameters.cpp
index 8c0e7accd..85b6e72c0 100644
--- a/parameters.cpp
+++ b/parameters.cpp
@@ -136,8 +136,8 @@ uint P::amrMaxVelocityRefLevel = 0;
 Realf P::amrRefineLimit = 1.0;
 Realf P::amrCoarsenLimit = 0.5;
 string P::amrVelRefCriterion = "";
-
 int P::amrMaxSpatialRefLevel = 1;
+int P::amrBoxHalfWidth = 5;

 bool Parameters::addParameters(){
    //the other default parameters we read through the add/get interface
@@ -225,6 +225,7 @@ bool Parameters::addParameters(){
    Readparameters::add("AMR.refine_limit","If the refinement criterion function returns a larger value than this, block is refined",(Realf)1.0);
    Readparameters::add("AMR.coarsen_limit","If the refinement criterion function returns a smaller value than this, block can be coarsened",(Realf)0.5);
    Readparameters::add("AMR.max_spatial_level","Maximum spatial mesh refinement level",(uint)1);
+   Readparameters::add("AMR.box_half_width","Half width of the box around origin that is refined (for testing)",(uint)5);
    return true;
 }

@@ -379,6 +380,7 @@ bool Parameters::getParameters(){
    }
    Readparameters::get("AMR.max_velocity_level",P::amrMaxVelocityRefLevel);
    Readparameters::get("AMR.max_spatial_level",P::amrMaxSpatialRefLevel);
+   Readparameters::get("AMR.box_half_width",P::amrBoxHalfWidth);
    Readparameters::get("AMR.vel_refinement_criterion",P::amrVelRefCriterion);
    Readparameters::get("AMR.refine_limit",P::amrRefineLimit);
    Readparameters::get("AMR.coarsen_limit",P::amrCoarsenLimit);
diff --git a/parameters.h b/parameters.h
index 0b0985725..04c7895db 100644
--- a/parameters.h
+++ b/parameters.h
@@ -137,6 +137,7 @@ struct Parameters {
    * The value must be larger than amrCoarsenLimit.*/
    static std::string amrVelRefCriterion; /**< Name of the velocity block refinement criterion function.*/
    static int amrMaxSpatialRefLevel;
+   static int amrBoxHalfWidth;

    /*! \brief Add the global parameters.
* diff --git a/projects/Flowthrough/Flowthrough.cpp b/projects/Flowthrough/Flowthrough.cpp index 678a18f55..d95e9ed0e 100644 --- a/projects/Flowthrough/Flowthrough.cpp +++ b/projects/Flowthrough/Flowthrough.cpp @@ -228,4 +228,50 @@ namespace projects { return centerPoints; } + bool Flowthrough::refineSpatialCells( dccrg::Dccrg& mpiGrid ) const { + + int myRank; + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + + // mpiGrid.set_maximum_refinement_level(std::min(this->maxSpatialRefinementLevel, mpiGrid.mapping.get_maximum_refinement_level())); + + // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; + if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; + + std::array xyz_mid; + xyz_mid[0] = (P::xmax + P::xmin) / 2.0; + xyz_mid[1] = (P::ymax + P::ymin) / 2.0; + xyz_mid[2] = (P::zmax + P::zmin) / 2.0; + + std::vector refineSuccess; + + for (double x = xyz_mid[0] - P::amrBoxHalfWidth * P::dx_ini; x <= xyz_mid[0] + P::amrBoxHalfWidth * P::dx_ini; x += P::dx_ini) { + for (double y = xyz_mid[1] - P::amrBoxHalfWidth * P::dy_ini; y <= xyz_mid[1] + P::amrBoxHalfWidth * P::dy_ini; y += P::dy_ini) { + for (double z = xyz_mid[2] - P::amrBoxHalfWidth * P::dz_ini; z <= xyz_mid[2] + P::amrBoxHalfWidth * P::dz_ini; z += P::dz_ini) { + auto xyz = xyz_mid; + xyz[0] = x; + xyz[1] = y; + xyz[2] = z; + CellID myCell = mpiGrid.get_existing_cell(xyz); + if (mpiGrid.refine_completely_at(xyz)) { + std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; + } + } + } + } + std::vector refinedCells = mpiGrid.stop_refining(true); + if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << endl; + if(refinedCells.size() > 0) { + std::cout << "Refined cells produced by rank " << myRank << " are: "; + for (auto cellid : refinedCells) { + std::cout << cellid << " "; + } + std::cout << endl; + } + + mpiGrid.balance_load(); + + return true; + } + } //namespace projects diff --git a/projects/Flowthrough/Flowthrough.h b/projects/Flowthrough/Flowthrough.h index 5c923217d..7d5491269 100644 --- a/projects/Flowthrough/Flowthrough.h +++ b/projects/Flowthrough/Flowthrough.h @@ -56,6 +56,7 @@ namespace projects { creal& dvx, creal& dvy, creal& dvz, const uint popID ) const; + bool refineSpatialCells( dccrg::Dccrg& mpiGrid ) const; virtual void calcCellParameters(spatial_cell::SpatialCell* cell,creal& t); virtual Real calcPhaseSpaceDensity( creal& x, creal& y, creal& z, diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index a0a6c4165..24e9e7109 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -466,12 +466,10 @@ namespace projects { xyz_mid[2] = (P::zmax + P::zmin) / 2.0; std::vector refineSuccess; - - int boxHalfWidth = 5; - for (double x = xyz_mid[0] - boxHalfWidth * P::dx_ini; x <= xyz_mid[0] + boxHalfWidth * P::dx_ini; x += P::dx_ini) { - for (double y = xyz_mid[1] - boxHalfWidth * P::dy_ini; y <= xyz_mid[1] + boxHalfWidth * P::dy_ini; y += P::dy_ini) { - for (double z = xyz_mid[2] - boxHalfWidth * P::dz_ini; z <= xyz_mid[2] + boxHalfWidth * P::dz_ini; z += P::dz_ini) { + for (double x = xyz_mid[0] - P::amrBoxHalfWidth * P::dx_ini; x <= xyz_mid[0] + P::amrBoxHalfWidth * P::dx_ini; x += P::dx_ini) { + for (double y = xyz_mid[1] - P::amrBoxHalfWidth * P::dy_ini; y <= xyz_mid[1] + P::amrBoxHalfWidth * P::dy_ini; y += P::dy_ini) { + for (double z = xyz_mid[2] - P::amrBoxHalfWidth * P::dz_ini; z <= 
xyz_mid[2] + P::amrBoxHalfWidth * P::dz_ini; z += P::dz_ini) { auto xyz = xyz_mid; xyz[0] = x; xyz[1] = y; diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index 39e5a4e07..3ead603c2 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -268,11 +268,9 @@ namespace projects { xyz_mid[0] = (P::xmax + P::xmin) / 2.0; xyz_mid[1] = (P::ymax + P::ymin) / 2.0; xyz_mid[2] = (P::zmax + P::zmin) / 2.0; - - int boxHalfWidth = 1; - for (double x = xyz_mid[0] - boxHalfWidth * P::dx_ini; x <= xyz_mid[0] + boxHalfWidth * P::dx_ini; x += P::dx_ini) { - for (double y = xyz_mid[1] - boxHalfWidth * P::dy_ini; y <= xyz_mid[1] + boxHalfWidth * P::dy_ini; y += P::dy_ini) { + for (double x = xyz_mid[0] - P::amrBoxHalfWidth * P::dx_ini; x <= xyz_mid[0] + P::amrBoxHalfWidth * P::dx_ini; x += P::dx_ini) { + for (double y = xyz_mid[1] - P::amrBoxHalfWidth * P::dy_ini; y <= xyz_mid[1] + P::amrBoxHalfWidth * P::dy_ini; y += P::dy_ini) { auto xyz = xyz_mid; xyz[0] = x; xyz[1] = y; From ee4e7c89a3a67678a39e96a69564ecff695387e0 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Tue, 5 Feb 2019 15:48:16 +0200 Subject: [PATCH 203/602] Free the memory allocated for receiving neighbor updates. Fixes some of the memory leaking Yann reported but not all of it. --- vlasovsolver/cpu_trans_map_amr.cpp | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index eee1e8393..4bca1d0a2 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1365,6 +1365,8 @@ void update_remote_mapping_contribution( set allNeighbors; int neighborhood = getNeighborhood(dimension,1); + vector allocatedPointers; + for (auto c : local_cells) { SpatialCell *ccell = mpiGrid[c]; @@ -1479,6 +1481,7 @@ void update_remote_mapping_contribution( ccell->neighbor_number_of_blocks.at(sendIndex) = mpiGrid[nbr]->get_number_of_velocity_blocks(popID); ccell->neighbor_block_data.at(sendIndex) = (Realf*) aligned_malloc(ccell->neighbor_number_of_blocks.at(sendIndex) * WID3 * sizeof(Realf), 64); + allocatedPointers.push_back(ccell->neighbor_block_data.at(sendIndex)); for (uint j = 0; j < ccell->neighbor_number_of_blocks.at(sendIndex) * WID3; ++j) { ccell->neighbor_block_data[sendIndex][j] = 0.0; } @@ -1523,6 +1526,7 @@ void update_remote_mapping_contribution( ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); ncell->neighbor_block_data.at(recvIndex) = (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(recvIndex) * WID3 * sizeof(Realf), 64); + allocatedPointers.push_back(ncell->neighbor_block_data.at(recvIndex)); } else if(nSiblings == 4 && n_nbrs.size() == 1) { @@ -1535,6 +1539,7 @@ void update_remote_mapping_contribution( ncell->neighbor_number_of_blocks.at(i_sib) = scell->get_number_of_velocity_blocks(popID); ncell->neighbor_block_data.at(i_sib) = (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(i_sib) * WID3 * sizeof(Realf), 64); + allocatedPointers.push_back(ncell->neighbor_block_data.at(recvIndex)); } } else if(nSiblings == 1 && n_nbrs.size() == 4) { @@ -1545,7 +1550,8 @@ void update_remote_mapping_contribution( ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); ncell->neighbor_block_data.at(recvIndex) = (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(recvIndex) * WID3 * sizeof(Realf), 64); - } + allocatedPointers.push_back(ncell->neighbor_block_data.at(recvIndex)); + } 
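// Editor's aside: collecting raw pointers in allocatedPointers and freeing
// them at the end works, but the same bookkeeping can be automated with
// RAII so a free cannot be missed on an early exit. A standalone sketch;
// aligned_free names this file's deallocator and is only declared here:

#include <memory>
#include <vector>

void aligned_free(void* ptr);   // provided by the memory allocation header

struct AlignedDeleter {
   void operator()(float* p) const { aligned_free(p); }
};
using AlignedBuffer = std::unique_ptr<float[], AlignedDeleter>;

// A vector of owning handles replaces the manual free loop at the end of
// this function: buffers release themselves when the vector is destroyed.
std::vector<AlignedBuffer> exampleBuffers;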
receive_cells.push_back(c); receive_origin_cells.push_back(nbr); @@ -1610,4 +1616,11 @@ void update_remote_mapping_contribution( } } } + + for (auto p : allocatedPointers) { + + aligned_free(p); + + } + } From f9451e7da64a963ce4f9d175d551715ab53e2781 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 12 Feb 2019 14:37:01 +0200 Subject: [PATCH 204/602] Removed debugging print statements --- vlasovsolver/vlasovmover.cpp | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index 9563f4af5..76ed17dbd 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -76,11 +76,9 @@ void calculateSpatialTranslation( int trans_timer; bool localTargetGridGenerated = false; - const bool printLines = false; int myRank; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // ------------- SLICE - map dist function in Z --------------- // if(P::zcells_ini > 1){ @@ -101,8 +99,6 @@ void calculateSpatialTranslation( phiprof::stop("update_remote-z"); } - - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // ------------- SLICE - map dist function in X --------------- // if(P::xcells_ini > 1){ @@ -125,11 +121,8 @@ void calculateSpatialTranslation( update_remote_mapping_contribution(mpiGrid, 0,-1,popID); phiprof::stop("update_remote-x"); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; } - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // ------------- SLICE - map dist function in Y --------------- // if(P::ycells_ini > 1) { @@ -335,15 +328,10 @@ void calculateAcceleration(dccrg::Dccrg& typedef Parameters P; const vector& cells = getLocalCells(); - const bool printLines = false; - int myRank; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__; - if(printLines) cout << " " << dt << " " << P::tstep << endl; if (dt == 0.0 && P::tstep > 0) { - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Even if acceleration is turned off we need to adjust velocity blocks // because the boundary conditions may have altered the velocity space, @@ -352,13 +340,10 @@ void calculateAcceleration(dccrg::Dccrg& adjustVelocityBlocks(mpiGrid, cells, true, popID); } - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; goto momentCalculation; } phiprof::start("semilag-acc"); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // Accelerate all particle species for (uint popID=0; popID& } } - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Compute global maximum for number of subcycles MPI_Allreduce(&maxSubcycles, &globalMaxSubcycles, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // substep global max times for(uint step=0; step<(uint)globalMaxSubcycles; ++step) { From af189a4ba9e50c100d68bf26c211fd432c2d849b Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 12 Feb 2019 14:39:54 +0200 Subject: [PATCH 205/602] Removed debugging print statements --- 
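Note on the tagOffset change in this patch: the MPI standard only guarantees
tags up to 32767, and the real upper bound is implementation defined (the
comment in the diff below puts it at about 2^21 on cray-mpich), so offsets
derived from the fsgrid size can overflow the valid range. The actual limit
can be queried at run time; a minimal sketch:

    #include <cstdio>
    #include <mpi.h>

    int main(int argc, char** argv) {
       MPI_Init(&argc, &argv);
       int* tagUB = nullptr;
       int flag = 0;
       // MPI_TAG_UB is a predefined attribute cached on MPI_COMM_WORLD.
       MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_TAG_UB, &tagUB, &flag);
       if (flag) std::printf("largest valid tag: %d\n", *tagUB);
       MPI_Finalize();
       return 0;
    }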
vlasiator.cpp | 83 ++------------------------------------------------- 1 file changed, 3 insertions(+), 80 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index eacc6a756..528954503 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -269,9 +269,8 @@ int main(int argn,char* args[]) { Real newDt; bool dtIsChanged; - const bool printLines = false; const bool printCells = false; - const bool printSums = false; + const bool printSums = true; // Init MPI: int required=MPI_THREAD_FUNNELED; @@ -373,8 +372,6 @@ int main(int argn,char* args[]) { // Add AMR refinement criterias: amr_ref_criteria::addRefinementCriteria(); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // Initialize grid. After initializeGrid local cells have dist // functions, and B fields set. Cells have also been classified for // the various sys boundary conditions. All remote cells have been @@ -386,8 +383,6 @@ int main(int argn,char* args[]) { initializeGrid(argn,args,mpiGrid,sysBoundaries,*project); isSysBoundaryCondDynamic = sysBoundaries.isDynamic(); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - phiprof::stop("Init grid"); // Initialize data reduction operators. This should be done elsewhere in order to initialize @@ -408,8 +403,8 @@ int main(int argn,char* args[]) { mpiGrid.topology.is_periodic(2)}; const int fsGridSize = (fsGridDimensions[0] + 4) * (fsGridDimensions[1] + 4) * (fsGridDimensions[2] + 4); - // adding 1 just to be safe - const int tagOffset = fsGridSize + 1; + // setting to 0, values greater than 2^21 cause overflows on cray-mpich + const int tagOffset = 0; int tagId = 0; FsGrid< std::array, 2> perBGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); @@ -478,8 +473,6 @@ int main(int argn,char* args[]) { } } - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - perBGrid. finishGridCoupling(); perBDt2Grid. finishGridCoupling(); EGrid. finishGridCoupling(); @@ -495,29 +488,19 @@ int main(int argn,char* args[]) { technicalGrid. finishGridCoupling(); phiprof::stop("Initial fsgrid coupling"); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // Transfer initial field configuration into the FsGrids feedFieldDataIntoFsGrid(mpiGrid,cells,CellParams::PERBX,perBGrid); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - feedBgFieldsIntoFsGrid(mpiGrid,cells,BgBGrid); BgBGrid.updateGhostCells(); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - setupTechnicalFsGrid(mpiGrid, cells, technicalGrid); technicalGrid.updateGhostCells(); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // WARNING this means moments and dt2 moments are the same here. 
feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,false); feedMomentsIntoFsGrid(mpiGrid, cells, momentsDt2Grid,false); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - phiprof::start("Init field propagator"); if ( initializeFieldPropagator( @@ -542,8 +525,6 @@ int main(int argn,char* args[]) { } phiprof::stop("Init field propagator"); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // Initialize Poisson solver (if used) if (P::propagatePotential == true) { phiprof::start("Init Poisson solver"); @@ -557,8 +538,6 @@ int main(int argn,char* args[]) { // Free up memory: readparameters.finalize(); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - if (P::isRestart == false) { // Run Vlasov solver once with zero dt to initialize //per-cell dt limits. In restarts, we read the dt from file. @@ -594,8 +573,6 @@ int main(int argn,char* args[]) { getVolumeFieldsFromFsGrid(volGrid, mpiGrid, cells); phiprof::stop("getVolumeFieldsFromFsGrid"); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // Save restart data if (P::writeInitialState) { phiprof::start("write-initial-state"); @@ -635,15 +612,11 @@ int main(int argn,char* args[]) { phiprof::stop("write-initial-state"); } - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - if (P::isRestart == false) { //compute new dt phiprof::start("compute-dt"); getFsGridMaxDt(technicalGrid, mpiGrid, cells); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - computeNewTimeStep(mpiGrid,newDt,dtIsChanged); if (P::dynamicTimestep == true && dtIsChanged == true) { // Only actually update the timestep if dynamicTimestep is on @@ -652,8 +625,6 @@ int main(int argn,char* args[]) { phiprof::stop("compute-dt"); } - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - if (!P::isRestart) { //go forward by dt/2 in V, initializes leapfrog split. In restarts the //the distribution function is already propagated forward in time by dt/2 @@ -674,8 +645,6 @@ int main(int argn,char* args[]) { // ***** INITIALIZATION COMPLETE ***** // *********************************** - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // Main simulation loop: if (myRank == MASTER_RANK) logFile << "(MAIN): Starting main simulation loop." << endl << writeVerbose; @@ -740,8 +709,6 @@ int main(int argn,char* args[]) { P::t-P::dt <= P::t_max+DT_EPSILON && wallTimeRestartCounter <= P::exitAfterRestarts) { - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - addTimedBarrier("barrier-loop-start"); phiprof::start("IO"); @@ -753,8 +720,6 @@ int main(int argn,char* args[]) { } phiprof::stop("checkExternalCommands"); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - //write out phiprof profiles and logs with a lower interval than normal //diagnostic (every 10 diagnostic intervals). 
phiprof::start("logfile-io"); @@ -784,8 +749,6 @@ int main(int argn,char* args[]) { logFile << writeVerbose; phiprof::stop("logfile-io"); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // Check whether diagnostic output has to be produced if (P::diagnosticInterval != 0 && P::tstep % P::diagnosticInterval == 0) { vector::const_iterator it; @@ -812,8 +775,6 @@ int main(int argn,char* args[]) { phiprof::stop("diagnostic-io"); } - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - bool extractFsGridFields = true; // write system, loop through write classes for (uint i = 0; i < P::systemWriteTimeInterval.size(); i++) { @@ -867,15 +828,11 @@ int main(int argn,char* args[]) { } } - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // Reduce globalflags::bailingOut from all processes phiprof::start("Bailout-allreduce"); MPI_Allreduce(&(globalflags::bailingOut), &(doBailout), 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); phiprof::stop("Bailout-allreduce"); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // Write restart data if needed // Combined with checking of additional load balancing to have only one collective call. phiprof::start("compute-is-restart-written-and-extra-LB"); @@ -901,8 +858,6 @@ int main(int argn,char* args[]) { globalflags::balanceLoad = false; } } - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - MPI_Bcast( &doNow, 2 , MPI_INT , MASTER_RANK ,MPI_COMM_WORLD); writeRestartNow = doNow[0]; doNow[0] = 0; @@ -912,8 +867,6 @@ int main(int argn,char* args[]) { } phiprof::stop("compute-is-restart-written-and-extra-LB"); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - if (writeRestartNow >= 1){ phiprof::start("write-restart"); if (writeRestartNow == 1) { @@ -934,8 +887,6 @@ int main(int argn,char* args[]) { phiprof::stop("IO"); addTimedBarrier("barrier-end-io"); - - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; //no need to propagate if we are on the final step, we just //wanted to make sure all IO is done even for final step @@ -959,8 +910,6 @@ int main(int argn,char* args[]) { logFile << "(LB): ... done!" << endl << writeVerbose; P::prepareForRebalance = false; - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // Re-couple fsgrids to updated grid situation phiprof::start("fsgrid-recouple-after-lb"); @@ -983,8 +932,6 @@ int main(int argn,char* args[]) { BgBGrid. setupForGridCoupling(); volGrid. setupForGridCoupling(); technicalGrid. setupForGridCoupling(); - - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. @@ -1008,7 +955,6 @@ int main(int argn,char* args[]) { } } // cout << endl; - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; perBGrid. finishGridCoupling(); perBDt2Grid. 
finishGridCoupling(); @@ -1027,8 +973,6 @@ int main(int argn,char* args[]) { overrideRebalanceNow = false; } - - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; //get local cells const vector& cells = getLocalCells(); @@ -1093,7 +1037,6 @@ int main(int argn,char* args[]) { } phiprof::start("Spatial-space"); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; if( P::propagateVlasovTranslation) { calculateSpatialTranslation(mpiGrid,P::dt); @@ -1122,7 +1065,6 @@ int main(int argn,char* args[]) { if(printSums && myRank == MASTER_RANK) cout << " Global sum = " << globalSum << endl; } - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; phiprof::stop("Spatial-space",computedCells,"Cells"); phiprof::start("Compute interp moments"); @@ -1139,8 +1081,6 @@ int main(int argn,char* args[]) { ); phiprof::stop("Compute interp moments"); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // Apply boundary conditions if (P::propagateVlasovTranslation || P::propagateVlasovAcceleration ) { phiprof::start("Update system boundaries (Vlasov post-translation)"); @@ -1149,15 +1089,11 @@ int main(int argn,char* args[]) { addTimedBarrier("barrier-boundary-conditions"); } - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // Propagate fields forward in time by dt. This needs to be done before the // moments for t + dt are computed (field uses t and t+0.5dt) if (P::propagateField) { phiprof::start("Propagate Fields"); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - phiprof::start("fsgrid-coupling-in"); // Copy moments over into the fsgrid. 
//setupTechnicalFsGrid(mpiGrid, cells, technicalGrid); @@ -1196,8 +1132,6 @@ int main(int argn,char* args[]) { poisson::solve(mpiGrid); } - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - phiprof::start("Velocity-space"); if ( P::propagateVlasovAcceleration ) { calculateAcceleration(mpiGrid,P::dt); @@ -1207,13 +1141,9 @@ int main(int argn,char* args[]) { calculateAcceleration(mpiGrid, 0.0); } - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - phiprof::stop("Velocity-space",computedCells,"Cells"); addTimedBarrier("barrier-after-acceleration"); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - phiprof::start("Compute interp moments"); // *here we compute rho and rho_v for timestep t + dt, so next // timestep * // @@ -1230,16 +1160,12 @@ int main(int argn,char* args[]) { ); phiprof::stop("Compute interp moments"); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - phiprof::stop("Propagate",computedCells,"Cells"); phiprof::start("Project endTimeStep"); project->hook(hook::END_OF_TIME_STEP, mpiGrid); phiprof::stop("Project endTimeStep"); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - // Check timestep if (P::dt < P::bailout_min_dt) { stringstream s; @@ -1251,13 +1177,10 @@ int main(int argn,char* args[]) { ++P::tstep; P::t += P::dt; - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; } double after = MPI_Wtime(); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; - phiprof::stop("Simulation"); phiprof::start("Finalization"); if (P::propagateField ) { From e5cdab56d354b52e9d27c3419170573049852957 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 12 Feb 2019 15:07:17 +0200 Subject: [PATCH 206/602] Rewrite of the remote neighbor update. Takes into account asymmetric sender/receiver neighborhoods between cells of different refinement levels, by using get_neighbors_to() for receive. Takes into account local siblings when allocating space for communications. Does not crash, but produces crazy diffusion. --- definitions.h | 1 + spatial_cell.cpp | 8 +- spatial_cell.hpp | 4 +- vlasiator.cpp | 2 +- vlasovsolver/cpu_trans_map_amr.cpp | 447 ++++++++++++++++------------- 5 files changed, 251 insertions(+), 211 deletions(-) diff --git a/definitions.h b/definitions.h index 8f8308494..abd033c6b 100644 --- a/definitions.h +++ b/definitions.h @@ -130,6 +130,7 @@ typedef Realf (*AmrVelRefinement)(const Realf* velBlock); #endif // Max number of face neighbors per dimension with AMR +#define MAX_NEIGHBORS_PER_DIM 8 #define MAX_FACE_NEIGHBORS_PER_DIM 4 #endif diff --git a/spatial_cell.cpp b/spatial_cell.cpp index a789456a5..e97774049 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -58,7 +58,7 @@ namespace spatial_cell { this->derivativesBVOL[i]=0; } - for (unsigned int i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { + for (unsigned int i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { this->neighbor_number_of_blocks[i] = 0; this->neighbor_block_data[i] = NULL; } @@ -649,7 +649,7 @@ namespace spatial_cell { * neighbor. 
The values of neighbor_block_data
       * and neighbor_number_of_blocks should be set in
       * solver.*/
-      for ( int i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) {
+      for ( int i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) {
          displacements.push_back((uint8_t*) this->neighbor_block_data[i] - (uint8_t*) this);
          block_lengths.push_back(sizeof(Realf) * VELOCITY_BLOCK_LENGTH * this->neighbor_number_of_blocks[i]);
       }
@@ -825,14 +825,14 @@ namespace spatial_cell {
          datatype = MPI_BYTE;
       }

-      const bool printMpiDatatype = false;
+      const bool printMpiDatatype = true;
       if(printMpiDatatype) {
         int mpiSize;
         int myRank;
         MPI_Type_size(datatype,&mpiSize);
         MPI_Comm_rank(MPI_COMM_WORLD,&myRank);
         cout << myRank << " get_mpi_datatype: " << cellID << " " << sender_rank << " " << receiver_rank << " " << mpiSize << ", Nblocks = " << populations[activePopID].N_blocks << ", nbr Nblocks =";
-        for (uint i = 0; i < 4; ++i) {
+        for (uint i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) {
           cout << " " << this->neighbor_number_of_blocks[i];
         }
         cout << endl;

diff --git a/spatial_cell.hpp b/spatial_cell.hpp
index 5d8437d56..4f3396b70 100644
--- a/spatial_cell.hpp
+++ b/spatial_cell.hpp
@@ -333,9 +333,9 @@ namespace spatial_cell {
      //Realf* neighbor_block_data;       /**< Pointers for translation operator. We can point to neighbor
      //                                    * cell block data. We do not allocate memory for the pointer.*/
      //vmesh::LocalID neighbor_number_of_blocks;
-     std::array<Realf*, MAX_FACE_NEIGHBORS_PER_DIM> neighbor_block_data;   /**< Pointers for translation operator. We can point to neighbor
+     std::array<Realf*, MAX_NEIGHBORS_PER_DIM> neighbor_block_data;        /**< Pointers for translation operator. We can point to neighbor
                                                                             * cell block data. We do not allocate memory for the pointer.*/
-     std::array<vmesh::LocalID, MAX_FACE_NEIGHBORS_PER_DIM> neighbor_number_of_blocks;
+     std::array<vmesh::LocalID, MAX_NEIGHBORS_PER_DIM> neighbor_number_of_blocks;
      uint sysBoundaryFlag;        /**< What type of system boundary does the cell belong to.
                                    * Enumerated in the sysboundarytype namespace's enum.*/
      uint sysBoundaryLayer;       /**< Layers counted from closest systemBoundary.
If 0 then it has not diff --git a/vlasiator.cpp b/vlasiator.cpp index 528954503..2a0ac9993 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -899,7 +899,7 @@ int main(int argn,char* args[]) { //Re-loadbalance if needed //TODO - add LB measure and do LB if it exceeds threshold #warning Re-loadbalance has been disabled temporarily for amr debugging - if(((P::tstep % P::rebalanceInterval == 0 && P::tstep > P::tstep_min) || overrideRebalanceNow)) { + if(((P::tstep % P::rebalanceInterval == 0 && P::tstep > P::tstep_min) || overrideRebalanceNow) && false) { logFile << "(LB): Start load balance, tstep = " << P::tstep << " t = " << P::t << endl << writeVerbose; balanceLoad(mpiGrid, sysBoundaries); addTimedBarrier("barrier-end-load-balance"); diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 4bca1d0a2..f1b092437 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -596,11 +596,11 @@ void propagatePencil(Vec* dz, Vec* values, const uint dimension, z_1 = select(positiveTranslationDirection, 1.0 - z_translation, 0.0); z_2 = select(positiveTranslationDirection, 1.0, - z_translation); - if( horizontal_or(abs(z_1) > Vec(1.0)) || horizontal_or(abs(z_2) > Vec(1.0)) ) { - std::cout << "Error, CFL condition violated\n"; - std::cout << "Exiting\n"; - std::exit(1); - } + // if( horizontal_or(abs(z_1) > Vec(1.0)) || horizontal_or(abs(z_2) > Vec(1.0)) ) { + // std::cout << "Error, CFL condition violated\n"; + // std::cout << "Exiting\n"; + // std::exit(1); + // } for (uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { @@ -956,7 +956,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const Realv dt, const uint popID) { - const bool printPencils = false; + const bool printPencils = true; const bool printTargets = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ @@ -1306,7 +1306,28 @@ bool trans_map_1d_amr(const dccrg::Dccrg& return true; } +int get_sibling_index(dccrg::Dccrg& mpiGrid, const CellID& cellid) { + + const int NO_SIBLINGS = 0; + const int ERROR = -1; + + if(mpiGrid.get_refinement_level(cellid) == 0) { + return NO_SIBLINGS; + } + + CellID parent = mpiGrid.get_parent(cellid); + if (parent == INVALID_CELLID) { + return ERROR; + } + + vector siblings = mpiGrid.get_all_children(parent); + auto location = std::find(siblings.begin(),siblings.end(),cellid); + auto index = std::distance(siblings.begin(), location); + + return index; + +} /*! 
@@ -1325,182 +1346,180 @@ void update_remote_mapping_contribution( const vector local_cells = mpiGrid.get_cells(); const vector remote_cells = mpiGrid.get_remote_cells_on_process_boundary(VLASOV_SOLVER_NEIGHBORHOOD_ID); vector receive_cells; - vector send_cells; + set send_cells; vector receive_origin_cells; - vector receive_origin_index; - - //normalize - if(direction > 0) direction = 1; - if(direction < 0) direction = -1; + vector receive_origin_index; + + int neighborhood = 0; + + // For debugging + int myRank; + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - for (auto c : remote_cells) { + //normalize and set neighborhoods + if(direction > 0) { + direction = 1; + switch (dimension) { + case 0: + neighborhood = SHIFT_P_X_NEIGHBORHOOD_ID; + break; + case 1: + neighborhood = SHIFT_P_Y_NEIGHBORHOOD_ID; + break; + case 2: + neighborhood = SHIFT_P_Z_NEIGHBORHOOD_ID; + break; + } + } + if(direction < 0) { + direction = -1; + switch (dimension) { + case 0: + neighborhood = SHIFT_M_X_NEIGHBORHOOD_ID; + break; + case 1: + neighborhood = SHIFT_M_Y_NEIGHBORHOOD_ID; + break; + case 2: + neighborhood = SHIFT_M_Z_NEIGHBORHOOD_ID; + break; + } + } - SpatialCell *ccell = mpiGrid[c]; + MPI_Barrier(MPI_COMM_WORLD); + cout << "begin update_remote_mapping_contribution, dimension = " << dimension << ", direction = " << direction << endl; + MPI_Barrier(MPI_COMM_WORLD); + // Initialize remote cells + for (auto rc : remote_cells) { + SpatialCell *ccell = mpiGrid[rc]; // Initialize number of blocks to 0 and block data to a default value. // We need the default for 1 to 1 communications if(ccell) { - for (uint i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { + for (uint i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { ccell->neighbor_block_data[i] = ccell->get_data(popID); ccell->neighbor_number_of_blocks[i] = 0; } } } - for (auto c : local_cells) { - - SpatialCell *ccell = mpiGrid[c]; - + // Initialize local cells + for (auto lc : local_cells) { + SpatialCell *ccell = mpiGrid[lc]; if(ccell) { - - // Initialize number of blocks to 0 and block data to a default value. - for (uint i = 0; i < MAX_FACE_NEIGHBORS_PER_DIM; ++i) { + // Initialize number of blocks to 0 and neighbor block data pointer to the local block data pointer + for (uint i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { ccell->neighbor_block_data[i] = ccell->get_data(popID); ccell->neighbor_number_of_blocks[i] = 0; } } } - set allNeighbors; - int neighborhood = getNeighborhood(dimension,1); - - vector allocatedPointers; + vector receiveBuffers; + vector sendBuffers; for (auto c : local_cells) { SpatialCell *ccell = mpiGrid[c]; - if (!ccell) { - continue; - } - - auto* nbrPairVector = mpiGrid.get_neighbors_of(c, neighborhood); - - // neighbors in the positive direction - vector p_nbrs; - // neighbors on the negative direction - vector n_nbrs; - - // Collect neighbors on the positive and negative sides into separate lists - for (auto nbrPair : *nbrPairVector) { - - if (nbrPair.second.at(dimension) == direction) { - p_nbrs.push_back(nbrPair.first); - } - - if (nbrPair.second.at(dimension) == -direction) { - n_nbrs.push_back(nbrPair.first); - } - } - - // if (all_of(nbrPairVector->begin(), nbrPairVector->end(), - // [&mpiGrid](pair > p){return mpiGrid.is_local(p.first);})) { - // // Only local neighbors, move on. 
- // continue; - // } + if (!ccell) continue; - uint nSiblings = 1; + // Send to neighbors_to + auto* nbrToPairVector = mpiGrid.get_neighbors_to(c, neighborhood); + // Receive from neighbors_of + auto* nbrOfPairVector = mpiGrid.get_neighbors_of(c, neighborhood); + uint sendIndex = 0; uint recvIndex = 0; - vector siblings; - auto myIndices = mpiGrid.mapping.get_indices(c); - auto myParent = mpiGrid.get_parent(c); - - // Find out which cell in the list of siblings this cell is. That will determine which - // neighbor_block_data element gets allocated and read after the communication. - - if( c != myParent) { - auto allSiblings = mpiGrid.get_all_children(myParent); + int mySiblingIndex = get_sibling_index(mpiGrid,c); - for (auto sibling : allSiblings) { - auto indices = mpiGrid.mapping.get_indices(sibling); - if(indices[dimension] == myIndices[dimension]) { - siblings.push_back(sibling); - } - } - - auto myLocation = std::find(siblings.begin(),siblings.end(),c); - - if(myLocation != siblings.end()) { - nSiblings = siblings.size(); - sendIndex = std::distance(siblings.begin(), myLocation); - recvIndex = std::distance(siblings.begin(), myLocation); - } - } - - // Set up sends if any neighbor cells in p_nbrs are non-local. - if (!all_of(p_nbrs.begin(), p_nbrs.end(),[&mpiGrid](CellID i){return mpiGrid.is_local(i);})) { + // Set up sends if any neighbor cells in nbrToPairVector are non-local. + if (!all_of(nbrToPairVector->begin(), nbrToPairVector->end(), + [&mpiGrid](pair> i){return mpiGrid.is_local(i.first);})) { // ccell adds a neighbor_block_data block for each neighbor in the positive direction to its local data - for (uint i_nbr = 0; i_nbr < p_nbrs.size(); ++i_nbr) { + for (auto nbrPair : *nbrToPairVector) { bool initBlocksForEmptySiblings = false; - CellID nbr = p_nbrs[i_nbr]; - - if(nSiblings == 1 && p_nbrs.size() == 4) { - sendIndex = i_nbr; - } - + CellID nbr = nbrPair.first; + //Send data in nbr target array that we just mapped to, if // 1) it is a valid target, // 2) the source cell in center was translated, - if(nbr != INVALID_CELLID && do_translate_cell(ccell)) { - - // 3) Cell is remote. - if (!mpiGrid.is_local(nbr)) { + // 3) Cell is remote. + if(nbr != INVALID_CELLID && do_translate_cell(ccell) && !mpiGrid.is_local(nbr)) { + + /* + Select the index to the neighbor_block_data and neighbor_number_of_blocks arrays + 1) Ref_c == Ref_nbr == 0, index = 0 + 2) Ref_c == Ref_nbr != 0, index = c sibling index + 3) Ref_c > Ref_nbr , index = c sibling index + 4) Ref_c < Ref_nbr , index = nbr sibling index + */ + + if(mpiGrid.get_refinement_level(c) >= mpiGrid.get_refinement_level(nbr)) { + sendIndex = mySiblingIndex; + } else if (mpiGrid.get_refinement_level(nbr) > mpiGrid.get_refinement_level(c)) { + sendIndex = get_sibling_index(mpiGrid,nbr); + } - SpatialCell *pcell = mpiGrid[nbr]; + SpatialCell *pcell = mpiGrid[nbr]; - // 4) it is not a boundary cell, - if(pcell && pcell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { - - if(allNeighbors.find(nbr) == allNeighbors.end()) { - // 5a) We have not already sent data from this rank to this cell. + // 4) it exists and is not a boundary cell, + if(pcell && pcell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + + if(send_cells.find(nbr) == send_cells.end()) { + // 5a) We have not already sent data from this rank to this cell. 
- allNeighbors.insert(nbr); + ccell->neighbor_number_of_blocks.at(sendIndex) = pcell->get_number_of_velocity_blocks(popID); + ccell->neighbor_block_data.at(sendIndex) = pcell->get_data(popID); - ccell->neighbor_number_of_blocks.at(sendIndex) = pcell->get_number_of_velocity_blocks(popID); - ccell->neighbor_block_data.at(sendIndex) = pcell->get_data(popID); - send_cells.push_back(nbr); - } else { - initBlocksForEmptySiblings = true; + auto *allNbrs = mpiGrid.get_neighbors_of(c, FULL_NEIGHBORHOOD_ID); + bool faceNeighbor = false; + for (auto nbrPair : *allNbrs) { + if(nbrPair.first == nbr && abs(nbrPair.second.at(dimension)) == 1) { + faceNeighbor = true; + } } - } - } else { - // If some but not all neighbors are local, we need to initialize their number of blocks - // and block data - // for communication because we don't check at receive how many siblings are remote. - initBlocksForEmptySiblings = true; - } - } - - if(initBlocksForEmptySiblings) { + if (faceNeighbor) { + send_cells.insert(nbr); + } + + } else { + ccell->neighbor_number_of_blocks.at(sendIndex) = mpiGrid[nbr]->get_number_of_velocity_blocks(popID); + ccell->neighbor_block_data.at(sendIndex) = + (Realf*) aligned_malloc(ccell->neighbor_number_of_blocks.at(sendIndex) * WID3 * sizeof(Realf), 64); + sendBuffers.push_back(ccell->neighbor_block_data.at(sendIndex)); + for (uint j = 0; j < ccell->neighbor_number_of_blocks.at(sendIndex) * WID3; ++j) { + ccell->neighbor_block_data[sendIndex][j] = 0.0; + + } // closes for(uint j = 0; j < ccell->neighbor_number_of_blocks.at(sendIndex) * WID3; ++j) + + } // closes if(send_cells.find(nbr) == send_cells.end()) + + } // closes if(pcell && pcell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) + + } // closes if(nbr != INVALID_CELLID && do_translate_cell(ccell) && !mpiGrid.is_local(nbr)) - ccell->neighbor_number_of_blocks.at(sendIndex) = mpiGrid[nbr]->get_number_of_velocity_blocks(popID); - ccell->neighbor_block_data.at(sendIndex) = - (Realf*) aligned_malloc(ccell->neighbor_number_of_blocks.at(sendIndex) * WID3 * sizeof(Realf), 64); - allocatedPointers.push_back(ccell->neighbor_block_data.at(sendIndex)); - for (uint j = 0; j < ccell->neighbor_number_of_blocks.at(sendIndex) * WID3; ++j) { - ccell->neighbor_block_data[sendIndex][j] = 0.0; - } - } - } - } + } // closes for(uint i_nbr = 0; i_nbr < nbrs_to.size(); ++i_nbr) + + } // closes if(!all_of(nbrs_to.begin(), nbrs_to.end(),[&mpiGrid](CellID i){return mpiGrid.is_local(i);})) - // Set up receives if any neighbor cells in n_nbrs are non-local. - if (!all_of(n_nbrs.begin(), n_nbrs.end(),[&mpiGrid](CellID i){return mpiGrid.is_local(i);})) { - - for (uint i_nbr = 0; i_nbr < n_nbrs.size(); ++i_nbr) { + // Set up receives if any neighbor cells in nbrOfPairVector are non-local. 
+ if (!all_of(nbrOfPairVector->begin(), nbrOfPairVector->end(), + [&mpiGrid](pair> i){return mpiGrid.is_local(i.first);})) { + + // ccell adds a neighbor_block_data block for each neighbor in the positive direction to its local data + for (auto nbrPair : *nbrOfPairVector) { - CellID nbr = n_nbrs[i_nbr]; + CellID nbr = nbrPair.first; if (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && ccell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { //Receive data that ncell mapped to this local cell data array, //if 1) ncell is a valid source cell, 2) center cell is to be updated (normal cell) 3) ncell is remote - //we will here allocate a receive buffer, since we need to aggregate values SpatialCell *ncell = mpiGrid[nbr]; @@ -1508,85 +1527,80 @@ void update_remote_mapping_contribution( if(!ncell) { continue; } - - // There are four possibilities for how we receive data - // 1) sibling of 1 receiving from sibling of 1 - // Receiving cell reads from 0th element of neighbor_block_data - // 2) sibling of 4 receiving from sibling of 1 - // Receiving cell reads from recveIndex'th element of neighbor_block_data - // 3) sibling of 1 receiving from sibling of 4 - // Receiving cell reads all elements from neighbor_block_data that have data from remote neighbors - // 4) sibling of 4 receiving from sibling of 4 - // Receiving cell reads from recvIndex'th element of neighbor_block_data - - // This covers options 1 & 4 - if(mpiGrid.get_refinement_level(nbr) == mpiGrid.get_refinement_level(c)) { - - // Allocate memory for one sibling. Each cell will send/receive with the previously calculated recvIndex. - ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); - ncell->neighbor_block_data.at(recvIndex) = - (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(recvIndex) * WID3 * sizeof(Realf), 64); - allocatedPointers.push_back(ncell->neighbor_block_data.at(recvIndex)); - - } else if(nSiblings == 4 && n_nbrs.size() == 1) { - - // Allocate memory for each sibling to receive all the data sent by ncell. + + /* + Select the index to the neighbor_block_data and neighbor_number_of_blocks arrays + 1) Ref_nbr == Ref_c == 0, index = 0 + 2) Ref_nbr == Ref_c != 0, index = nbr sibling index + 3) Ref_nbr > Ref_c , index = nbr sibling index + 4) Ref_nbr < Ref_c , index = c sibling index + */ - for (uint i_sib = 0; i_sib < MAX_FACE_NEIGHBORS_PER_DIM; ++i_sib) { + if(mpiGrid.get_refinement_level(nbr) >= mpiGrid.get_refinement_level(c)) { + recvIndex = get_sibling_index(mpiGrid,nbr); + } else if (mpiGrid.get_refinement_level(c) > mpiGrid.get_refinement_level(nbr)) { + recvIndex = mySiblingIndex; + } + + if(mpiGrid.get_refinement_level(nbr) < mpiGrid.get_refinement_level(c)) { + + auto mySiblings = mpiGrid.get_all_children(mpiGrid.get_parent(c)); + auto myIndices = mpiGrid.mapping.get_indices(c); + + // Allocate memory for each sibling to receive all the data sent by coarser ncell. + // nbrs_to of the sender will only include the face neighbors, only allocate blocks for those. 
+ for (uint i_sib = 0; i_sib < MAX_NEIGHBORS_PER_DIM; ++i_sib) { - auto* scell = mpiGrid[siblings.at(i_sib)]; + auto sibling = mySiblings.at(i_sib); + auto sibIndices = mpiGrid.mapping.get_indices(sibling); - ncell->neighbor_number_of_blocks.at(i_sib) = scell->get_number_of_velocity_blocks(popID); - ncell->neighbor_block_data.at(i_sib) = - (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(i_sib) * WID3 * sizeof(Realf), 64); - allocatedPointers.push_back(ncell->neighbor_block_data.at(recvIndex)); - } - - } else if(nSiblings == 1 && n_nbrs.size() == 4) { - - // Each remote neighbor will allocate memory for the data it's about to receive at recvIndex = i_nbr. - - recvIndex = i_nbr; + // Only allocate siblings that are remote face neighbors to ncell + if(mpiGrid.get_process(sibling) != mpiGrid.get_process(nbr) + && myIndices.at(dimension) == sibIndices.at(dimension)) { + + auto* scell = mpiGrid[sibling]; + + ncell->neighbor_number_of_blocks.at(i_sib) = scell->get_number_of_velocity_blocks(popID); + ncell->neighbor_block_data.at(i_sib) = + (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(i_sib) * WID3 * sizeof(Realf), 64); + receiveBuffers.push_back(ncell->neighbor_block_data.at(i_sib)); + } + } + } else { + + // Allocate memory for one sibling at the previously calculated recvIndex. ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); ncell->neighbor_block_data.at(recvIndex) = (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(recvIndex) * WID3 * sizeof(Realf), 64); - allocatedPointers.push_back(ncell->neighbor_block_data.at(recvIndex)); + receiveBuffers.push_back(ncell->neighbor_block_data.at(recvIndex)); } + + // Only nearest neighbors (nbrpair.second(dimension) == 1 are added to the + // block data of the receiving cells + if (abs(nbrPair.second.at(dimension)) == 1) { + + receive_cells.push_back(c); + receive_origin_cells.push_back(nbr); + receive_origin_index.push_back(recvIndex); + + } + } // closes (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && ...) 
- receive_cells.push_back(c); - receive_origin_cells.push_back(nbr); - receive_origin_index.push_back(recvIndex); - - } - } - } - } + } // closes for(uint i_nbr = 0; i_nbr < nbrs_of.size(); ++i_nbr) + + } // closes if(!all_of(nbrs_of.begin(), nbrs_of.end(),[&mpiGrid](CellID i){return mpiGrid.is_local(i);})) + + } // closes for (auto c : local_cells) { // Do communication SpatialCell::setCommunicatedSpecies(popID); SpatialCell::set_mpi_transfer_type(Transfer::NEIGHBOR_VEL_BLOCK_DATA); - switch(dimension) { - case 0: - if(direction > 0) mpiGrid.update_copies_of_remote_neighbors(SHIFT_P_X_NEIGHBORHOOD_ID); - if(direction < 0) mpiGrid.update_copies_of_remote_neighbors(SHIFT_M_X_NEIGHBORHOOD_ID); - break; - case 1: - if(direction > 0) mpiGrid.update_copies_of_remote_neighbors(SHIFT_P_Y_NEIGHBORHOOD_ID); - if(direction < 0) mpiGrid.update_copies_of_remote_neighbors(SHIFT_M_Y_NEIGHBORHOOD_ID); - break; - case 2: - if(direction > 0) mpiGrid.update_copies_of_remote_neighbors(SHIFT_P_Z_NEIGHBORHOOD_ID); - if(direction < 0) mpiGrid.update_copies_of_remote_neighbors(SHIFT_M_Z_NEIGHBORHOOD_ID); - break; - } + mpiGrid.update_copies_of_remote_neighbors(neighborhood); - int myRank; - MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - -#pragma omp parallel + // Reduce data: sum received data in the data array to + // the target grid in the temporary block container + //#pragma omp parallel { - // Reduce data: sum received data in the data array to - // the target grid in the temporary block container for (size_t c = 0; c < receive_cells.size(); ++c) { SpatialCell* receive_cell = mpiGrid[receive_cells[c]]; SpatialCell* origin_cell = mpiGrid[receive_origin_cells[c]]; @@ -1597,19 +1611,39 @@ void update_remote_mapping_contribution( Realf *blockData = receive_cell->get_data(popID); Realf *neighborData = origin_cell->neighbor_block_data[receive_origin_index[c]]; + + cout << "Rank " << myRank << ", dim " << dimension << ", dir " << direction; + cout << ". Neighbor data of remote cell " << receive_origin_cells[c] << " is added to local cell " << receive_cells[c]; + cout << " with index " << receive_origin_index[c]; + + Realf checksum = 0.0; -#pragma omp for - for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * receive_cell->get_number_of_velocity_blocks(popID); ++vCell) { + //#pragma omp for + for(uint vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * receive_cell->get_number_of_velocity_blocks(popID); ++vCell) { blockData[vCell] += neighborData[vCell]; + checksum += neighborData[vCell]; + } + + //cout << ". Sum is " << checksum << endl; + + array allChecksums = {}; + cout << ". Sums are "; + for (uint i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { + neighborData = origin_cell->neighbor_block_data[i]; + for(uint vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * receive_cell->get_number_of_velocity_blocks(popID); ++vCell) { + allChecksums[i] += neighborData[vCell]; + } + cout << allChecksums[i] << " "; } + cout << endl; } // send cell data is set to zero. This is to avoid double copy if // one cell is the neighbor on bot + and - side to the same process - for (size_t c = 0; c < send_cells.size(); ++c) { - SpatialCell* spatial_cell = mpiGrid[send_cells[c]]; + for (auto c : send_cells) { + SpatialCell* spatial_cell = mpiGrid[c]; Realf * blockData = spatial_cell->get_data(popID); -#pragma omp for nowait + //#pragma omp for nowait for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { // copy received target data to temporary array where target data is stored. 
blockData[vCell] = 0; @@ -1617,10 +1651,15 @@ void update_remote_mapping_contribution( } } - for (auto p : allocatedPointers) { - + for (auto p : receiveBuffers) { aligned_free(p); - } + for (auto p : sendBuffers) { + aligned_free(p); + } + + MPI_Barrier(MPI_COMM_WORLD); + cout << "end update_remote_mapping_contribution, dimension = " << dimension << ", direction = " << direction << endl; + MPI_Barrier(MPI_COMM_WORLD); } From d9f6f748fb20fdf413e44285445ff8165207ddf0 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 12 Feb 2019 15:28:55 +0200 Subject: [PATCH 207/602] Removed debugging print statements --- spatial_cell.cpp | 2 +- vlasiator.cpp | 2 +- vlasovsolver/cpu_trans_map_amr.cpp | 40 +++++++++++++++--------------- 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/spatial_cell.cpp b/spatial_cell.cpp index e97774049..1ea7081de 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -825,7 +825,7 @@ namespace spatial_cell { datatype = MPI_BYTE; } - const bool printMpiDatatype = true; + const bool printMpiDatatype = false; if(printMpiDatatype) { int mpiSize; int myRank; diff --git a/vlasiator.cpp b/vlasiator.cpp index 2a0ac9993..17f7bc568 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -270,7 +270,7 @@ int main(int argn,char* args[]) { bool dtIsChanged; const bool printCells = false; - const bool printSums = true; + const bool printSums = false; // Init MPI: int required=MPI_THREAD_FUNNELED; diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index f1b092437..0b5aa9123 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -956,7 +956,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const Realv dt, const uint popID) { - const bool printPencils = true; + const bool printPencils = false; const bool printTargets = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ @@ -1387,9 +1387,9 @@ void update_remote_mapping_contribution( } } - MPI_Barrier(MPI_COMM_WORLD); - cout << "begin update_remote_mapping_contribution, dimension = " << dimension << ", direction = " << direction << endl; - MPI_Barrier(MPI_COMM_WORLD); + // MPI_Barrier(MPI_COMM_WORLD); + // cout << "begin update_remote_mapping_contribution, dimension = " << dimension << ", direction = " << direction << endl; + // MPI_Barrier(MPI_COMM_WORLD); // Initialize remote cells for (auto rc : remote_cells) { @@ -1612,9 +1612,9 @@ void update_remote_mapping_contribution( Realf *blockData = receive_cell->get_data(popID); Realf *neighborData = origin_cell->neighbor_block_data[receive_origin_index[c]]; - cout << "Rank " << myRank << ", dim " << dimension << ", dir " << direction; - cout << ". Neighbor data of remote cell " << receive_origin_cells[c] << " is added to local cell " << receive_cells[c]; - cout << " with index " << receive_origin_index[c]; + // cout << "Rank " << myRank << ", dim " << dimension << ", dir " << direction; + // cout << ". Neighbor data of remote cell " << receive_origin_cells[c] << " is added to local cell " << receive_cells[c]; + // cout << " with index " << receive_origin_index[c]; Realf checksum = 0.0; @@ -1626,16 +1626,16 @@ void update_remote_mapping_contribution( //cout << ". Sum is " << checksum << endl; - array allChecksums = {}; - cout << ". 
Sums are "; - for (uint i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { - neighborData = origin_cell->neighbor_block_data[i]; - for(uint vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * receive_cell->get_number_of_velocity_blocks(popID); ++vCell) { - allChecksums[i] += neighborData[vCell]; - } - cout << allChecksums[i] << " "; - } - cout << endl; + // array allChecksums = {}; + // cout << ". Sums are "; + // for (uint i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { + // neighborData = origin_cell->neighbor_block_data[i]; + // for(uint vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * receive_cell->get_number_of_velocity_blocks(popID); ++vCell) { + // allChecksums[i] += neighborData[vCell]; + // } + // cout << allChecksums[i] << " "; + // } + // cout << endl; } // send cell data is set to zero. This is to avoid double copy if @@ -1658,8 +1658,8 @@ void update_remote_mapping_contribution( aligned_free(p); } - MPI_Barrier(MPI_COMM_WORLD); - cout << "end update_remote_mapping_contribution, dimension = " << dimension << ", direction = " << direction << endl; - MPI_Barrier(MPI_COMM_WORLD); + // MPI_Barrier(MPI_COMM_WORLD); + // cout << "end update_remote_mapping_contribution, dimension = " << dimension << ", direction = " << direction << endl; + // MPI_Barrier(MPI_COMM_WORLD); } From f3e4a250cd128c2d31bdef6bbfe5657773269382 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 13 Feb 2019 09:46:15 +0200 Subject: [PATCH 208/602] Removed tag id offsets from fsgrid because too large values caused problems on cray mpi --- vlasiator.cpp | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 17f7bc568..66af95c6e 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -404,22 +404,20 @@ int main(int argn,char* args[]) { const int fsGridSize = (fsGridDimensions[0] + 4) * (fsGridDimensions[1] + 4) * (fsGridDimensions[2] + 4); // setting to 0, values greater than 2^21 cause overflows on cray-mpich - const int tagOffset = 0; - int tagId = 0; - FsGrid< std::array, 2> perBGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); - FsGrid< std::array, 2> perBDt2Grid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); - FsGrid< std::array, 2> EGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); - FsGrid< std::array, 2> EDt2Grid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); - FsGrid< std::array, 2> EHallGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); - FsGrid< std::array, 2> EGradPeGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); - FsGrid< std::array, 2> momentsGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); - FsGrid< std::array, 2> momentsDt2Grid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); - FsGrid< std::array, 2> dPerBGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); - FsGrid< std::array, 2> dMomentsGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); - FsGrid< std::array, 2> BgBGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); - FsGrid< std::array, 2> volGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId++); - FsGrid< fsgrids::technical, 2> technicalGrid(fsGridDimensions, comm, periodicity, tagOffset * tagId); + FsGrid< std::array, 2> perBGrid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> perBDt2Grid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> EGrid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> EDt2Grid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> 
EHallGrid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> EGradPeGrid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> momentsGrid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> momentsDt2Grid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> dPerBGrid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> dMomentsGrid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> BgBGrid(fsGridDimensions, comm, periodicity); + FsGrid< std::array, 2> volGrid(fsGridDimensions, comm, periodicity); + FsGrid< fsgrids::technical, 2> technicalGrid(fsGridDimensions, comm, periodicity); // Set DX,DY and DZ // TODO: This is currently just taking the values from cell 1, and assuming them to be // constant throughout the simulation. From 8ed104b13d623bbbb3bf949519605a060cbfeffa Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 13 Feb 2019 09:47:49 +0200 Subject: [PATCH 209/602] Removed printlines statements --- vlasovsolver/cpu_trans_map.cpp | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index ad8a9ff47..19bfb8606 100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -652,10 +652,8 @@ void update_remote_mapping_contribution( vector receiveBuffers; int myRank; - const bool printLines = true; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - if(printLines) cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << endl; //normalize if(direction > 0) direction = 1; @@ -667,13 +665,9 @@ void update_remote_mapping_contribution( ccell->neighbor_number_of_blocks = 0; } - if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; - //TODO: prepare arrays, make parallel by avoidin push_back and by checking also for other stuff for (size_t c = 0; c < local_cells.size(); ++c) { - //if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; - SpatialCell *ccell = mpiGrid[local_cells[c]]; //default values, to avoid any extra sends and receives ccell->neighbor_block_data = ccell->get_data(popID); @@ -720,8 +714,6 @@ void update_remote_mapping_contribution( m_ngbr = NbrPairVector->front().first; p_ngbr = NbrPairVector->back().first; - //if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << std::endl; - //internal cell, not much to do if (mpiGrid.is_local(p_ngbr) && mpiGrid.is_local(m_ngbr)) continue; @@ -754,9 +746,6 @@ void update_remote_mapping_contribution( } } - MPI_Barrier(MPI_COMM_WORLD); - if(printLines) std::cout << "I am process " << myRank << " at line " << __LINE__ << " of " << __FILE__ << " " << direction << " " << dimension < Date: Fri, 15 Feb 2019 12:54:39 +0200 Subject: [PATCH 210/602] Sort list of local cells to eliminate a special case where the same rank receives on both a refined face neighbor and a non-refined non-face neighbor, causing a crash if the block counts differ and the non-refined neighbor writes its block count to the receiving remote cell neighbor field after the refined neighbor. 
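A sketch of why the sort is sufficient (illustrative C++ only, not the
actual fix): dccrg numbers cells on higher refinement levels after all
coarser cells, so sorting the local cell list in ascending order guarantees
that when two local cells write a block count into the same remote cell's
neighbor_number_of_blocks slot, the refined face neighbor writes last and
wins. The cell IDs and block counts below are made up for illustration.

   #include <algorithm>
   #include <cstdint>
   #include <iostream>
   #include <vector>

   using CellID = std::uint64_t;

   int main() {
      // Hypothetical local cells: 22 is coarse, 4104 sits on a higher
      // refinement level and therefore has a larger ID.
      std::vector<CellID> local_cells = {4104, 22};
      std::sort(local_cells.begin(), local_cells.end());

      std::uint32_t remote_nbr_block_count = 0;
      for (CellID c : local_cells) {
         // Stand-in for "each local cell writes its own block count into
         // the shared remote-cell field"; the last writer wins.
         remote_nbr_block_count = (c == 4104) ? 128 : 64;
      }
      std::cout << remote_nbr_block_count << "\n";   // prints 128
      return 0;
   }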
---
 vlasovsolver/cpu_trans_map_amr.cpp | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp
index 0b5aa9123..ec4a28c5e 100644
--- a/vlasovsolver/cpu_trans_map_amr.cpp
+++ b/vlasovsolver/cpu_trans_map_amr.cpp
@@ -1343,7 +1343,7 @@ void update_remote_mapping_contribution(
    int direction,
    const uint popID) {

-   const vector<CellID> local_cells = mpiGrid.get_cells();
+   vector<CellID> local_cells = mpiGrid.get_cells();
    const vector<CellID> remote_cells = mpiGrid.get_remote_cells_on_process_boundary(VLASOV_SOLVER_NEIGHBORHOOD_ID);
    vector<CellID> receive_cells;
    set<CellID> send_cells;
@@ -1419,6 +1419,13 @@ void update_remote_mapping_contribution(
    vector<Realf*> receiveBuffers;
    vector<Realf*> sendBuffers;

+   // Sort local cells.
+   // This is done to make sure that when receiving data, a refined neighbor will overwrite a less-refined neighbor
+   // on the same rank. This is done because a non-refined neighbor, if such exist simultaneously with a refined neighbor
+   // is a non-face neighbor and therefore does not store the received data, but can screw up the block count.
+   // The sending rank will only consider face neighbors when determining the number of blocks it will send.
+   std::sort(local_cells.begin(), local_cells.end());
+
    for (auto c : local_cells) {

       SpatialCell *ccell = mpiGrid[c];
@@ -1584,6 +1591,7 @@ void update_remote_mapping_contribution(
                receive_origin_index.push_back(recvIndex);

             }
+
          } // closes (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && ...)

       } // closes for(uint i_nbr = 0; i_nbr < nbrs_of.size(); ++i_nbr)

From 42e8f7810aa8124485620c97c0dc394d5734d19a Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Tue, 19 Feb 2019 11:01:13 +0200
Subject: [PATCH 211/602] Remove local cell sorting, implement a different,
 more robust fix.

When setting up the receive, detect whether the receiving cell is a face
neighbor or not, and if not, adjust the block count to that of a remote
face neighbor to match the send.

---
 vlasovsolver/cpu_trans_map_amr.cpp | 72 +++++++++++++++++++++---------
 1 file changed, 52 insertions(+), 20 deletions(-)

diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp
index ec4a28c5e..532ec9eb8 100644
--- a/vlasovsolver/cpu_trans_map_amr.cpp
+++ b/vlasovsolver/cpu_trans_map_amr.cpp
@@ -1418,13 +1418,6 @@ void update_remote_mapping_contribution(
    vector<Realf*> receiveBuffers;
    vector<Realf*> sendBuffers;
-
-   // Sort local cells.
-   // This is done to make sure that when receiving data, a refined neighbor will overwrite a less-refined neighbor
-   // on the same rank. This is done because a non-refined neighbor, if such exist simultaneously with a refined neighbor
-   // is a non-face neighbor and therefore does not store the received data, but can screw up the block count.
-   // The sending rank will only consider face neighbors when determining the number of blocks it will send.
-   std::sort(local_cells.begin(), local_cells.end());

    for (auto c : local_cells) {

      SpatialCell *ccell = mpiGrid[c];
@@ -1542,15 +1535,56 @@ void update_remote_mapping_contribution(
             3) Ref_nbr >  Ref_c   , index = nbr sibling index
             4) Ref_nbr <  Ref_c   , index = c sibling index
           */
-
+
          if(mpiGrid.get_refinement_level(nbr) >= mpiGrid.get_refinement_level(c)) {
+
+            // Allocate memory for one sibling at recvIndex.
+ recvIndex = get_sibling_index(mpiGrid,nbr); - } else if (mpiGrid.get_refinement_level(c) > mpiGrid.get_refinement_level(nbr)) { - recvIndex = mySiblingIndex; - } - - if(mpiGrid.get_refinement_level(nbr) < mpiGrid.get_refinement_level(c)) { + SpatialCell* scell = NULL; + + if (abs(nbrPair.second.at(dimension)) != 1) { + + // nbr is not face neighbor to c --> we are not receiving data mapped to c but to its face neighbor. + // This happens because DCCRG does not allow defining neighborhoods with face neighbors only. + // Figure out who is the face neighbor that nbr maps its data to, then get its number of blocks, + // if it is a remote neighbor to nbr. + + auto myIndices = mpiGrid.mapping.get_indices(nbr); + for (auto localNbrPair : *nbrOfPairVector) { + auto nbrIndices = mpiGrid.mapping.get_indices(localNbrPair.first); + int i1 = (dimension + 1) % 3; + int i2 = (dimension + 2) % 3; + if(myIndices.at(i1) == nbrIndices.at(i1) + && myIndices.at(i2) == nbrIndices.at(i2) + && abs(localNbrPair.second.at(dimension)) == 1 + && mpiGrid.get_process(nbr) != mpiGrid.get_process(localNbrPair.first)) { + + scell = mpiGrid[localNbrPair.first]; + + } + } + } + + if(scell) { + + ncell->neighbor_number_of_blocks.at(recvIndex) = scell->get_number_of_velocity_blocks(popID); + + } else { + + ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); + } + + // ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); + ncell->neighbor_block_data.at(recvIndex) = + (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(recvIndex) * WID3 * sizeof(Realf), 64); + receiveBuffers.push_back(ncell->neighbor_block_data.at(recvIndex)); + + } else { + + recvIndex = mySiblingIndex; + auto mySiblings = mpiGrid.get_all_children(mpiGrid.get_parent(c)); auto myIndices = mpiGrid.mapping.get_indices(c); @@ -1573,13 +1607,7 @@ void update_remote_mapping_contribution( receiveBuffers.push_back(ncell->neighbor_block_data.at(i_sib)); } } - } else { - - // Allocate memory for one sibling at the previously calculated recvIndex. - ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); - ncell->neighbor_block_data.at(recvIndex) = - (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(recvIndex) * WID3 * sizeof(Realf), 64); - receiveBuffers.push_back(ncell->neighbor_block_data.at(recvIndex)); + } // Only nearest neighbors (nbrpair.second(dimension) == 1 are added to the @@ -1599,12 +1627,16 @@ void update_remote_mapping_contribution( } // closes if(!all_of(nbrs_of.begin(), nbrs_of.end(),[&mpiGrid](CellID i){return mpiGrid.is_local(i);})) } // closes for (auto c : local_cells) { + + MPI_Barrier(MPI_COMM_WORLD); // Do communication SpatialCell::setCommunicatedSpecies(popID); SpatialCell::set_mpi_transfer_type(Transfer::NEIGHBOR_VEL_BLOCK_DATA); mpiGrid.update_copies_of_remote_neighbors(neighborhood); + MPI_Barrier(MPI_COMM_WORLD); + // Reduce data: sum received data in the data array to // the target grid in the temporary block container //#pragma omp parallel From 2991e8e11ed06e6718f35afe42397413c213d255 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 19 Feb 2019 12:25:56 +0200 Subject: [PATCH 212/602] Added more options for Amr box in the config file. Can now define X,Y,Z half widths separately and also define the center point X,Y,Z coordinates. 
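For illustration, a hypothetical config fragment using the options this
patch reads (section and option names as registered in parameters.cpp; note
the diff adds Readparameters::add() entries only for the half widths, so the
box_center_* options appear to rely on being registered elsewhere or on
their compiled-in defaults of 0.0):

   [AMR]
   max_spatial_level = 2
   box_half_width_x = 5
   box_half_width_y = 5
   box_half_width_z = 1
   box_center_x = 0.0
   box_center_y = 0.0
   box_center_z = 0.0

With these values the refinement loops cover box_center +- box_half_width
base-grid cells in each dimension, i.e. a flat 11 x 11 x 3 box centered on
the origin, instead of the cube the single box_half_width parameter allowed
before.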
--- parameters.cpp | 18 +++++++++++++++--- parameters.h | 7 ++++++- projects/Flowthrough/Flowthrough.cpp | 16 ++++++---------- projects/Magnetosphere/Magnetosphere.cpp | 16 ++++++---------- projects/testAmr/testAmr.cpp | 16 +++++++--------- 5 files changed, 40 insertions(+), 33 deletions(-) diff --git a/parameters.cpp b/parameters.cpp index 85b6e72c0..e1b1db149 100644 --- a/parameters.cpp +++ b/parameters.cpp @@ -137,7 +137,12 @@ Realf P::amrRefineLimit = 1.0; Realf P::amrCoarsenLimit = 0.5; string P::amrVelRefCriterion = ""; int P::amrMaxSpatialRefLevel = 1; -int P::amrBoxHalfWidth = 5; +int P::amrBoxHalfWidthX = 1; +int P::amrBoxHalfWidthY = 1; +int P::amrBoxHalfWidthZ = 1; +Realf P::amrBoxCenterX = 0.0; +Realf P::amrBoxCenterY = 0.0; +Realf P::amrBoxCenterZ = 0.0; bool Parameters::addParameters(){ //the other default parameters we read through the add/get interface @@ -225,7 +230,9 @@ bool Parameters::addParameters(){ Readparameters::add("AMR.refine_limit","If the refinement criterion function returns a larger value than this, block is refined",(Realf)1.0); Readparameters::add("AMR.coarsen_limit","If the refinement criterion function returns a smaller value than this, block can be coarsened",(Realf)0.5); Readparameters::add("AMR.max_spatial_level","Maximum spatial mesh refinement level",(uint)1); - Readparameters::add("AMR.box_half_width","Half width of the box around origin that is refined (for testing)",(uint)5); + Readparameters::add("AMR.box_half_width_x","Half width of the box around origin that is refined (for testing)",(uint)1); + Readparameters::add("AMR.box_half_width_y","Half width of the box around origin that is refined (for testing)",(uint)1); + Readparameters::add("AMR.box_half_width_z","Half width of the box around origin that is refined (for testing)",(uint)1); return true; } @@ -380,7 +387,12 @@ bool Parameters::getParameters(){ } Readparameters::get("AMR.max_velocity_level",P::amrMaxVelocityRefLevel); Readparameters::get("AMR.max_spatial_level",P::amrMaxSpatialRefLevel); - Readparameters::get("AMR.box_half_width",P::amrBoxHalfWidth); + Readparameters::get("AMR.box_half_width_x",P::amrBoxHalfWidthX); + Readparameters::get("AMR.box_half_width_y",P::amrBoxHalfWidthY); + Readparameters::get("AMR.box_half_width_z",P::amrBoxHalfWidthZ); + Readparameters::get("AMR.box_center_x",P::amrBoxCenterX); + Readparameters::get("AMR.box_center_y",P::amrBoxCenterY); + Readparameters::get("AMR.box_center_z",P::amrBoxCenterZ); Readparameters::get("AMR.vel_refinement_criterion",P::amrVelRefCriterion); Readparameters::get("AMR.refine_limit",P::amrRefineLimit); Readparameters::get("AMR.coarsen_limit",P::amrCoarsenLimit); diff --git a/parameters.h b/parameters.h index 04c7895db..3c9e3d80a 100644 --- a/parameters.h +++ b/parameters.h @@ -137,7 +137,12 @@ struct Parameters { * The value must be larger than amrCoarsenLimit.*/ static std::string amrVelRefCriterion; /**< Name of the velocity block refinement criterion function.*/ static int amrMaxSpatialRefLevel; - static int amrBoxHalfWidth; + static int amrBoxHalfWidthX; + static int amrBoxHalfWidthY; + static int amrBoxHalfWidthZ; + static Realf amrBoxCenterX; + static Realf amrBoxCenterY; + static Realf amrBoxCenterZ; /*! \brief Add the global parameters. 
* diff --git a/projects/Flowthrough/Flowthrough.cpp b/projects/Flowthrough/Flowthrough.cpp index d95e9ed0e..e00ab4c41 100644 --- a/projects/Flowthrough/Flowthrough.cpp +++ b/projects/Flowthrough/Flowthrough.cpp @@ -238,17 +238,13 @@ namespace projects { // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; - std::array xyz_mid; - xyz_mid[0] = (P::xmax + P::xmin) / 2.0; - xyz_mid[1] = (P::ymax + P::ymin) / 2.0; - xyz_mid[2] = (P::zmax + P::zmin) / 2.0; - std::vector refineSuccess; - - for (double x = xyz_mid[0] - P::amrBoxHalfWidth * P::dx_ini; x <= xyz_mid[0] + P::amrBoxHalfWidth * P::dx_ini; x += P::dx_ini) { - for (double y = xyz_mid[1] - P::amrBoxHalfWidth * P::dy_ini; y <= xyz_mid[1] + P::amrBoxHalfWidth * P::dy_ini; y += P::dy_ini) { - for (double z = xyz_mid[2] - P::amrBoxHalfWidth * P::dz_ini; z <= xyz_mid[2] + P::amrBoxHalfWidth * P::dz_ini; z += P::dz_ini) { - auto xyz = xyz_mid; + + for (double x = P::amrBoxCenterX - P::amrBoxHalfWidthX * P::dx_ini; x <= P::amrBoxCenterX + P::amrBoxHalfWidthX * P::dx_ini; x += P::dx_ini) { + for (double y = P::amrBoxCenterY - P::amrBoxHalfWidthY * P::dy_ini; y <= P::amrBoxCenterY + P::amrBoxHalfWidthY * P::dy_ini; y += P::dy_ini) { + for (double z = P::amrBoxCenterZ - P::amrBoxHalfWidthZ * P::dz_ini; z <= P::amrBoxCenterZ + P::amrBoxHalfWidthZ * P::dz_ini; z += P::dz_ini) { + + std::array xyz; xyz[0] = x; xyz[1] = y; xyz[2] = z; diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index 24e9e7109..378631001 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -460,17 +460,13 @@ namespace projects { // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; - std::array xyz_mid; - xyz_mid[0] = (P::xmax + P::xmin) / 2.0; - xyz_mid[1] = (P::ymax + P::ymin) / 2.0; - xyz_mid[2] = (P::zmax + P::zmin) / 2.0; - std::vector refineSuccess; - - for (double x = xyz_mid[0] - P::amrBoxHalfWidth * P::dx_ini; x <= xyz_mid[0] + P::amrBoxHalfWidth * P::dx_ini; x += P::dx_ini) { - for (double y = xyz_mid[1] - P::amrBoxHalfWidth * P::dy_ini; y <= xyz_mid[1] + P::amrBoxHalfWidth * P::dy_ini; y += P::dy_ini) { - for (double z = xyz_mid[2] - P::amrBoxHalfWidth * P::dz_ini; z <= xyz_mid[2] + P::amrBoxHalfWidth * P::dz_ini; z += P::dz_ini) { - auto xyz = xyz_mid; + + for (double x = P::amrBoxCenterX - P::amrBoxHalfWidthX * P::dx_ini; x <= P::amrBoxCenterX + P::amrBoxHalfWidthX * P::dx_ini; x += P::dx_ini) { + for (double y = P::amrBoxCenterY - P::amrBoxHalfWidthY * P::dy_ini; y <= P::amrBoxCenterY + P::amrBoxHalfWidthY * P::dy_ini; y += P::dy_ini) { + for (double z = P::amrBoxCenterZ - P::amrBoxHalfWidthZ * P::dz_ini; z <= P::amrBoxCenterZ + P::amrBoxHalfWidthZ * P::dz_ini; z += P::dz_ini) { + + std::array xyz; xyz[0] = x; xyz[1] = y; xyz[2] = z; diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index 3ead603c2..dfffbec3d 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -263,15 +263,13 @@ namespace projects { // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; - - std::array xyz_mid; - 
xyz_mid[0] = (P::xmax + P::xmin) / 2.0;
-      xyz_mid[1] = (P::ymax + P::ymin) / 2.0;
-      xyz_mid[2] = (P::zmax + P::zmin) / 2.0;
-
-      for (double x = xyz_mid[0] - P::amrBoxHalfWidth * P::dx_ini; x <= xyz_mid[0] + P::amrBoxHalfWidth * P::dx_ini; x += P::dx_ini) {
-         for (double y = xyz_mid[1] - P::amrBoxHalfWidth * P::dy_ini; y <= xyz_mid[1] + P::amrBoxHalfWidth * P::dy_ini; y += P::dy_ini) {
-            auto xyz = xyz_mid;
+
+      std::vector<bool> refineSuccess;
+
+      for (double x = P::amrBoxCenterX - P::amrBoxHalfWidthX * P::dx_ini; x <= P::amrBoxCenterX + P::amrBoxHalfWidthX * P::dx_ini; x += P::dx_ini) {
+         for (double y = P::amrBoxCenterY - P::amrBoxHalfWidthY * P::dy_ini; y <= P::amrBoxCenterY + P::amrBoxHalfWidthY * P::dy_ini; y += P::dy_ini) {
+
+            std::array<double, 3> xyz;
             xyz[0] = x;
             xyz[1] = y;
             //std::cout << "Trying to refine at " << xyz[0] << ", " << xyz[1] << ", " << xyz[2] << std::endl;

From 4a2b19a20565b4495ad67817ffdcf844c3faf7bb Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Tue, 19 Feb 2019 12:34:21 +0200
Subject: [PATCH 213/602] Added Readparameters::add calls to addParameters.

---
 parameters.cpp | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/parameters.cpp b/parameters.cpp
index e1b1db149..fe9853f37 100644
--- a/parameters.cpp
+++ b/parameters.cpp
@@ -230,9 +230,12 @@ bool Parameters::addParameters(){
    Readparameters::add("AMR.refine_limit","If the refinement criterion function returns a larger value than this, block is refined",(Realf)1.0);
    Readparameters::add("AMR.coarsen_limit","If the refinement criterion function returns a smaller value than this, block can be coarsened",(Realf)0.5);
    Readparameters::add("AMR.max_spatial_level","Maximum spatial mesh refinement level",(uint)1);
-   Readparameters::add("AMR.box_half_width_x","Half width of the box around origin that is refined (for testing)",(uint)1);
-   Readparameters::add("AMR.box_half_width_y","Half width of the box around origin that is refined (for testing)",(uint)1);
-   Readparameters::add("AMR.box_half_width_z","Half width of the box around origin that is refined (for testing)",(uint)1);
+   Readparameters::add("AMR.box_half_width_x","Half width of the box that is refined (for testing)",(uint)1);
+   Readparameters::add("AMR.box_half_width_y","Half width of the box that is refined (for testing)",(uint)1);
+   Readparameters::add("AMR.box_half_width_z","Half width of the box that is refined (for testing)",(uint)1);
+   Readparameters::add("AMR.box_center_x","x coordinate of the center of the box that is refined (for testing)",(uint)1);
+   Readparameters::add("AMR.box_center_y","y coordinate of the center of the box that is refined (for testing)",(uint)1);
+   Readparameters::add("AMR.box_center_z","z coordinate of the center of the box that is refined (for testing)",(uint)1);

    return true;
 }

From ea936e5f58d7bc295c4ee27bc9f1259ec6f650d9 Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Thu, 21 Feb 2019 11:12:43 +0200
Subject: [PATCH 214/602] Boundary layer labeling for testing

---
 fieldsolver/gridGlue.cpp | 72 ++++++++++++++++++++++++++++++++++++++--
 vlasiator.cpp            |  2 ++
 2 files changed, 71 insertions(+), 3 deletions(-)

diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp
index 6d9e08561..d53773d2d 100644
--- a/fieldsolver/gridGlue.cpp
+++ b/fieldsolver/gridGlue.cpp
@@ -387,7 +387,69 @@ void setupTechnicalFsGrid(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& m

    technicalGrid.finishTransfersIn();

+   auto localSize = technicalGrid.getLocalSize();
+
+   // Add layer calculation here. Include diagonals +-1.
+
+   // Initialize layer flags to 0.
+ for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + technicalGrid.get(x,y,z)->sysBoundaryLayer = 0; + } + } + } + + // begin with layer 1 + int layer = 0; + bool noCellsInLayer = false; + const int MAX_NUMBER_OF_BOUNDARY_LAYERS = localSize[0]*localSize[1]*localSize[2]; + + // loop through layers until an empty layer is encountered + while(!noCellsInLayer && layer < MAX_NUMBER_OF_BOUNDARY_LAYERS) { + noCellsInLayer = true; + layer++; + + // loop through all cells in grid + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + + // examine all cells that belong to a boundary and have their layer set to the initial value 0 + if(technicalGrid.get(x,y,z)->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && + technicalGrid.get(x,y,z)->sysBoundaryLayer == 0) { + + bool belongsToLayer = false; + + // loop through all neighbors (including diagonals) + for (int ix = -1; ix <= 1; ++ix) { + for (int iy = -1; iy <= 1; ++iy) { + for (int iz = -1; iz <= 1; ++iz) { + + // not strictly necessary but logically we should not consider the cell itself + // among its neighbors. + if( ix == 0 && iy == 0 && iz == 0) continue; + + // in the first layer, boundary cell belongs if it has a non-boundary neighbor + if(layer == 1 && technicalGrid.get(x+ix,y+iy,z+iz)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + belongsToLayer = true; + + // in all other layers, boundary cell belongs if it has a neighbor in the previous layer + } else if (layer > 1 && technicalGrid.get(x+ix,y+iy,z+iz)->sysBoundaryLayer == layer - 1) { + belongsToLayer = true; + } + } + } + } + if (belongsToLayer) { + technicalGrid.get(x,y,z)->sysBoundaryLayer = layer; + noCellsInLayer = false; + } + } + } + } + } + } } void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid, @@ -422,17 +484,21 @@ void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid, auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); // Calculate the number of fsgrid cells we need to average into the current dccrg cell - int nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); + int nCells = pow(pow(2,mpiGrid.get_maximum_refinement_level() - mpiGrid.get_refinement_level(dccrgId)),3); cellParams[CellParams::MAXFDT] = std::numeric_limits::max(); //cellParams[CellParams::FSGRID_RANK] = 0; //cellParams[CellParams::FSGRID_BOUNDARYTYPE] = 0; for (int iCell = 0; iCell < nCells; ++iCell) { - + fsgrids::technical* thisCellData = transferBufferPointer[i] + iCell; + + if (thisCellData->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || thisCellData->sysBoundaryLayer == 1) { - cellParams[CellParams::MAXFDT] = std::min(cellParams[CellParams::MAXFDT],thisCellData->maxFsDt); + cellParams[CellParams::MAXFDT] = std::min(cellParams[CellParams::MAXFDT],thisCellData->maxFsDt); + + } //TODO: Implement something for FSGRID_RANK and FSGRID_BOUNDARYTYPE //cellParams[CellParams::FSGRID_RANK] = thisCellData->fsGridRank; diff --git a/vlasiator.cpp b/vlasiator.cpp index 66af95c6e..5a9cc07b0 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -495,6 +495,8 @@ int main(int argn,char* args[]) { setupTechnicalFsGrid(mpiGrid, cells, technicalGrid); technicalGrid.updateGhostCells(); + technicalGrid.debugOutput([](const fsgrids::technical& a)->void{cerr << a.sysBoundaryLayer << " ";}); + // WARNING this means moments and dt2 moments are the same here. 
feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,false);
       feedMomentsIntoFsGrid(mpiGrid, cells, momentsDt2Grid,false);

From 11a55703080ac90060f84c750c8413d040826ea1 Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Thu, 21 Feb 2019 11:29:53 +0200
Subject: [PATCH 215/602] Added null pointer check

---
 fieldsolver/gridGlue.cpp | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp
index d53773d2d..44d934b29 100644
--- a/fieldsolver/gridGlue.cpp
+++ b/fieldsolver/gridGlue.cpp
@@ -428,8 +428,10 @@ void setupTechnicalFsGrid(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& m

                   // not strictly necessary but logically we should not consider the cell itself
                   // among its neighbors.
-                  if( ix == 0 && iy == 0 && iz == 0) continue;
-
+                  if( ix == 0 && iy == 0 && iz == 0 || !technicalGrid.get(x+ix,y+iy,z+iz)) {
+                     continue;
+                  }
+
                   // in the first layer, boundary cell belongs if it has a non-boundary neighbor
                   if(layer == 1 && technicalGrid.get(x+ix,y+iy,z+iz)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) {
                      belongsToLayer = true;

From 526d07776e6d02d36056364d824e1d2ebe8a4b44 Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Thu, 21 Feb 2019 11:30:52 +0200
Subject: [PATCH 216/602] Removed boundary layer copy from dccrg

---
 fieldsolver/gridGlue.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp
index 44d934b29..9bac19f1a 100644
--- a/fieldsolver/gridGlue.cpp
+++ b/fieldsolver/gridGlue.cpp
@@ -368,7 +368,7 @@ void setupTechnicalFsGrid(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& m
       // Data needs to be collected from some different places for this grid.
       thisCellData->sysBoundaryFlag = mpiGrid[cells[i]]->sysBoundaryFlag;
       // Remove boundary layer copy here
-      thisCellData->sysBoundaryLayer = mpiGrid[cells[i]]->sysBoundaryLayer;
+      // thisCellData->sysBoundaryLayer = mpiGrid[cells[i]]->sysBoundaryLayer;
       thisCellData->maxFsDt = std::numeric_limits<Real>::max();
    }

From c52f80b22ce9cff52a90789e02d38a412bfc3c77 Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Thu, 21 Feb 2019 13:03:23 +0200
Subject: [PATCH 217/602] Setting sysBoundaryFlag on layers > 1 to
 DO_NOT_COMPUTE

---
 fieldsolver/gridGlue.cpp | 4 ++++
 vlasiator.cpp            | 4 +++-
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp
index 9bac19f1a..0a3aa90f1 100644
--- a/fieldsolver/gridGlue.cpp
+++ b/fieldsolver/gridGlue.cpp
@@ -445,7 +445,11 @@ void setupTechnicalFsGrid(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& m
          }
          if (belongsToLayer) {
             technicalGrid.get(x,y,z)->sysBoundaryLayer = layer;
+            if (layer > 1) {
+               technicalGrid.get(x,y,z)->sysBoundaryFlag = sysboundarytype::DO_NOT_COMPUTE;
+            }
             noCellsInLayer = false;
+            //std::cout << "boundary layer at " << x << ", " << y << ", " << z << " = " << layer << std::endl;
          }
       }
    }
 }
diff --git a/vlasiator.cpp b/vlasiator.cpp
index 5a9cc07b0..81207037b 100644
--- a/vlasiator.cpp
+++ b/vlasiator.cpp
@@ -495,7 +495,9 @@ int main(int argn,char* args[]) {
       setupTechnicalFsGrid(mpiGrid, cells, technicalGrid);
       technicalGrid.updateGhostCells();

-      technicalGrid.debugOutput([](const fsgrids::technical& a)->void{cerr << a.sysBoundaryLayer << " ";});
+      // if(myRank == MASTER_RANK) {
+      //    technicalGrid.debugOutput([](const fsgrids::technical& a)->void{cerr << a.sysBoundaryLayer << " ";});
+      // }

       // WARNING this means moments and dt2 moments are the same here.
feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,false); From d7b8b29529c65a8ab4f99cfce79e3f631cbb6e2d Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 22 Feb 2019 15:06:44 +0200 Subject: [PATCH 218/602] Moved the neighbor check in the boundary layer calculation to a separate function to clarify code. --- fieldsolver/gridGlue.cpp | 66 ++++++++++++++++++++++------------------ 1 file changed, 37 insertions(+), 29 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 0a3aa90f1..6803630f0 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -350,7 +350,37 @@ void getDerivativesFromFsGrid(FsGrid< std::array, } } - + +bool belongsToLayer(const int layer, const int x, const int y, const int z, + FsGrid< fsgrids::technical, 2>& technicalGrid) { + + bool belongs = false; + + // loop through all neighbors (including diagonals) + for (int ix = -1; ix <= 1; ++ix) { + for (int iy = -1; iy <= 1; ++iy) { + for (int iz = -1; iz <= 1; ++iz) { + + // not strictly necessary but logically we should not consider the cell itself + // among its neighbors. + if( ix == 0 && iy == 0 && iz == 0 || !technicalGrid.get(x+ix,y+iy,z+iz)) { + continue; + } + + if(layer == 1 && technicalGrid.get(x+ix,y+iy,z+iz)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + // in the first layer, boundary cell belongs if it has a non-boundary neighbor + belongs = true; + + } else if (layer > 1 && technicalGrid.get(x+ix,y+iy,z+iz)->sysBoundaryLayer == layer - 1) { + // in all other layers, boundary cell belongs if it has a neighbor in the previous layer + belongs = true; + } + } + } + } + + return belongs; +} void setupTechnicalFsGrid(dccrg::Dccrg& mpiGrid, const std::vector& cells, FsGrid< fsgrids::technical, 2>& technicalGrid) { @@ -391,7 +421,7 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m // Add layer calculation here. Include diagonals +-1. - // Initialize layer flags to 0. + // Initialize boundary layer flags to 0. for (int x = 0; x < localSize[0]; ++x) { for (int y = 0; y < localSize[1]; ++y) { for (int z = 0; z < localSize[2]; ++z) { @@ -419,39 +449,17 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m if(technicalGrid.get(x,y,z)->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && technicalGrid.get(x,y,z)->sysBoundaryLayer == 0) { - bool belongsToLayer = false; - - // loop through all neighbors (including diagonals) - for (int ix = -1; ix <= 1; ++ix) { - for (int iy = -1; iy <= 1; ++iy) { - for (int iz = -1; iz <= 1; ++iz) { - - // not strictly necessary but logically we should not consider the cell itself - // among its neighbors. 
-                  if( ix == 0 && iy == 0 && iz == 0 || !technicalGrid.get(x+ix,y+iy,z+iz)) {
-                     continue;
-                  }
-
-                  // in the first layer, boundary cell belongs if it has a non-boundary neighbor
-                  if(layer == 1 && technicalGrid.get(x+ix,y+iy,z+iz)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) {
-                     belongsToLayer = true;
-
-                     // in all other layers, boundary cell belongs if it has a neighbor in the previous layer
-                  } else if (layer > 1 && technicalGrid.get(x+ix,y+iy,z+iz)->sysBoundaryLayer == layer - 1) {
-                     belongsToLayer = true;
-                  }
-               }
-            }
-         }
-         if (belongsToLayer) {
+         if (belongsToLayer(layer, x, y, z, technicalGrid)) {
+
             technicalGrid.get(x,y,z)->sysBoundaryLayer = layer;
+
             if (layer > 1) {
                technicalGrid.get(x,y,z)->sysBoundaryFlag = sysboundarytype::DO_NOT_COMPUTE;
             }
             noCellsInLayer = false;
-            //std::cout << "boundary layer at " << x << ", " << y << ", " << z << " = " << layer << std::endl;
+            std::cout << "boundary layer at " << x << ", " << y << ", " << z << " = " << layer << std::endl;
          }
-      }
       }
    }
 }

From 15de9bf7896a734424072094b7d49eec2f1fdd29 Mon Sep 17 00:00:00 2001
From: Tuomas Koskela
Date: Fri, 22 Feb 2019 20:19:08 +0200
Subject: [PATCH 219/602] 1) Changed default max refinement level to 0 2)
 updated refine function in test_fp, should really put it in a separate
 header file

---
 parameters.cpp               |  4 ++--
 projects/test_fp/test_fp.cpp | 20 +++++++------------
 2 files changed, 9 insertions(+), 15 deletions(-)

diff --git a/parameters.cpp b/parameters.cpp
index fe9853f37..ffdb71967 100644
--- a/parameters.cpp
+++ b/parameters.cpp
@@ -136,7 +136,7 @@ uint P::amrMaxVelocityRefLevel = 0;
 Realf P::amrRefineLimit = 1.0;
 Realf P::amrCoarsenLimit = 0.5;
 string P::amrVelRefCriterion = "";
-int P::amrMaxSpatialRefLevel = 1;
+int P::amrMaxSpatialRefLevel = 0;
 int P::amrBoxHalfWidthX = 1;
 int P::amrBoxHalfWidthY = 1;
 int P::amrBoxHalfWidthZ = 1;
@@ -229,7 +229,7 @@ bool Parameters::addParameters(){
    Readparameters::add("AMR.max_velocity_level","Maximum velocity mesh refinement level",(uint)0);
    Readparameters::add("AMR.refine_limit","If the refinement criterion function returns a larger value than this, block is refined",(Realf)1.0);
    Readparameters::add("AMR.coarsen_limit","If the refinement criterion function returns a smaller value than this, block can be coarsened",(Realf)0.5);
-   Readparameters::add("AMR.max_spatial_level","Maximum spatial mesh refinement level",(uint)1);
+   Readparameters::add("AMR.max_spatial_level","Maximum spatial mesh refinement level",(uint)0);
    Readparameters::add("AMR.box_half_width_x","Half width of the box that is refined (for testing)",(uint)1);
    Readparameters::add("AMR.box_half_width_y","Half width of the box that is refined (for testing)",(uint)1);
    Readparameters::add("AMR.box_half_width_z","Half width of the box that is refined (for testing)",(uint)1);
diff --git a/projects/test_fp/test_fp.cpp b/projects/test_fp/test_fp.cpp
index a00aa2567..5dfc2f63a 100644
--- a/projects/test_fp/test_fp.cpp
+++ b/projects/test_fp/test_fp.cpp
@@ -270,20 +270,13 @@ namespace projects {
       // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl;
       if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl;

-      std::array<double, 3> xyz_mid;
-      xyz_mid[0] = (P::xmax + P::xmin) / 2.0;
-      xyz_mid[1] = (P::ymax + P::ymin) / 2.0;
-      xyz_mid[2] = (P::zmax + P::zmin) / 2.0;

-      std::vector<bool> refineSuccess;
-      int boxHalfWidth = 1;
-
-      for (double x = xyz_mid[0] - boxHalfWidth * P::dx_ini; x <= xyz_mid[0] + boxHalfWidth * P::dx_ini; x += 
P::dx_ini) {
-         for (double y = xyz_mid[1] - boxHalfWidth * P::dy_ini; y <= xyz_mid[1] + boxHalfWidth * P::dy_ini; y += P::dy_ini) {
-            for (double z = xyz_mid[2] - boxHalfWidth * P::dz_ini; z <= xyz_mid[2] + boxHalfWidth * P::dz_ini; z += P::dz_ini) {
-               auto xyz = xyz_mid;
+      for (double x = P::amrBoxCenterX - P::amrBoxHalfWidthX * P::dx_ini; x <= P::amrBoxCenterX + P::amrBoxHalfWidthX * P::dx_ini; x += 0.99 * P::dx_ini) {
+         for (double y = P::amrBoxCenterY - P::amrBoxHalfWidthY * P::dy_ini; y <= P::amrBoxCenterY + P::amrBoxHalfWidthY * P::dy_ini; y += 0.99 * P::dy_ini) {
+            for (double z = P::amrBoxCenterZ - P::amrBoxHalfWidthZ * P::dz_ini; z <= P::amrBoxCenterZ + P::amrBoxHalfWidthZ * P::dz_ini; z += 0.99 * P::dz_ini) {
+
+               std::array<double, 3> xyz;
                xyz[0] = x;
                xyz[1] = y;
                xyz[2] = z;
@@ -294,6 +287,7 @@ namespace projects {
             }
          }
       }
+
       std::vector<CellID> refinedCells = mpiGrid.stop_refining(true);
       if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << endl;
       if(refinedCells.size() > 0) {
@@ -302,7 +296,7 @@ namespace projects {
             std::cout << cellid << " ";
          }
          std::cout << endl;
-      }
+      }

       mpiGrid.balance_load();

From b3f4b98678783c3bea60481685a5c038c6664a49 Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Mon, 25 Feb 2019 13:35:04 +0200
Subject: [PATCH 220/602] Some tidy-up in the labeling and refinement function
 of Magnetosphere.cpp

---
 fieldsolver/gridGlue.cpp                 | 25 +++++++++++++++++++-----
 projects/Magnetosphere/Magnetosphere.cpp | 17 ++++++++++------
 2 files changed, 31 insertions(+), 11 deletions(-)

diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp
index 6803630f0..247ab7e5f 100644
--- a/fieldsolver/gridGlue.cpp
+++ b/fieldsolver/gridGlue.cpp
@@ -370,10 +370,12 @@ bool belongsToLayer(const int layer, const int x, const int y, const int z,
          if(layer == 1 && technicalGrid.get(x+ix,y+iy,z+iz)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) {
             // in the first layer, boundary cell belongs if it has a non-boundary neighbor
             belongs = true;
+            return belongs;

          } else if (layer > 1 && technicalGrid.get(x+ix,y+iy,z+iz)->sysBoundaryLayer == layer - 1) {
             // in all other layers, boundary cell belongs if it has a neighbor in the previous layer
             belongs = true;
+            return belongs;
          }
       }
    }
@@ -432,12 +434,12 @@ void setupTechnicalFsGrid(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& m
       }
    }

    // begin with layer 1
    int layer = 0;
-   bool noCellsInLayer = false;
+   bool emptyLayer = false;
    const int MAX_NUMBER_OF_BOUNDARY_LAYERS = localSize[0]*localSize[1]*localSize[2];

    // loop through layers until an empty layer is encountered
-   while(!noCellsInLayer && layer < MAX_NUMBER_OF_BOUNDARY_LAYERS) {
-      noCellsInLayer = true;
+   while(!emptyLayer && layer < MAX_NUMBER_OF_BOUNDARY_LAYERS) {
+      emptyLayer = true;
       layer++;

       // loop through all cells in grid
@@ -456,14 +458,27 @@ void setupTechnicalFsGrid(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& m
                if (belongsToLayer(layer, x, y, z, technicalGrid)) {

                   technicalGrid.get(x,y,z)->sysBoundaryLayer = layer;

                   if (layer > 1) {
                      technicalGrid.get(x,y,z)->sysBoundaryFlag = sysboundarytype::DO_NOT_COMPUTE;
                   }
-                  noCellsInLayer = false;
-                  std::cout << "boundary layer at " << x << ", " << y << ", " << z << " = " << layer << std::endl;
+                  emptyLayer = false;
                }
             }
          }
       }
    }
+
+   // for (int x = 0; x < localSize[0]; ++x) {
+   //    for (int y = 0; y < localSize[1]; ++y) {
+   //       for (int z = 0; z < localSize[2]; ++z) {
+   //          std::cout << "boundary layer+flag at " << x << ", " << y << ", " << z << " = ";
+   //          std::cout << technicalGrid.get(x,y,z)->sysBoundaryLayer;
+   //          std::cout << " ";
+   //          std::cout << technicalGrid.get(x,y,z)->sysBoundaryFlag;
+   //       }
+   //    }
+   // }
+
+
+   abort();
 }

 void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid,
diff --git
a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp
index 378631001..c7dfacccd 100644
--- a/projects/Magnetosphere/Magnetosphere.cpp
+++ b/projects/Magnetosphere/Magnetosphere.cpp
@@ -462,14 +462,19 @@ namespace projects {

       std::vector<bool> refineSuccess;

-      for (double x = P::amrBoxCenterX - P::amrBoxHalfWidthX * P::dx_ini; x <= P::amrBoxCenterX + P::amrBoxHalfWidthX * P::dx_ini; x += P::dx_ini) {
-         for (double y = P::amrBoxCenterY - P::amrBoxHalfWidthY * P::dy_ini; y <= P::amrBoxCenterY + P::amrBoxHalfWidthY * P::dy_ini; y += P::dy_ini) {
-            for (double z = P::amrBoxCenterZ - P::amrBoxHalfWidthZ * P::dz_ini; z <= P::amrBoxCenterZ + P::amrBoxHalfWidthZ * P::dz_ini; z += P::dz_ini) {
+      // for (double x = P::amrBoxCenterX - P::amrBoxHalfWidthX * P::dx_ini; x <= P::amrBoxCenterX + P::amrBoxHalfWidthX * P::dx_ini; x += P::dx_ini) {
+      //    for (double y = P::amrBoxCenterY - P::amrBoxHalfWidthY * P::dy_ini; y <= P::amrBoxCenterY + P::amrBoxHalfWidthY * P::dy_ini; y += P::dy_ini) {
+      //       for (double z = P::amrBoxCenterZ - P::amrBoxHalfWidthZ * P::dz_ini; z <= P::amrBoxCenterZ + P::amrBoxHalfWidthZ * P::dz_ini; z += P::dz_ini) {
+
+      for (int i = 0; i < 2 * P::amrBoxHalfWidthX; ++i) {
+         for (int j = 0; j < 2 * P::amrBoxHalfWidthY; ++j) {
+            for (int k = 0; k < 2 * P::amrBoxHalfWidthZ; ++k) {

-               std::array<double, 3> xyz;
-               xyz[0] = x;
-               xyz[1] = y;
-               xyz[2] = z;
+               std::array<double, 3> xyz;
+               xyz[0] = P::amrBoxCenterX + (0.5 + i - P::amrBoxHalfWidthX) * P::dx_ini;
+               xyz[1] = P::amrBoxCenterY + (0.5 + j - P::amrBoxHalfWidthY) * P::dy_ini;
+               xyz[2] = P::amrBoxCenterZ + (0.5 + k - P::amrBoxHalfWidthZ) * P::dz_ini;
+
               CellID myCell = mpiGrid.get_existing_cell(xyz);
               if (mpiGrid.refine_completely_at(xyz)) {
                  std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl;

From 628c9c3b203b5e02a9a4ff8169d2c43c4fdf2346 Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Wed, 27 Feb 2019 15:19:47 +0200
Subject: [PATCH 221/602] Unified fsgrid boundary layer labeling conditions
 with the vlasov grid. Removed abort!

---
 fieldsolver/gridGlue.cpp | 26 ++++++++++----------------
 1 file changed, 10 insertions(+), 16 deletions(-)

diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp
index 247ab7e5f..c074da8f9 100644
--- a/fieldsolver/gridGlue.cpp
+++ b/fieldsolver/gridGlue.cpp
@@ -432,24 +432,21 @@ void setupTechnicalFsGrid(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& m
       }
    }

-   // begin with layer 1
-   int layer = 0;
-   bool emptyLayer = false;
-   const int MAX_NUMBER_OF_BOUNDARY_LAYERS = localSize[0]*localSize[1]*localSize[2];
-
-   // loop through layers until an empty layer is encountered
-   while(!emptyLayer && layer < MAX_NUMBER_OF_BOUNDARY_LAYERS) {
-      emptyLayer = true;
-      layer++;
+   // In dccrg initialization the max number of boundary layers is set to 3.
+   const int MAX_NUMBER_OF_BOUNDARY_LAYERS = 3 * (mpiGrid.get_maximum_refinement_level() + 1);
+
+   // loop through max number of layers
+   for(layer = 1, layer <= MAX_NUMBER_OF_BOUNDARY_LAYERS, ++layer) {

       // loop through all cells in grid
       for (int x = 0; x < localSize[0]; ++x) {
          for (int y = 0; y < localSize[1]; ++y) {
             for (int z = 0; z < localSize[2]; ++z) {

-               // examine all cells that belong to a boundary and have their layer set to the initial value 0
-               if(technicalGrid.get(x,y,z)->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY &&
-                  technicalGrid.get(x,y,z)->sysBoundaryLayer == 0) {
+               // for the first layer, consider all cells that belong to a boundary, for other layers
+               // consider all cells that have not yet been labeled.
+               if((layer == 1 && technicalGrid.get(x,y,z)->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) ||
+                  (layer > 1 && technicalGrid.get(x,y,z)->sysBoundaryLayer == 0)) {

                   if (belongsToLayer(layer, x, y, z, technicalGrid)) {

@@ -458,7 +455,6 @@ void setupTechnicalFsGrid(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& m
                      if (layer > 1) {
                         technicalGrid.get(x,y,z)->sysBoundaryFlag = sysboundarytype::DO_NOT_COMPUTE;
                      }
-                     emptyLayer = false;
                   }
                }
            }
@@ -476,9 +472,7 @@ void setupTechnicalFsGrid(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& m
    //       }
    //    }
    // }
-
-
-   abort();
+
 }

 void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid,

From fb9f4eaf11f14a229b6ef596d881c7a4ffec5a42 Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Wed, 27 Feb 2019 15:21:14 +0200
Subject: [PATCH 222/602] Removed debug print statements

---
 sysboundary/sysboundary.cpp | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp
index 63ac59ffc..370133e1f 100644
--- a/sysboundary/sysboundary.cpp
+++ b/sysboundary/sysboundary.cpp
@@ -335,8 +335,6 @@ bool SysBoundary::classifyCells(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid) {
    bool success = true;
    vector<CellID> cells = mpiGrid.get_cells();
-
-   const bool printLines = false;

    /*set all cells to default value, not_sysboundary*/
    for(uint i=0; i<cells.size(); i++) {
       mpiGrid[cells[i]]->sysBoundaryLayer=0; /*Initial value*/
@@ -444,18 +438,12 @@ bool SysBoundary::classifyCells(dccrg::Dccrg

Date: Thu, 28 Feb 2019 14:39:57 +0200
Subject: [PATCH 223/602] Fixed typos in gridGlue.cpp. Added a check in
 grid.cpp that exits if any boundary cells have been refined.

---
 fieldsolver/gridGlue.cpp |  2 +-
 grid.cpp                 | 18 ++++++++++++++++++
 2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp
index c074da8f9..28f00c1c4 100644
--- a/fieldsolver/gridGlue.cpp
+++ b/fieldsolver/gridGlue.cpp
@@ -436,7 +436,7 @@ void setupTechnicalFsGrid(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& m
    const int MAX_NUMBER_OF_BOUNDARY_LAYERS = 3 * (mpiGrid.get_maximum_refinement_level() + 1);

    // loop through max number of layers
-   for(layer = 1, layer <= MAX_NUMBER_OF_BOUNDARY_LAYERS, ++layer) {
+   for(uint layer = 1; layer <= MAX_NUMBER_OF_BOUNDARY_LAYERS; ++layer) {
diff --git a/grid.cpp b/grid.cpp
index 7c94957bb..77d58dc85 100644
--- a/grid.cpp
+++ b/grid.cpp
@@ -163,6 +163,24 @@ void initializeGrid(

    phiprof::stop("Classify cells (sys boundary conditions)");

+   // Check refined cells do not touch boundary cells
+   phiprof::start("Check refined cells do not touch boundaries");
+   for (auto cellId : mpiGrid.get_cells()) {
+      SpatialCell* cell = mpiGrid[cellId];
+      if(cell &&
+         mpiGrid.get_refinement_level(cellId) > 0 &&
+         (cell->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ||
+          cell->sysBoundaryLayer > 0)) {
+         cerr << "(MAIN) ERROR: Boundary cell " << cellId;
+         cerr << " (sysBoundaryFlag = " << cell->sysBoundaryFlag;
+         cerr << ", sysBoundaryLayer = " << cell->sysBoundaryLayer;
+         cerr << ") has refinement level " << mpiGrid.get_refinement_level(cellId);
+         cerr << endl;
+         exit(1);
+      }
+   }
+   phiprof::stop("Check refined cells do not touch boundaries");
+
    if (P::isRestart) {
       logFile << "Restart from "<< P::restartFileName << std::endl << writeVerbose;
       phiprof::start("Read restart");

From fd34ea12c410f2a9dec8e027aee1dc2cd3a2d06f Mon Sep 17 00:00:00 2001
From: Markus Battarbee
Date: Thu, 28 Feb 2019 15:08:07 +0200
Subject: [PATCH 224/602] Simple parametrization of Magnetosphere project
 refinement

---
 projects/Magnetosphere/Magnetosphere.cfg |   8 +-
projects/Magnetosphere/Magnetosphere.cpp | 114 +++++++++++++++++++---- projects/Magnetosphere/Magnetosphere.h | 6 ++ 3 files changed, 107 insertions(+), 21 deletions(-) diff --git a/projects/Magnetosphere/Magnetosphere.cfg b/projects/Magnetosphere/Magnetosphere.cfg index 4aa49e3c1..dbaa760f4 100644 --- a/projects/Magnetosphere/Magnetosphere.cfg +++ b/projects/Magnetosphere/Magnetosphere.cfg @@ -94,8 +94,14 @@ minValue = 1.0e-13 constBgBX = 0.0 constBgBY = 0.0 constBgBZ = -1.0e-9 - noDipoleInSW = 0.0 + +refine_L2radius = 9.5565e7 # 15 RE +refine_L2tailthick = 3.1855e7 # 5 RE +refine_L1radius = 1.59275e8 # 25 RE +refine_L1tailthick = 6.371e7 # 10 RE + + [ionosphere] centerX = 0.0 centerY = 0.0 diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index c7dfacccd..7d2360866 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -52,6 +52,11 @@ namespace projects { RP::add("Magnetosphere.dipoleType","0: Normal 3D dipole, 1: line-dipole for 2D polar simulations, 2: line-dipole with mirror, 3: 3D dipole with mirror", 0); RP::add("Magnetosphere.dipoleMirrorLocationX","x-coordinate of dipole Mirror", -1.0); + RP::add("Magnetosphere.refine_L2radius","Radius of L2-refined sphere", 9.5565e7); // 15 RE + RP::add("Magnetosphere.refine_L2tailthick","Thickness of L2-refined tail region", 3.1855e7); // 5 RE + RP::add("Magnetosphere.refine_L1radius","Radius of L1-refined sphere", 1.59275e8); // 25 RE + RP::add("Magnetosphere.refine_L1tailthick","Thickness of L1-refined tail region", 6.371e7); // 10 RE + // Per-population parameters for(uint i=0; i< getObjectWrapper().particleSpecies.size(); i++) { const std::string& pop = getObjectWrapper().particleSpecies[i].name; @@ -126,6 +131,24 @@ namespace projects { } + if(!Readparameters::get("Magnetosphere.refine_L2radius", this->refine_L2radius)) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; + exit(1); + } + if(!Readparameters::get("Magnetosphere.refine_L2tailthick", this->refine_L2tailthick)) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; + exit(1); + } + if(!Readparameters::get("Magnetosphere.refine_L1radius", this->refine_L1radius)) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; + exit(1); + } + if(!Readparameters::get("Magnetosphere.refine_L1tailthick", this->refine_L1tailthick)) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" 
<< endl;
+         exit(1);
+      }
+
+
       // Per-population parameters
       for(uint i=0; i< getObjectWrapper().particleSpecies.size(); i++) {
          const std::string& pop = getObjectWrapper().particleSpecies[i].name;
@@ -457,32 +480,42 @@ namespace projects {

       // mpiGrid.set_maximum_refinement_level(std::min(this->maxSpatialRefinementLevel, mpiGrid.mapping.get_maximum_refinement_level()));

+      std::vector<CellID> refinedCells;
+
       // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl;
       if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl;

-      std::vector<bool> refineSuccess;
-
-      // for (double x = P::amrBoxCenterX - P::amrBoxHalfWidthX * P::dx_ini; x <= P::amrBoxCenterX + P::amrBoxHalfWidthX * P::dx_ini; x += P::dx_ini) {
-      //    for (double y = P::amrBoxCenterY - P::amrBoxHalfWidthY * P::dy_ini; y <= P::amrBoxCenterY + P::amrBoxHalfWidthY * P::dy_ini; y += P::dy_ini) {
-      //       for (double z = P::amrBoxCenterZ - P::amrBoxHalfWidthZ * P::dz_ini; z <= P::amrBoxCenterZ + P::amrBoxHalfWidthZ * P::dz_ini; z += P::dz_ini) {
-
-      for (int i = 0; i < 2 * P::amrBoxHalfWidthX; ++i) {
-         for (int j = 0; j < 2 * P::amrBoxHalfWidthY; ++j) {
-            for (int k = 0; k < 2 * P::amrBoxHalfWidthZ; ++k) {
+      // Calculate regions for refinement
+      if (P::amrMaxSpatialRefLevel > 0) {
+         // L1 refinement. Does not touch a 2-cell thick (at L0) boundary layer.
+         for (uint i = 2; i < P::xcells_ini-2; ++i) {
+            for (uint j = 2; j < P::ycells_ini-2; ++j) {
+               for (uint k = 2; k < P::zcells_ini-2; ++k) {

-               std::array<double, 3> xyz;
-               xyz[0] = P::amrBoxCenterX + (0.5 + i - P::amrBoxHalfWidthX) * P::dx_ini;
-               xyz[1] = P::amrBoxCenterY + (0.5 + j - P::amrBoxHalfWidthY) * P::dy_ini;
-               xyz[2] = P::amrBoxCenterZ + (0.5 + k - P::amrBoxHalfWidthZ) * P::dz_ini;
-
-               CellID myCell = mpiGrid.get_existing_cell(xyz);
-               if (mpiGrid.refine_completely_at(xyz)) {
-                  std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl;
-               }
+                  std::array<double, 3> xyz;
+                  xyz[0] = P::xmin + (i+0.5)*P::dx_ini;
+                  xyz[1] = P::ymin + (j+0.5)*P::dy_ini;
+                  xyz[2] = P::zmin + (k+0.5)*P::dz_ini;
+
+                  Real radius2 = (xyz[0]*xyz[0]+xyz[1]*xyz[1]+xyz[2]*xyz[2]);
+                  // Check if cell is within L1 sphere, or within L1 tail slice
+                  if ((radius2 < refine_L1radius*refine_L1radius) ||
+                      ((xyz[0] < 0) && (std::abs(xyz[1]) < refine_L1radius) &&
+                       (std::abs(xyz[2]) < refine_L1tailthick))) {
+
+                     CellID myCell = mpiGrid.get_existing_cell(xyz);
+                     // Check if the cell is tagged as do not compute
+                     if (mpiGrid[myCell]->sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) {
+                        if (mpiGrid.refine_completely_at(xyz)) {
+                           std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl;
+                        }
+                     }
+                  }
+               }
             }
-         }
+         }
+      }
       }
-      std::vector<CellID> refinedCells = mpiGrid.stop_refining(true);
+      refinedCells = mpiGrid.stop_refining(true);
       if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << endl;
       if(refinedCells.size() > 0) {
          std::cout << "Refined cells produced by rank " << myRank << " are: ";
@@ -491,6 +524,47 @@ namespace projects {
          }
          std::cout << endl;
       }
+
+      if (P::amrMaxSpatialRefLevel > 1) {
+         // L2 refinement. Does not touch a 5-cell thick (at L1) boundary layer.
+         // This means a boundary width of 2 L0 cells and one L1 cell in between
+         // as a buffer
+         for (uint i = 5; i < 2*P::xcells_ini-5; ++i) {
+            for (uint j = 5; j < 2*P::ycells_ini-5; ++j) {
+               for (uint k = 5; k < 2*P::zcells_ini-5; ++k) {
+
+                  std::array<double, 3> xyz;
+                  xyz[0] = P::xmin + (i+0.5)*0.5*P::dx_ini;
+                  xyz[1] = P::ymin + (j+0.5)*0.5*P::dy_ini;
+                  xyz[2] = P::zmin + (k+0.5)*0.5*P::dz_ini;
+
+                  Real radius2 = (xyz[0]*xyz[0]+xyz[1]*xyz[1]+xyz[2]*xyz[2]);
+                  // Check if cell is within L2 sphere, or within L2 tail slice
+                  if ((radius2 < refine_L2radius*refine_L2radius) ||
+                      ((xyz[0] < 0) && (std::abs(xyz[1]) < refine_L2radius) &&
+                       (std::abs(xyz[2]) < refine_L2tailthick))) {
+
+                     CellID myCell = mpiGrid.get_existing_cell(xyz);
+                     // Check if the cell is tagged as do not compute
+                     if (mpiGrid[myCell]->sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) {
+                        if (mpiGrid.refine_completely_at(xyz)) {
+                           std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl;
+                        }
+                     }
+                  }
+               }
+            }
+         }
+      }
+      refinedCells = mpiGrid.stop_refining(true);
+      if(myRank == MASTER_RANK) std::cout << "Finished second level of refinement" << endl;
+      if(refinedCells.size() > 0) {
+         std::cout << "Refined cells produced by rank " << myRank << " are: ";
+         for (auto cellid : refinedCells) {
+            std::cout << cellid << " ";
+         }
+         std::cout << endl;
+      }

       mpiGrid.balance_load();
diff --git a/projects/Magnetosphere/Magnetosphere.h b/projects/Magnetosphere/Magnetosphere.h
index 3259e5d64..28d6bdd32 100644
--- a/projects/Magnetosphere/Magnetosphere.h
+++ b/projects/Magnetosphere/Magnetosphere.h
@@ -80,6 +80,12 @@ namespace projects {
       Real dipoleScalingFactor;
       Real dipoleMirrorLocationX;
       uint dipoleType;
+
+      Real refine_L2radius;
+      Real refine_L2tailthick;
+      Real refine_L1radius;
+      Real refine_L1tailthick;
+
       std::vector<MagnetosphereSpeciesParameters> speciesParams;
    }; // class Magnetosphere
 } // namespace projects

From e995b1bcdf4948a8b59c008d9ac67a952bb911bf Mon Sep 17 00:00:00 2001
From: tkoskela
Date: Fri, 1 Mar 2019 11:22:41 +0200
Subject: [PATCH 225/602] Changed boundary refinement check to allow refined
 boundaries as long as refinement does not change within the boundary and its
 neighbors.
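A condensed, self-contained sketch of the uniformity test this message describes; plain ints stand in for dccrg cell IDs and a lookup vector stands in for mpiGrid.get_refinement_level(), both substitutions being assumptions for illustration only. The checkRefinement() function added below gathers the inner and outer boundary cells together with their neighbors and applies this kind of test to each set.

#include <iostream>
#include <set>
#include <vector>

// True when every cell of the group sits on one refinement level; refined
// boundaries are acceptable as long as this holds for the boundary cells
// and their neighbors.
bool isUniformlyRefined(const std::set<int>& cellIds,
                        const std::vector<int>& refLevelOf) {
   if (cellIds.empty()) return true;
   const int refLvl = refLevelOf[*cellIds.begin()];
   for (const int id : cellIds) {
      if (refLevelOf[id] != refLvl) return false;
   }
   return true;
}

int main() {
   // Cells 0-3 at level 1, cell 4 at level 0: mixed levels fail the check.
   std::vector<int> refLevelOf = {1, 1, 1, 1, 0};
   std::cout << isUniformlyRefined({0, 1, 2, 3}, refLevelOf) << " "        // prints 1
             << isUniformlyRefined({0, 1, 2, 3, 4}, refLevelOf) << std::endl; // prints 0
   return 0;
}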
--- grid.cpp | 22 ++++++----------- sysboundary/sysboundary.cpp | 47 +++++++++++++++++++++++++++++++++++++ sysboundary/sysboundary.h | 1 + 3 files changed, 55 insertions(+), 15 deletions(-) diff --git a/grid.cpp b/grid.cpp index 77d58dc85..159b1998e 100644 --- a/grid.cpp +++ b/grid.cpp @@ -164,22 +164,14 @@ void initializeGrid( phiprof::stop("Classify cells (sys boundary conditions)"); // Check refined cells do not touch boundary cells - phiprof::start("Check refined cells do not touch boundaries"); - for (auto cellId : mpiGrid.get_cells()) { - SpatialCell* cell = mpiGrid[cellId]; - if(cell && - mpiGrid.get_refinement_level(cellId) > 0 && - (cell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || - cell->sysBoundaryLayer > 0)) { - cerr << "(MAIN) ERROR: Boundary cell " << cellId; - cerr << " (sysBoundaryFlag = " << cell->sysBoundaryFlag; - cerr << ", sysBoundaryLayer = " << cell->sysBoundaryLayer; - cerr << ") has refinement level " << mpiGrid.get_refinement_level(cellId); - cerr << endl; - exit(1); - } + phiprof::start("Check boundary refinement"); + + if(!sysBoundaries.checkRefinement(mpiGrid)) { + cerr << "(MAIN) ERROR: Boundary cells must have identical refinement level " << endl; + exit(1); } - phiprof::stop("Check refined cells do not touch boundaries"); + + phiprof::stop("Check boundary refinement"); if (P::isRestart) { logFile << "Restart from "<< P::restartFileName << std::endl << writeVerbose; diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp index 370133e1f..8d6bffb16 100644 --- a/sysboundary/sysboundary.cpp +++ b/sysboundary/sysboundary.cpp @@ -325,6 +325,53 @@ bool SysBoundary::initSysBoundaries( return success; } +bool SysBoundary::checkRefinement(dccrg::Dccrg& mpiGrid) { + + // Set is used to avoid storing duplicates - each cell only needs to be checked once + std::set innerBoundaryCells; + std::set outerBoundaryCells; + + // Collect cells by sysboundarytype + for (auto cellId : mpiGrid.get_cells()) { + SpatialCell* cell = mpiGrid[cellId]; + if (cell->sysBoundaryFlag == sysboundarytype::IONOSPHERE) { + innerBoundaryCells.insert(cellId); + if (cell->sysBoundaryLayer == 1) { + // Add non-boundary neighbors of layer 1 cells + auto* nbrPairVector = mpiGrid.get_neighbors_of(cellId,FULL_NEIGHBORHOOD_ID); + for (auto nbrPair : *nbrPairVector) { + innerBoundaryCells.insert(nbrPair.first); + } + } + } else if (cell->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && + cell->sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { + outerBoundaryCells.insert(cellId); + // Add non-boundary neighbors of outer boundary cells + auto* nbrPairVector = mpiGrid.get_neighbors_of(cellId,FULL_NEIGHBORHOOD_ID); + for (auto nbrPair : *nbrPairVector) { + outerBoundaryCells.insert(nbrPair.first); + } + } + } + + int refLvl0 = mpiGrid.get_refinement_level(*innerBoundaryCells.begin()); + for (auto cellId : innerBoundaryCells) { + if (mpiGrid.get_refinement_level(cellId) != refLvl0) { + return false; + } + } + + refLvl0 = mpiGrid.get_refinement_level(*outerBoundaryCells.begin()); + for (auto cellId : outerBoundaryCells) { + if (mpiGrid.get_refinement_level(cellId) != refLvl0) { + return false; + } + } + + return true; +} + + /*!\brief Classify all simulation cells with respect to the system boundary conditions. 
* * Loops through all cells and and for each assigns the correct sysBoundaryFlag depending on diff --git a/sysboundary/sysboundary.h b/sysboundary/sysboundary.h index 30eda1513..7c29a5c21 100644 --- a/sysboundary/sysboundary.h +++ b/sysboundary/sysboundary.h @@ -67,6 +67,7 @@ class SysBoundary { Project& project, creal& t ); + bool checkRefinement(dccrg::Dccrg& mpiGrid); bool classifyCells(dccrg::Dccrg& mpiGrid); bool applyInitialState( dccrg::Dccrg& mpiGrid, From 7cf38b53691446f1aa541df4a7daccfa45538d91 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 1 Mar 2019 11:23:20 +0200 Subject: [PATCH 226/602] Removed unnecessary comment lines --- projects/Magnetosphere/Magnetosphere.cpp | 7 ------- 1 file changed, 7 deletions(-) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index c7dfacccd..f710ccd71 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -455,17 +455,10 @@ namespace projects { int myRank; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - // mpiGrid.set_maximum_refinement_level(std::min(this->maxSpatialRefinementLevel, mpiGrid.mapping.get_maximum_refinement_level())); - - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; std::vector refineSuccess; - // for (double x = P::amrBoxCenterX - P::amrBoxHalfWidthX * P::dx_ini; x <= P::amrBoxCenterX + P::amrBoxHalfWidthX * P::dx_ini; x += P::dx_ini) { - // for (double y = P::amrBoxCenterY - P::amrBoxHalfWidthY * P::dy_ini; y <= P::amrBoxCenterY + P::amrBoxHalfWidthY * P::dy_ini; y += P::dy_ini) { - // for (double z = P::amrBoxCenterZ - P::amrBoxHalfWidthZ * P::dz_ini; z <= P::amrBoxCenterZ + P::amrBoxHalfWidthZ * P::dz_ini; z += P::dz_ini) { - for (int i = 0; i < 2 * P::amrBoxHalfWidthX; ++i) { for (int j = 0; j < 2 * P::amrBoxHalfWidthY; ++j) { for (int k = 0; k < 2 * P::amrBoxHalfWidthZ; ++k) { From 7f4ca9734105dffd095119b65213b25b829482e6 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Wed, 6 Mar 2019 14:49:39 +0200 Subject: [PATCH 227/602] Fix attempt to readNblocks in ioread.cpp to take AMR into account --- ioread.cpp | 119 ++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 103 insertions(+), 16 deletions(-) diff --git a/ioread.cpp b/ioread.cpp index c1dc5bc31..49e079432 100644 --- a/ioread.cpp +++ b/ioread.cpp @@ -230,13 +230,47 @@ bool readNBlocks(vlsv::ParallelReader& file,const std::string& meshName, // (This is *not* the physical coordinate bounding box.) 
uint64_t bbox[6]; uint64_t* bbox_ptr = bbox; - list > attribs; - attribs.push_back(make_pair("mesh",meshName)); - if (file.read("MESH_BBOX",attribs,0,6,bbox_ptr,false) == false) return false; + list > attribsIn; + map attribsOut; + attribsIn.push_back(make_pair("mesh",meshName)); + + // Read number of domains and domain sizes + uint64_t N_domains; + file.getArrayAttributes("MESH_DOMAIN_SIZES",attribsIn,attribsOut); + auto it = attribsOut.find("arraysize"); + if (it == attribsOut.end()) { + cerr << "VLSV\t\t ERROR: Array 'MESH_DOMAIN_SIZES' XML tag does not have attribute 'arraysize'" << endl; + return false; + } else { + cerr << "VLSV\t\t Mesh has " << it->second << " domains" << endl; + N_domains = atoi(it->second.c_str()); + } + + uint64_t N_spatialCells = 0; + + if(N_domains == 1) { + + if (file.read("MESH_BBOX",attribsIn,0,6,bbox_ptr,false) == false) return false; + + // Resize the output vector and init to zero values + N_spatialCells = bbox[0]*bbox[1]*bbox[2]; + + } else { + + int64_t* domainInfo = NULL; + if (file.read("MESH_DOMAIN_SIZES",attribsIn,0,N_domains,domainInfo) == false) return false; + + for (uint i_domain = 0; i_domain < N_domains; ++i_domain) { + + N_spatialCells += domainInfo[2*i_domain]; + + } + + } - // Resize the output vector and init to zero values - const uint64_t N_spatialCells = bbox[0]*bbox[1]*bbox[2]; nBlocks.resize(N_spatialCells); + + #pragma omp parallel for for (size_t i=0; i::const_iterator s=speciesNames.begin(); s!=speciesNames.end(); ++s) { - attribs.clear(); - attribs.push_back(make_pair("mesh",meshName)); - attribs.push_back(make_pair("name",*s)); - if (file.getArrayInfo("BLOCKSPERCELL",attribs,arraySize,vectorSize,dataType,byteSize) == false) return false; + attribsIn.clear(); + attribsIn.push_back(make_pair("mesh",meshName)); + attribsIn.push_back(make_pair("name",*s)); + if (file.getArrayInfo("BLOCKSPERCELL",attribsIn,arraySize,vectorSize,dataType,byteSize) == false) return false; - if (file.read("BLOCKSPERCELL",attribs,0,arraySize,buffer) == false) { + if (file.read("BLOCKSPERCELL",attribsIn,0,arraySize,buffer) == false) { delete [] buffer; buffer = NULL; return false; } @@ -873,6 +907,8 @@ bool exec_readGrid(dccrg::Dccrg& mpiGrid, success = readNBlocks(file,meshName,nBlocks,MASTER_RANK,MPI_COMM_WORLD); } + exitOnError(success,"1 (RESTART) Cell migration failed",MPI_COMM_WORLD); + //make sure all cells are empty, we will anyway overwrite everything and // in that case moving cells is easier... { @@ -908,14 +944,16 @@ bool exec_readGrid(dccrg::Dccrg& mpiGrid, mpiGrid.pin(fileCells[i],newCellProcess); } } - - //Do initial load balance based on pins. Need to transfer at least sysboundaryflags + SpatialCell::set_mpi_transfer_type(Transfer::ALL_SPATIAL_DATA); - mpiGrid.balance_load(false); + + const bool useZoltan = false; + + //Do initial load balance based on pins. 
Need to transfer at least sysboundaryflags + mpiGrid.balance_load(useZoltan); //update list of local gridcells recalculateLocalCellsCache(); - //getObjectWrapper().meshData.reallocate(); //get new list of local gridcells const vector& gridCells = getLocalCells(); @@ -925,17 +963,66 @@ bool exec_readGrid(dccrg::Dccrg& mpiGrid, mpiGrid.unpin(gridCells[i]); } + exitOnError(success,"(RESTART) 2 Cell migration failed",MPI_COMM_WORLD); + + int refCount = 0; + int refinedBdryCount = 0; + int coarseBdryCount = 0; + int totalCount = 0; + for (auto cellid : gridCells) { + SpatialCell* cell = mpiGrid[cellid]; + if(mpiGrid.is_local(cellid)) { + refCount += mpiGrid.get_refinement_level(cellid); + if(cell->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { + if(mpiGrid.get_refinement_level(cellid) > 0) { + refinedBdryCount++; + } else { + coarseBdryCount++; + } + } + totalCount++; + } + } + // Check for errors, has migration succeeded if (localCells != gridCells.size() ) { success=false; + cout << "Size check FAILED: rank "<< myRank << ", localCells = " << localCells << " gridCells.size() = " << gridCells.size(); + } else { + cout << "Size check SUCCESS: rank "<< myRank << ", localCells = " << localCells << " gridCells.size() = " << gridCells.size(); } + cout << " numberOfRefinedBoundaryCells = " << refinedBdryCount; + cout << " numberOfCoarseBoundaryCells = " << coarseBdryCount; + cout << endl; + +// MPI_Barrier(MPI_COMM_WORLD); + +// for (int i = 0; i < processes; ++i) { +// MPI_Barrier(MPI_COMM_WORLD); +// if(i == myRank) { +// cout << "List of cellids for rank " << myRank << ": "; +// for (auto cellid : gridCells) { +// if(mpiGrid.is_local(cellid)) { +// cout << cellid << " "; +// } +// } +// cout << endl; +// cout << endl; +// } +// MPI_Barrier(MPI_COMM_WORLD); +// } + + exitOnError(success,"(RESTART) 3 Cell migration failed",MPI_COMM_WORLD); + if (success == true) { for (uint64_t i=localCellStartOffset; i Date: Thu, 7 Mar 2019 10:26:53 +0200 Subject: [PATCH 228/602] Improved boundary refinement check --- sysboundary/sysboundary.cpp | 46 ++++++++++++++++++++++--------------- 1 file changed, 28 insertions(+), 18 deletions(-) diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp index 8d6bffb16..882659a7c 100644 --- a/sysboundary/sysboundary.cpp +++ b/sysboundary/sysboundary.cpp @@ -331,39 +331,49 @@ bool SysBoundary::checkRefinement(dccrg::Dccrg innerBoundaryCells; std::set outerBoundaryCells; + int innerBoundaryRefLvl = -1; + int outerBoundaryRefLvl = -1; + // Collect cells by sysboundarytype for (auto cellId : mpiGrid.get_cells()) { SpatialCell* cell = mpiGrid[cellId]; - if (cell->sysBoundaryFlag == sysboundarytype::IONOSPHERE) { - innerBoundaryCells.insert(cellId); - if (cell->sysBoundaryLayer == 1) { - // Add non-boundary neighbors of layer 1 cells + if(cell) { + if (cell->sysBoundaryFlag == sysboundarytype::IONOSPHERE) { + innerBoundaryCells.insert(cellId); + innerBoundaryRefLvl = mpiGrid.get_refinement_level(cellId); + if (cell->sysBoundaryLayer == 1) { + // Add non-boundary neighbors of layer 1 cells + auto* nbrPairVector = mpiGrid.get_neighbors_of(cellId,FULL_NEIGHBORHOOD_ID); + for (auto nbrPair : *nbrPairVector) { + if(nbrPair.first != INVALID_CELLID) { + innerBoundaryCells.insert(nbrPair.first); + } + } + } + } else if (cell->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && + cell->sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { + outerBoundaryCells.insert(cellId); + outerBoundaryRefLvl = mpiGrid.get_refinement_level(cellId); + // Add non-boundary 
neighbors of outer boundary cells auto* nbrPairVector = mpiGrid.get_neighbors_of(cellId,FULL_NEIGHBORHOOD_ID); for (auto nbrPair : *nbrPairVector) { - innerBoundaryCells.insert(nbrPair.first); + if(nbrPair.first != INVALID_CELLID) { + outerBoundaryCells.insert(nbrPair.first); + } } } - } else if (cell->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && - cell->sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { - outerBoundaryCells.insert(cellId); - // Add non-boundary neighbors of outer boundary cells - auto* nbrPairVector = mpiGrid.get_neighbors_of(cellId,FULL_NEIGHBORHOOD_ID); - for (auto nbrPair : *nbrPairVector) { - outerBoundaryCells.insert(nbrPair.first); - } - } + } } - int refLvl0 = mpiGrid.get_refinement_level(*innerBoundaryCells.begin()); for (auto cellId : innerBoundaryCells) { - if (mpiGrid.get_refinement_level(cellId) != refLvl0) { + if (cellId != INVALID_CELLID && mpiGrid.get_refinement_level(cellId) != innerBoundaryRefLvl) { return false; } } - refLvl0 = mpiGrid.get_refinement_level(*outerBoundaryCells.begin()); for (auto cellId : outerBoundaryCells) { - if (mpiGrid.get_refinement_level(cellId) != refLvl0) { + if (cellId != INVALID_CELLID && mpiGrid.get_refinement_level(cellId) != outerBoundaryRefLvl) { + // cout << "Failed refinement check " << cellId << " " << mpiGrid.get_refinement_level(cellId) << " "<< outerBoundaryRefLvl << endl; return false; } } From 3e374c9b53976c2bada02c95e33961cd2e3e509e Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Thu, 7 Mar 2019 10:43:00 +0200 Subject: [PATCH 229/602] Removed debugging abort --- fieldsolver/gridGlue.cpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 247ab7e5f..a06284b1a 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -475,10 +475,8 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m // std::cout << technicalGrid.get(x,y,z)->sysBoundaryFlag; // } // } - // } - - - abort(); + // } + //abort(); } void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid, From 1a2e4b678c35a2dcd7dc7e295c1db42017dae2d6 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 8 Mar 2019 08:48:18 +0200 Subject: [PATCH 230/602] Modified refinement function --- projects/Flowthrough/Flowthrough.cpp | 71 ++++++++++++++++++++-------- 1 file changed, 50 insertions(+), 21 deletions(-) diff --git a/projects/Flowthrough/Flowthrough.cpp b/projects/Flowthrough/Flowthrough.cpp index e00ab4c41..a01e8769e 100644 --- a/projects/Flowthrough/Flowthrough.cpp +++ b/projects/Flowthrough/Flowthrough.cpp @@ -227,27 +227,25 @@ namespace projects { centerPoints.push_back(point); return centerPoints; } - - bool Flowthrough::refineSpatialCells( dccrg::Dccrg& mpiGrid ) const { - int myRank; + bool Flowthrough::refineSpatialCells( dccrg::Dccrg& mpiGrid ) const { + + int myRank; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - // mpiGrid.set_maximum_refinement_level(std::min(this->maxSpatialRefinementLevel, mpiGrid.mapping.get_maximum_refinement_level())); - - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; - - std::vector refineSuccess; - for (double x = P::amrBoxCenterX - P::amrBoxHalfWidthX * P::dx_ini; x <= P::amrBoxCenterX + P::amrBoxHalfWidthX * P::dx_ini; x += P::dx_ini) { - for (double y = P::amrBoxCenterY - P::amrBoxHalfWidthY * P::dy_ini; y <= P::amrBoxCenterY + P::amrBoxHalfWidthY * P::dy_ini; y += P::dy_ini) { 
-            for (double z = P::amrBoxCenterZ - P::amrBoxHalfWidthZ * P::dz_ini; z <= P::amrBoxCenterZ + P::amrBoxHalfWidthZ * P::dz_ini; z += P::dz_ini) {
+      std::vector<bool> refineSuccess;
+
+      for (int i = 0; i < 2 * P::amrBoxHalfWidthX; ++i) {
+         for (int j = 0; j < 2 * P::amrBoxHalfWidthY; ++j) {
+            for (int k = 0; k < 2 * P::amrBoxHalfWidthZ; ++k) {

                std::array<double, 3> xyz;
-               xyz[0] = x;
-               xyz[1] = y;
-               xyz[2] = z;
+               xyz[0] = P::amrBoxCenterX + (0.5 + i - P::amrBoxHalfWidthX) * P::dx_ini;
+               xyz[1] = P::amrBoxCenterY + (0.5 + j - P::amrBoxHalfWidthY) * P::dy_ini;
+               xyz[2] = P::amrBoxCenterZ + (0.5 + k - P::amrBoxHalfWidthZ) * P::dz_ini;
+
               CellID myCell = mpiGrid.get_existing_cell(xyz);
               if (mpiGrid.refine_completely_at(xyz)) {
                  std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl;
@@ -255,18 +253,49 @@ namespace projects {
             }
          }
       }
-      std::vector<CellID> refinedCells = mpiGrid.stop_refining(true);
+      std::vector<CellID> refinedCells = mpiGrid.stop_refining(true);
       if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << endl;
       if(refinedCells.size() > 0) {
-         std::cout << "Refined cells produced by rank " << myRank << " are: ";
-         for (auto cellid : refinedCells) {
-            std::cout << cellid << " ";
-         }
-         std::cout << endl;
+         std::cout << "Refined cells produced by rank " << myRank << " are: ";
+         for (auto cellid : refinedCells) {
+            std::cout << cellid << " ";
+         }
+         std::cout << endl;
       }
-
+
       mpiGrid.balance_load();

+      if(mpiGrid.get_maximum_refinement_level() > 1) {
+
+         for (int i = 0; i < 0.5 * P::amrBoxHalfWidthX; ++i) {
+            for (int j = 0; j < 0.5 * P::amrBoxHalfWidthY; ++j) {
+               for (int k = 0; k < 0.5 * P::amrBoxHalfWidthZ; ++k) {
+
+                  std::array<double, 3> xyz;
+                  xyz[0] = P::amrBoxCenterX + (0.5 + i - 0.5 * P::amrBoxHalfWidthX) * P::dx_ini;
+                  xyz[1] = P::amrBoxCenterY + (0.5 + j - 0.5 * P::amrBoxHalfWidthY) * P::dy_ini;
+                  xyz[2] = P::amrBoxCenterZ + (0.5 + k - 0.5 * P::amrBoxHalfWidthZ) * P::dz_ini;
+
+                  CellID myCell = mpiGrid.get_existing_cell(xyz);
+                  if (mpiGrid.refine_completely_at(xyz)) {
+                     std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl;
+                  }
+               }
+            }
+         }
+
+         std::vector<CellID> refinedCells = mpiGrid.stop_refining(true);
+         if(myRank == MASTER_RANK) std::cout << "Finished second level of refinement" << endl;
+         if(refinedCells.size() > 0) {
+            std::cout << "Refined cells produced by rank " << myRank << " are: ";
+            for (auto cellid : refinedCells) {
+               std::cout << cellid << " ";
+            }
+            std::cout << endl;
+         }
+         mpiGrid.balance_load();
+      }
+
       return true;
    }

From 4f6de759d2478119faa8fec4fa96e79a262d470e Mon Sep 17 00:00:00 2001
From: Tuomas Koskela
Date: Fri, 8 Mar 2019 15:22:21 +0200
Subject: [PATCH 231/602] Write MESH_DOMAIN_SIZE into restart files

---
 iowrite.cpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/iowrite.cpp b/iowrite.cpp
index 0b262684b..b1e1f0a13 100644
--- a/iowrite.cpp
+++ b/iowrite.cpp
@@ -1127,6 +1127,10 @@ bool writeRestart(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid,

    //Write zone global id numbers:
    if( writeZoneGlobalIdNumbers( mpiGrid, vlsvWriter, meshName, local_cells, ghost_cells ) == false ) return false;
+
+   //Write domain sizes:
+   if( writeDomainSizes( vlsvWriter, meshName, local_cells.size(), ghost_cells.size() ) == false ) return false;
+
    phiprof::stop("metadataIO");
    phiprof::start("reduceddataIO");
    //write out DROs we need for restarts

From 83a8f18fe685fdb11db9098ffd478ac1c8c7342e Mon Sep 17 00:00:00 2001
From: Tuomas Koskela
Date: Fri, 8 Mar 2019 15:29:02 +0200
Subject: [PATCH 232/602] Clean-up

---
 ioread.cpp | 56 +++---------------------------------------------------
 1
file changed, 3 insertions(+), 53 deletions(-) diff --git a/ioread.cpp b/ioread.cpp index 49e079432..02953780b 100644 --- a/ioread.cpp +++ b/ioread.cpp @@ -907,8 +907,6 @@ bool exec_readGrid(dccrg::Dccrg& mpiGrid, success = readNBlocks(file,meshName,nBlocks,MASTER_RANK,MPI_COMM_WORLD); } - exitOnError(success,"1 (RESTART) Cell migration failed",MPI_COMM_WORLD); - //make sure all cells are empty, we will anyway overwrite everything and // in that case moving cells is easier... { @@ -947,10 +945,8 @@ bool exec_readGrid(dccrg::Dccrg& mpiGrid, SpatialCell::set_mpi_transfer_type(Transfer::ALL_SPATIAL_DATA); - const bool useZoltan = false; - //Do initial load balance based on pins. Need to transfer at least sysboundaryflags - mpiGrid.balance_load(useZoltan); + mpiGrid.balance_load(false); //update list of local gridcells recalculateLocalCellsCache(); @@ -963,56 +959,10 @@ bool exec_readGrid(dccrg::Dccrg& mpiGrid, mpiGrid.unpin(gridCells[i]); } - exitOnError(success,"(RESTART) 2 Cell migration failed",MPI_COMM_WORLD); - - int refCount = 0; - int refinedBdryCount = 0; - int coarseBdryCount = 0; - int totalCount = 0; - for (auto cellid : gridCells) { - SpatialCell* cell = mpiGrid[cellid]; - if(mpiGrid.is_local(cellid)) { - refCount += mpiGrid.get_refinement_level(cellid); - if(cell->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { - if(mpiGrid.get_refinement_level(cellid) > 0) { - refinedBdryCount++; - } else { - coarseBdryCount++; - } - } - totalCount++; - } - } - // Check for errors, has migration succeeded if (localCells != gridCells.size() ) { success=false; - cout << "Size check FAILED: rank "<< myRank << ", localCells = " << localCells << " gridCells.size() = " << gridCells.size(); - } else { - cout << "Size check SUCCESS: rank "<< myRank << ", localCells = " << localCells << " gridCells.size() = " << gridCells.size(); - } - cout << " numberOfRefinedBoundaryCells = " << refinedBdryCount; - cout << " numberOfCoarseBoundaryCells = " << coarseBdryCount; - cout << endl; - -// MPI_Barrier(MPI_COMM_WORLD); - -// for (int i = 0; i < processes; ++i) { -// MPI_Barrier(MPI_COMM_WORLD); -// if(i == myRank) { -// cout << "List of cellids for rank " << myRank << ": "; -// for (auto cellid : gridCells) { -// if(mpiGrid.is_local(cellid)) { -// cout << cellid << " "; -// } -// } -// cout << endl; -// cout << endl; -// } -// MPI_Barrier(MPI_COMM_WORLD); -// } - - exitOnError(success,"(RESTART) 3 Cell migration failed",MPI_COMM_WORLD); + } if (success == true) { for (uint64_t i=localCellStartOffset; i& mpiGrid, } } - exitOnError(success,"(RESTART) Cell 4 migration failed",MPI_COMM_WORLD); + exitOnError(success,"(RESTART) Cell migration failed",MPI_COMM_WORLD); // Set cell coordinates based on cfg (mpigrid) information for (size_t i=0; i Date: Fri, 8 Mar 2019 15:31:22 +0200 Subject: [PATCH 233/602] DCCRG function mapping.get_cell_length_in_indices was not doing what I expected. Replaced calls to it by a pow(2,mpiGrid.get_maximum_refinement_level() - mpiGrid.get_refinement_level(cellid)), which does the thing originally intended. --- fieldsolver/gridGlue.cpp | 10 +++++----- vlasovsolver/cpu_trans_map_amr.cpp | 3 ++- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 2bd6c2286..2ff2ba607 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -537,17 +537,17 @@ Map from dccrg cell id to fsgrid global cell ids when they aren't identical (ie. 
std::vector mapDccrgIdToFsGridGlobalID(dccrg::Dccrg& mpiGrid, CellID dccrgID) { - const auto cellLength = mpiGrid.mapping.get_cell_length_in_indices(dccrgID); - const auto gridLength = mpiGrid.length.get(); const auto maxRefLvl = mpiGrid.get_maximum_refinement_level(); + const auto refLvl = mpiGrid.get_refinement_level(dccrgID); + const auto cellLength = pow(2,maxRefLvl-refLvl); const auto topLeftIndices = mpiGrid.mapping.get_indices(dccrgID); std::array indices; std::vector> allIndices; std::array fsgridDims; - fsgridDims[0] = P::xcells_ini * (mpiGrid.get_maximum_refinement_level() + 1); - fsgridDims[1] = P::ycells_ini * (mpiGrid.get_maximum_refinement_level() + 1); - fsgridDims[2] = P::zcells_ini * (mpiGrid.get_maximum_refinement_level() + 1); + fsgridDims[0] = P::xcells_ini * pow(2,mpiGrid.get_maximum_refinement_level()); + fsgridDims[1] = P::ycells_ini * pow(2,mpiGrid.get_maximum_refinement_level()); + fsgridDims[2] = P::zcells_ini * pow(2,mpiGrid.get_maximum_refinement_level()); for (uint k = 0; k < cellLength; ++k) { for (uint j = 0; j < cellLength; ++j) { diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 532ec9eb8..9589cde44 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -362,7 +362,8 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg localIndices; auto indices = grid.mapping.get_indices(id); - auto length = grid.mapping.get_cell_length_in_indices(grid.mapping.get_level_0_parent(id)); + //auto length = grid.mapping.get_cell_length_in_indices(grid.mapping.get_level_0_parent(id)); + int length = pow(2,grid.get_maximum_refinement_level() - grid.get_refinement_level(id)); for (auto index : indices) { localIndices.push_back(index % length); } From 79cb9b04b44b054c1aa4c100bdbdd41420155f5d Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 8 Mar 2019 15:32:48 +0200 Subject: [PATCH 234/602] Cleaned up unused lines --- vlasiator.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 81207037b..71dfef411 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -401,9 +401,6 @@ int main(int argn,char* args[]) { std::array periodicity{mpiGrid.topology.is_periodic(0), mpiGrid.topology.is_periodic(1), mpiGrid.topology.is_periodic(2)}; - - const int fsGridSize = (fsGridDimensions[0] + 4) * (fsGridDimensions[1] + 4) * (fsGridDimensions[2] + 4); - // setting to 0, values greater than 2^21 cause overflows on cray-mpich FsGrid< std::array, 2> perBGrid(fsGridDimensions, comm, periodicity); FsGrid< std::array, 2> perBDt2Grid(fsGridDimensions, comm, periodicity); From f9c78292660d2c792e23dc37ae0f33ee11c3a077 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 8 Mar 2019 16:44:26 +0200 Subject: [PATCH 235/602] Trimmed outputs --- projects/Magnetosphere/Magnetosphere.cpp | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index 1cdb3e5be..a726151b9 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -509,9 +509,9 @@ namespace projects { CellID myCell = mpiGrid.get_existing_cell(xyz); // Check if the cell is tagged as do not compute if (mpiGrid[myCell]->sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { - if (mpiGrid.refine_completely_at(xyz)) { - std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; - } + if (!mpiGrid.refine_completely_at(xyz)) { + std::cerr << "ERROR: Failed to 
refine cell " << myCell << endl; + } } } } @@ -521,11 +521,7 @@ namespace projects { refinedCells = mpiGrid.stop_refining(true); if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << endl; if(refinedCells.size() > 0) { - std::cout << "Refined cells produced by rank " << myRank << " are: "; - for (auto cellid : refinedCells) { - std::cout << cellid << " "; - } - std::cout << endl; + std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. " << std::endl; } if (P::amrMaxSpatialRefLevel > 1) { @@ -550,8 +546,8 @@ namespace projects { CellID myCell = mpiGrid.get_existing_cell(xyz); // Check if the cell is tagged as do not compute if (mpiGrid[myCell]->sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { - if (mpiGrid.refine_completely_at(xyz)) { - std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; + if (!mpiGrid.refine_completely_at(xyz)) { + std::cerr << "ERROR: Failed to refine cell " << myCell << endl; } } } @@ -562,13 +558,9 @@ namespace projects { refinedCells = mpiGrid.stop_refining(true); if(myRank == MASTER_RANK) std::cout << "Finished second level of refinement" << endl; if(refinedCells.size() > 0) { - std::cout << "Refined cells produced by rank " << myRank << " are: "; - for (auto cellid : refinedCells) { - std::cout << cellid << " "; - } - std::cout << endl; + std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. " << std::endl; } - + mpiGrid.balance_load(); return true; From bc5b26da482decf5a0e983397f45f73d210d512e Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Fri, 8 Mar 2019 16:45:51 +0200 Subject: [PATCH 236/602] Added cfg file for BCH-like lowres Magnetosphere amr test run --- .../Magnetosphere/Magnetosphere_BCH-like.cfg | 175 ++++++++++++++++++ 1 file changed, 175 insertions(+) create mode 100644 projects/Magnetosphere/Magnetosphere_BCH-like.cfg diff --git a/projects/Magnetosphere/Magnetosphere_BCH-like.cfg b/projects/Magnetosphere/Magnetosphere_BCH-like.cfg new file mode 100644 index 000000000..347b9aa5e --- /dev/null +++ b/projects/Magnetosphere/Magnetosphere_BCH-like.cfg @@ -0,0 +1,175 @@ +project = Magnetosphere +ParticlePopulations = proton + +#[restart] +#filename = restart.0000822.vlsv + +[io] +diagnostic_write_interval = 1 +write_initial_state = 0 +restart_walltime_interval = 21000 +number_of_restarts = 1000 + + +system_write_t_interval = 0.5 +system_write_file_name = bulk +system_write_distribution_stride = 0 +system_write_distribution_xline_stride = 0 +system_write_distribution_yline_stride = 0 +system_write_distribution_zline_stride = 0 + +[gridbuilder] +x_length = 50 +y_length = 40 +z_length = 40 +x_min = -6.0e8 +x_max = 3.0e8 +y_min = -3.6e8 +y_max = 3.6e8 +z_min = -3.6e8 +z_max = 3.6e8 + +timestep_max = 50 +t_max = 200.0 + +# We have protons and completely ionized heliums +[proton_properties] +mass = 1 +mass_units = PROTON +charge = 1 + +[proton_vspace] +vx_min = -4.08e6 +vx_max = +4.08e6 +vy_min = -4.08e6 +vy_max = +4.08e6 +vz_min = -4.08e6 +vz_max = +4.08e6 +vx_length = 34 +vy_length = 34 +vz_length = 34 + +[proton_sparse] +minValue = 1.0e-15 +dynamicAlgorithm = 1 +dynamicBulkValue1 = 1.0e6 +dynamicBulkValue2 = 1.0e7 +dynamicMinValue1 = 1.0e-15 +dynamicMinValue2 = 1.0e-13 + +[Magnetosphere] +constBgBX = 0.0 +constBgBY = 0.0 +constBgBZ = -5.0e-9 +noDipoleInSW = 1.0 +dipoleType = 2 +#dipoleMirrorLocationX = 600000000.0 +dipoleMirrorLocationX = 5.64e8 + +refine_L2radius = 9.5565e7 # 15 RE +refine_L2tailthick = 3.1855e7 # 5 RE +refine_L1radius = 1.59275e8 # 
25 RE +refine_L1tailthick = 6.371e7 # 10 RE + + +[ionosphere] +centerX = 0.0 +centerY = 0.0 +centerZ = 0.0 +radius = 31.8e6 +taperRadius = 80.0e0 +precedence = 2 + +[proton_Magnetosphere] +T = 0.5e6 +rho = 1.0e6 +VX0 = -7.5e5 +VY0 = 0.0 +VZ0 = 0.0 + +nSpaceSamples = 1 +nVelocitySamples = 1 + +[proton_ionosphere] +rho = 1.0e6 +VX0 = 0.0 +VY0 = 0.0 +VZ0 = 0.0 + +[loadBalance] +rebalanceInterval = 10 +tolerance = 1.2 + +[variables] +output = Rhom +output = Rhoq +output = B +output = VolB +output = E +output = VolE +output = Pressure +output = RhoV +output = BoundaryType +output = MPIrank +output = derivs +output = BVOLderivs +output = BoundaryLayer +output = BackgroundB +output = PerturbedB +output = LBweight +output = MaxVdt +output = MaxRdt +output = MaxFieldsdt +output = Blocks +output = PTensor +output = fSaved +output = populations_Blocks +diagnostic = Blocks +diagnostic = populations_RhoLossAdjust +diagnostic = RhoLossVelBoundary +diagnostic = MaxDistributionFunction +diagnostic = MinDistributionFunction + +[boundaries] +periodic_x = no +periodic_y = no +periodic_z = no +boundary = Outflow +boundary = Maxwellian +boundary = Ionosphere + +[outflow] +precedence = 3 + +[proton_outflow] +face = x- +face = y- +face = y+ +face = z- +face = z+ + +[maxwellian] +dynamic = 0 +face = x+ +precedence = 4 + +[proton_maxwellian] +dynamic = 0 +file_x+ = sw1.dat + +[bailout] +max_memory = 58 + +[fieldsolver] +maxSubcycles = 50 +ohmHallTerm = 2 +minCFL = 0.4 +maxCFL = 0.5 +maxWaveVelocity = 7494811.45 #2.5% of speed of light... + +[vlasovsolver] +minCFL = 0.8 +maxCFL = 0.99 +maxSlAccelerationRotation = 22 +maxSlAccelerationSubcycles = 2 + From addd8b68ca79032e9d44a4cd7e7007f14e9744c2 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 8 Mar 2019 18:04:01 +0200 Subject: [PATCH 237/602] Added [AMR]max_spatial_level = 2 --- projects/Magnetosphere/Magnetosphere_BCH-like.cfg | 2 ++ 1 file changed, 2 insertions(+) diff --git a/projects/Magnetosphere/Magnetosphere_BCH-like.cfg b/projects/Magnetosphere/Magnetosphere_BCH-like.cfg index 347b9aa5e..5b37de055 100644 --- a/projects/Magnetosphere/Magnetosphere_BCH-like.cfg +++ b/projects/Magnetosphere/Magnetosphere_BCH-like.cfg @@ -173,3 +173,5 @@ maxCFL = 0.99 maxSlAccelerationRotation = 22 maxSlAccelerationSubcycles = 2 +[AMR] +max_spatial_level = 2 \ No newline at end of file From 2c93c2a9ea71075c2974782c177aad0ca35b6e59 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 8 Mar 2019 18:06:46 +0200 Subject: [PATCH 238/602] Changed dipole type to 3 --- projects/Magnetosphere/Magnetosphere_BCH-like.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/projects/Magnetosphere/Magnetosphere_BCH-like.cfg b/projects/Magnetosphere/Magnetosphere_BCH-like.cfg index 5b37de055..95d573f4c 100644 --- a/projects/Magnetosphere/Magnetosphere_BCH-like.cfg +++ b/projects/Magnetosphere/Magnetosphere_BCH-like.cfg @@ -62,7 +62,7 @@ constBgBX = 0.0 constBgBY = 0.0 constBgBZ = -5.0e-9 noDipoleInSW = 1.0 -dipoleType = 2 +dipoleType = 3 #dipoleMirrorLocationX = 600000000.0 dipoleMirrorLocationX = 5.64e8 From 60f0a5f676cfa2e4089bb7cd02c8413085aad2d2 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 8 Mar 2019 18:17:54 +0200 Subject: [PATCH 239/602] Added sw1 - file --- projects/Magnetosphere/Magnetosphere_BCH-like.cfg | 2 +- projects/Magnetosphere/sw1_BCH-like.dat | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 projects/Magnetosphere/sw1_BCH-like.dat diff --git a/projects/Magnetosphere/Magnetosphere_BCH-like.cfg 
b/projects/Magnetosphere/Magnetosphere_BCH-like.cfg index 95d573f4c..53242aee8 100644 --- a/projects/Magnetosphere/Magnetosphere_BCH-like.cfg +++ b/projects/Magnetosphere/Magnetosphere_BCH-like.cfg @@ -155,7 +155,7 @@ precedence = 4 [proton_maxwellian] dynamic = 0 -file_x+ = sw1.dat +file_x+ = sw1_BCH-like.dat [bailout] max_memory = 58 diff --git a/projects/Magnetosphere/sw1_BCH-like.dat b/projects/Magnetosphere/sw1_BCH-like.dat new file mode 100644 index 000000000..a8a70a464 --- /dev/null +++ b/projects/Magnetosphere/sw1_BCH-like.dat @@ -0,0 +1 @@ +0.0 1.0e6 0.5e6 -7.5e5 0.0 0.0 0.0 0.0 -5.0e-9 From 1b1dd31badd7d3842cc0c66fc413bca32d02ce46 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Mon, 11 Mar 2019 10:12:52 +0200 Subject: [PATCH 240/602] Removed check for boundary flag, boundary flags have not yet been set when the refinement function gets called. --- projects/Magnetosphere/Magnetosphere.cpp | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index a726151b9..05a6f0c68 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -507,12 +507,9 @@ namespace projects { (std::abs(xyz[2])sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { - if (!mpiGrid.refine_completely_at(xyz)) { - std::cerr << "ERROR: Failed to refine cell " << myCell << endl; - } - } + if (!mpiGrid.refine_completely_at(xyz)) { + std::cerr << "ERROR: Failed to refine cell " << myCell << endl; + } } } } From a680985e63acd385de6701ef86df21170db65523 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Mon, 11 Mar 2019 18:36:50 +0200 Subject: [PATCH 241/602] changed output.Blocks to output.populations_Blocks --- projects/Magnetosphere/Magnetosphere_BCH-like.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/projects/Magnetosphere/Magnetosphere_BCH-like.cfg b/projects/Magnetosphere/Magnetosphere_BCH-like.cfg index 53242aee8..109430c20 100644 --- a/projects/Magnetosphere/Magnetosphere_BCH-like.cfg +++ b/projects/Magnetosphere/Magnetosphere_BCH-like.cfg @@ -124,7 +124,7 @@ output = Blocks output = PTensor output = fSaved output = populations_Blocks -diagnostic = Blocks +diagnostic = populations_Blocks diagnostic = populations_RhoLossAdjust diagnostic = RhoLossVelBoundary diagnostic = MaxDistributionFunction From abad38d3d9cc443a4c797c441acfc4a70d4f92f5 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Mon, 11 Mar 2019 20:05:37 +0200 Subject: [PATCH 242/602] Updated output variables --- .../Magnetosphere/Magnetosphere_BCH-like.cfg | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/projects/Magnetosphere/Magnetosphere_BCH-like.cfg b/projects/Magnetosphere/Magnetosphere_BCH-like.cfg index 109430c20..c0b3765cd 100644 --- a/projects/Magnetosphere/Magnetosphere_BCH-like.cfg +++ b/projects/Magnetosphere/Magnetosphere_BCH-like.cfg @@ -103,32 +103,32 @@ tolerance = 1.2 [variables] output = Rhom output = Rhoq +output = V +output = populations_Rho +output = populations_V +output = populations_moments_Backstream +output = populations_moments_NonBackstream +#output = populations_RhomLossAdjust +output = populations_accSubcycles output = B output = VolB output = E +output = HallE output = VolE -output = Pressure -output = RhoV +output = populations_PTensor output = BoundaryType -output = MPIrank -output = derivs -output = BVOLderivs output = BoundaryLayer -output = BackgroundB -output = PerturbedB +output = MPIrank +output = 
FsGridRank output = LBweight output = MaxVdt output = MaxRdt output = MaxFieldsdt -output = Blocks -output = PTensor -output = fSaved output = populations_Blocks +output = fSaved +output = populations_MinValue diagnostic = populations_Blocks -diagnostic = populations_RhoLossAdjust -diagnostic = RhoLossVelBoundary -diagnostic = MaxDistributionFunction -diagnostic = MinDistributionFunction +diagnostic = Rhom [boundaries] periodic_x = no From a41bd065c7b13b91c7d583971d1200eef21aafa0 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Mon, 11 Mar 2019 20:15:45 +0200 Subject: [PATCH 243/602] Fixed default values of readparameters::add AMR.box_center_* variables --- parameters.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/parameters.cpp b/parameters.cpp index ffdb71967..8ed1207e0 100644 --- a/parameters.cpp +++ b/parameters.cpp @@ -233,9 +233,9 @@ bool Parameters::addParameters(){ Readparameters::add("AMR.box_half_width_x","Half width of the box that is refined (for testing)",(uint)1); Readparameters::add("AMR.box_half_width_y","Half width of the box that is refined (for testing)",(uint)1); Readparameters::add("AMR.box_half_width_z","Half width of the box that is refined (for testing)",(uint)1); - Readparameters::add("AMR.box_center_x","x coordinate of the center of the box that is refined (for testing)",(uint)1); - Readparameters::add("AMR.box_center_y","y coordinate of the center of the box that is refined (for testing)",(uint)1); - Readparameters::add("AMR.box_center_z","z coordinate of the center of the box that is refined (for testing)",(uint)1); + Readparameters::add("AMR.box_center_x","x coordinate of the center of the box that is refined (for testing)",0.0); + Readparameters::add("AMR.box_center_y","y coordinate of the center of the box that is refined (for testing)",0.0); + Readparameters::add("AMR.box_center_z","z coordinate of the center of the box that is refined (for testing)",0.0); return true; } From 10887d880ba5b1e604855fdba952c1cdd9684d80 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 15 Mar 2019 12:35:29 +0200 Subject: [PATCH 244/602] Fix for bug in determining initial paths for pencils that start in refined cells. 
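In outline, the fix deduces a refined cell's position within its parent by comparing cell-center coordinates in the two dimensions transverse to the pencil, instead of the earlier index arithmetic. A minimal equivalent sketch of the idea (illustration only; same names as in the hunk below, and equivalent to its four-way branch):

    int ix = (dimension + 1) % 3;  // first transverse dimension
    int iy = (dimension + 2) % 3;  // second transverse dimension
    // step encodes the child's quadrant within its parent cell: 0..3
    int step = (myCoords.at(ix) > parentCoords.at(ix) ? 1 : 0)
             + (myCoords.at(iy) > parentCoords.at(iy) ? 2 : 0);

One such step is recorded per refinement level above the pencil's starting level, so a pencil that starts in a refined cell reconstructs the path it would have had if it had been built from its level-0 ancestor.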
--- vlasovsolver/cpu_trans_map_amr.cpp | 92 ++++++++++-------------------- vlasovsolver/cpu_trans_map_amr.hpp | 17 +++--- 2 files changed, 38 insertions(+), 71 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 9589cde44..4f26ce754 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -349,7 +349,7 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg path.size() ) { - vector localIndices; - auto indices = grid.mapping.get_indices(id); - //auto length = grid.mapping.get_cell_length_in_indices(grid.mapping.get_level_0_parent(id)); - int length = pow(2,grid.get_maximum_refinement_level() - grid.get_refinement_level(id)); - for (auto index : indices) { - localIndices.push_back(index % length); - } + CellID myId = startingId; - for ( uint i = path.size(); i < startingRefLvl; i++) { + for ( uint i = path.size(); i < startingRefLvl; ++i) { - vector localIndicesOnRefLvl; + CellID parentId = grid.get_parent(myId); - for ( auto lid : localIndices ) { - localIndicesOnRefLvl.push_back( lid / pow(2, startingRefLvl - (i + 1) )); - } + auto myCoords = grid.get_center(myId); + auto parentCoords = grid.get_center(parentId); + + int ix = (dimension + 1) % 3; + int iy = (dimension + 2) % 3; - int i1 = 0; - int i2 = 0; + int step = -1; - switch( dimension ) { - case 0: - i1 = localIndicesOnRefLvl.at(1); - i2 = localIndicesOnRefLvl.at(2); - break; - case 1: - i1 = localIndicesOnRefLvl.at(0); - i2 = localIndicesOnRefLvl.at(2); - break; - case 2: - i1 = localIndicesOnRefLvl.at(0); - i2 = localIndicesOnRefLvl.at(1); - break; + if (myCoords.at(ix) < parentCoords.at(ix) && myCoords.at(iy) < parentCoords.at(iy)) { + step = 0; + } else if (myCoords.at(ix) > parentCoords.at(ix) && myCoords.at(iy) < parentCoords.at(iy)) { + step = 1; + } else if (myCoords.at(ix) < parentCoords.at(ix) && myCoords.at(iy) > parentCoords.at(iy)) { + step = 2; + } else if (myCoords.at(ix) > parentCoords.at(ix) && myCoords.at(iy) > parentCoords.at(iy)) { + step = 3; } - if( i1 > 1 || i2 > 1) { - std::cout << __FILE__ << " " << __LINE__ << " Something went wrong, i1 = " << i1 << ", i2 = " << i2 << std::endl; + if(path.size() == 0) { + path.push_back(step); + } else { + auto it = path.end(); + path.insert(it - 1, step); } - path.push_back(i1 + 2 * i2); + myId = parentId; } } - - id = startingId; - - bool periodic = false; while (id != INVALID_CELLID) { @@ -510,34 +499,11 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg& mpi int myCount = std::count(pencils.ids.begin(), pencils.ids.end(), id); - if( myCount == 0 || (myCount != 1 && myCount % 4 != 0)) { + if( myCount == 0 ) { std::cerr << "ERROR: Cell ID " << id << " Appears in pencils " << myCount << " times!"<< std::endl; correct = false; diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index a85b0d9c1..b2d9281ce 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -96,24 +96,25 @@ struct setOfPencils { return idsOut; } - // Split one pencil into four pencils covering the same space. + // Split one pencil into up to four pencils covering the same space. // dx and dy are the dimensions of the original pencil. void split(const uint myPencilId, const Realv dx, const Realv dy) { - auto ids = getIds(myPencilId); + auto myIds = this->getIds(myPencilId); // Find paths that members of this pencil may have in other pencils (can happen) // so that we don't add duplicates. 
std::vector existingSteps; - for (uint theirPencilId = 0; theirPencilId < N; ++theirPencilId) { + + for (uint theirPencilId = 0; theirPencilId < this->N; ++theirPencilId) { if(theirPencilId == myPencilId) continue; - auto theirIds = getIds(theirPencilId); + auto theirIds = this->getIds(theirPencilId); for (auto theirId : theirIds) { - for (auto myId : ids) { + for (auto myId : myIds) { if (myId == theirId) { - auto theirPath = path.at(theirPencilId); - auto myPath = path.at(myPencilId); - auto theirStep = theirPath.at(myPath.size()); + std::vector theirPath = this->path.at(theirPencilId); + std::vector myPath = this->path.at(myPencilId); + uint theirStep = theirPath.at(myPath.size()); existingSteps.push_back(theirStep); } } From 5e5cd84f4a8339bb26e112435228049b58fc671a Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 15 Mar 2019 12:43:18 +0200 Subject: [PATCH 245/602] Fix the order in which path elements are inserted. --- vlasovsolver/cpu_trans_map_amr.cpp | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 4f26ce754..dfb2638a5 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -358,7 +358,10 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg path.size() ) { + + int startingPathSize = path.size(); + auto it = path.end(); + if( startingRefLvl > startingPathSize ) { CellID myId = startingId; @@ -384,12 +387,7 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg Date: Fri, 15 Mar 2019 12:44:51 +0200 Subject: [PATCH 246/602] Updated MPI_VERSION and GCC_BRAND_VERSION --- MAKE/Makefile.sisu_gcc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/MAKE/Makefile.sisu_gcc b/MAKE/Makefile.sisu_gcc index aa0e5f20c..79934fda8 100644 --- a/MAKE/Makefile.sisu_gcc +++ b/MAKE/Makefile.sisu_gcc @@ -45,7 +45,7 @@ FLAGS = #GNU flags: CC_BRAND = gcc -CC_BRAND_VERSION = 5.1.0 +CC_BRAND_VERSION = 6.2.0 CXXFLAGS += -g -O2 -static -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2 testpackage: CXXFLAGS = -g -fopenmp -funroll-loops -std=c++0x -fabi-version=0 -mavx @@ -58,7 +58,7 @@ LIB_MPI = -lgomp #======== Libraries =========== -MPT_VERSION = 7.2.6 +MPT_VERSION = 7.5.1 JEMALLOC_VERSION = 4.0.4 LIBRARY_PREFIX = /proj/vlasov/libraries From 58ddd07b98104ce480ee21c548827605f45af531 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 15 Mar 2019 14:22:15 +0200 Subject: [PATCH 247/602] Updated AMR tests --- projects/Flowthrough/Flowthrough.cfg | 109 +++++++++++++++------------ projects/Flowthrough/Flowthrough.cpp | 6 +- projects/testAmr/testAmr.cfg | 22 ++++-- projects/testAmr/testAmr.cpp | 71 ++++++++++------- 4 files changed, 124 insertions(+), 84 deletions(-) diff --git a/projects/Flowthrough/Flowthrough.cfg b/projects/Flowthrough/Flowthrough.cfg index 1b5a4f82a..0cb2478f4 100644 --- a/projects/Flowthrough/Flowthrough.cfg +++ b/projects/Flowthrough/Flowthrough.cfg @@ -1,92 +1,105 @@ +ParticlePopulations = proton + project = Flowthrough -system_write_t_interval = 15 -diagnostic_write_interval = 1 -restart_write_t_interval = 500 -propagate_field = 0 +propagate_field = 1 propagate_vlasov_acceleration = 1 propagate_vlasov_translation = 1 -dynamic_timestep = 0 +dynamic_timestep = 1 + +[proton_properties] +mass = 1 +mass_units = PROTON +charge = 1 + +[AMR] +max_spatial_level = 2 +box_half_width_x = 2 +box_half_width_z = 2 +box_half_width_y = 2 [gridbuilder] -x_length = 10 -y_length = 10 -z_length = 10 -x_min = -1.3e8 
-x_max = 1.3e8 -y_min = -1.3e8 -y_max = 1.3e8 -z_min = -1.3e8 -z_max = 1.3e8 -vx_min = -600000.0 -vx_max = +600000.0 -vy_min = -600000.0 -vy_max = +600000.0 -vz_min = -600000.0 -vz_max = +600000.0 +x_length = 16 +y_length = 8 +z_length = 8 +x_min = -8e7 +x_max = 8e7 +y_min = -4e7 +y_max = 4e7 +z_min = -4e7 +z_max = 4e7 +#t_max = 160 +dt = 2.0 +timestep_max = 1 + +[proton_vspace] +vx_min = -2e6 +vx_max = +2e6 +vy_min = -2e6 +vy_max = +2e6 +vz_min = -2e6 +vz_max = +2e6 vx_length = 15 vy_length = 15 vz_length = 15 -t_max = 550 -dt = 2.0 [io] write_initial_state = 1 system_write_t_interval = 1.0 system_write_file_name = bulk -system_write_distribution_stride = 1 -system_write_distribution_xline_stride = 0 -system_write_distribution_yline_stride = 0 -system_write_distribution_zline_stride = 0 - -system_write_t_interval = 50.0 -system_write_file_name = distrib -system_write_distribution_stride = 1 +system_write_distribution_stride = 0 system_write_distribution_xline_stride = 0 system_write_distribution_yline_stride = 0 system_write_distribution_zline_stride = 0 [variables] -output = Rho +output = populations_Rho output = E output = B -output = Pressure -output = RhoV output = BoundaryType output = MPIrank -output = Blocks -diagnostic = Blocks +output = populations_Blocks +diagnostic = populations_Blocks [boundaries] periodic_x = no -periodic_y = no -periodic_z = no +periodic_y = yes +periodic_z = yes boundary = Outflow boundary = Maxwellian [outflow] -face = x+ -face = y- -face = y+ -face = z- -face = z+ precedence = 3 +[proton_outflow] +face = x+ +#face = y- +#face = y+ +#face = z- +#face = z+ + [maxwellian] -dynamic = 0 +precedence = 4 face = x- + +[proton_maxwellian] +dynamic = 0 file_x- = sw1.dat -precedence = 4 -[sparse] +[proton_sparse] minValue = 1.0e-15 [Flowthrough] -emptyBox = 1 -T = 100000.0 -rho = 1000000.0 Bx = 1.0e-9 By = 1.0e-9 Bz = 1.0e-9 + +[proton_Flowthrough] +T = 1.0e5 +rho = 1.0e6 + nSpaceSamples = 2 nVelocitySamples = 2 + +[loadBalance] +algorithm = RANDOM diff --git a/projects/Flowthrough/Flowthrough.cpp b/projects/Flowthrough/Flowthrough.cpp index a01e8769e..909418c50 100644 --- a/projects/Flowthrough/Flowthrough.cpp +++ b/projects/Flowthrough/Flowthrough.cpp @@ -267,9 +267,9 @@ namespace projects { if(mpiGrid.get_maximum_refinement_level() > 1) { - for (int i = 0; i < 0.5 * P::amrBoxHalfWidthX; ++i) { - for (int j = 0; j < 0.5 * P::amrBoxHalfWidthY; ++j) { - for (int k = 0; k < 0.5 * P::amrBoxHalfWidthZ; ++k) { + for (int i = 0; i < P::amrBoxHalfWidthX; ++i) { + for (int j = 0; j < P::amrBoxHalfWidthY; ++j) { + for (int k = 0; k < P::amrBoxHalfWidthZ; ++k) { std::array xyz; xyz[0] = P::amrBoxCenterX + (0.5 + i - 0.5 * P::amrBoxHalfWidthX) * P::dx_ini; diff --git a/projects/testAmr/testAmr.cfg b/projects/testAmr/testAmr.cfg index aef397546..6b53f7a61 100644 --- a/projects/testAmr/testAmr.cfg +++ b/projects/testAmr/testAmr.cfg @@ -21,18 +21,26 @@ system_write_distribution_xline_stride = 0 system_write_distribution_yline_stride = 0 system_write_distribution_zline_stride = 0 +[AMR] +max_spatial_level = 2 +box_half_width_x = 1 +box_half_width_y = 1 +box_half_width_z = 1 +box_center_x = 1.0e6 +box_center_y = 1.0e6 +box_center_z = 1.0e6 [gridbuilder] -x_length = 7 -y_length = 7 -z_length = 7 -x_min = 0.0 +x_length = 8 +y_length = 8 +z_length = 8 +x_min = -1.0e6 x_max = 1.0e6 -y_min = 0.0 +y_min = -1.0e6 y_max = 1.0e6 -z_min = 0.0 +z_min = -1.0e6 z_max = 1.0e6 -timestep_max = 5 +timestep_max = 1 [proton_vspace] vx_min = -2.0e6 diff --git a/projects/testAmr/testAmr.cpp 
b/projects/testAmr/testAmr.cpp index dfffbec3d..07cd41cea 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -255,31 +255,30 @@ namespace projects { } bool testAmr::refineSpatialCells( dccrg::Dccrg& mpiGrid ) const { - + int myRank; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - mpiGrid.set_maximum_refinement_level(std::min(this->maxSpatialRefinementLevel, mpiGrid.mapping.get_maximum_refinement_level())); - - // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; - + std::vector refineSuccess; - for (double x = P::amrBoxCenterX - P::amrBoxHalfWidthX * P::dx_ini; x <= P::amrBoxCenterX + P::amrBoxHalfWidthX * P::dx_ini; x += P::dx_ini) { - for (double y = P::amrBoxCenterY - P::amrBoxHalfWidthY * P::dy_ini; y <= P::amrBoxCenterY + P::amrBoxHalfWidthY * P::dy_ini; y += P::dy_ini) { + for (int i = 0; i < 2 * P::amrBoxHalfWidthX; ++i) { + for (int j = 0; j < 2 * P::amrBoxHalfWidthY; ++j) { + for (int k = 0; k < 2 * P::amrBoxHalfWidthZ; ++k) { - std::array xyz; - xyz[0] = x; - xyz[1] = y; - //std::cout << "Trying to refine at " << xyz[0] << ", " << xyz[1] << ", " << xyz[2] << std::endl; - CellID myCell = mpiGrid.get_existing_cell(xyz); - if (mpiGrid.refine_completely_at(xyz)) { - std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; - } + std::array xyz; + xyz[0] = P::amrBoxCenterX + (0.5 + i - P::amrBoxHalfWidthX) * P::dx_ini; + xyz[1] = P::amrBoxCenterY + (0.5 + j - P::amrBoxHalfWidthY) * P::dy_ini; + xyz[2] = P::amrBoxCenterZ + (0.5 + k - P::amrBoxHalfWidthZ) * P::dz_ini; + + CellID myCell = mpiGrid.get_existing_cell(xyz); + if (mpiGrid.refine_completely_at(xyz)) { + std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; + } + } } } - std::vector refinedCells = mpiGrid.stop_refining(true); if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << endl; if(refinedCells.size() > 0) { @@ -292,17 +291,37 @@ namespace projects { mpiGrid.balance_load(); -// auto cells = mpiGrid.get_cells(); -// if(cells.empty()) { -// std::cout << "Rank " << myRank << " has no cells!" 
<< std::endl; -// } else { -// std::cout << "Cells on rank " << myRank << ": "; -// for (auto c : cells) { -// std::cout << c << " "; -// } -// std::cout << std::endl; -// } + if(mpiGrid.get_maximum_refinement_level() > 1) { + for (int i = 0; i < 2 * P::amrBoxHalfWidthX; ++i) { + for (int j = 0; j < 2 * P::amrBoxHalfWidthY; ++j) { + for (int k = 0; k < 2 * P::amrBoxHalfWidthZ; ++k) { + + std::array xyz; + xyz[0] = P::amrBoxCenterX + 0.5 * (0.5 + i - P::amrBoxHalfWidthX) * P::dx_ini; + xyz[1] = P::amrBoxCenterY + 0.5 * (0.5 + j - P::amrBoxHalfWidthY) * P::dy_ini; + xyz[2] = P::amrBoxCenterZ + 0.5 * (0.5 + k - P::amrBoxHalfWidthZ) * P::dz_ini; + + CellID myCell = mpiGrid.get_existing_cell(xyz); + if (mpiGrid.refine_completely_at(xyz)) { + std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; + } + } + } + } + + std::vector refinedCells = mpiGrid.stop_refining(true); + if(myRank == MASTER_RANK) std::cout << "Finished second level of refinement" << endl; + if(refinedCells.size() > 0) { + std::cout << "Refined cells produced by rank " << myRank << " are: "; + for (auto cellid : refinedCells) { + std::cout << cellid << " "; + } + std::cout << endl; + } + mpiGrid.balance_load(); + } + return true; } From 806c4c4b5245b0bddd739a58e15e01ca21a535fa Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Fri, 15 Mar 2019 14:41:33 +0200 Subject: [PATCH 248/602] Added a spacing of 2 * stencil width cells around L1 refinement region and 2 + 2 * stencil width cells around L2 refinement region. Removed error messages when refine_completely() returns false, that can happen for a variety of legitimate reasons. --- projects/Magnetosphere/Magnetosphere.cpp | 60 +++++++++++------------- 1 file changed, 28 insertions(+), 32 deletions(-) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index 05a6f0c68..ada1b2386 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -485,15 +485,16 @@ namespace projects { // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; + const int bw = 2 * VLASOV_STENCIL_WIDTH; + const int bw2 = bw + VLASOV_STENCIL_WIDTH; + // Calculate regions for refinement if (P::amrMaxSpatialRefLevel > 0) { + // L1 refinement. Does not touch a 2-cell thick (at L0) boundary layer. - for (uint i = 2; i < P::xcells_ini-2; ++i) { - for (uint j = 2; j < P::ycells_ini-2; ++j) { - for (uint k = 2; k < P::zcells_ini-2; ++k) { - // for (int i = 0; i < 2 * P::amrBoxHalfWidthX; ++i) { - // for (int j = 0; j < 2 * P::amrBoxHalfWidthY; ++j) { - // for (int k = 0; k < 2 * P::amrBoxHalfWidthZ; ++k) { + for (uint i = bw; i < P::xcells_ini-bw; ++i) { + for (uint j = bw; j < P::ycells_ini-bw; ++j) { + for (uint k = bw; k < P::zcells_ini-bw; ++k) { std::array xyz; xyz[0] = P::xmin + (i+0.5)*P::dx_ini; @@ -507,9 +508,7 @@ namespace projects { (std::abs(xyz[2]) 1) { + // L2 refinement. Does not touch a 5-cell thick (at L1) boundary layer. 
// This means a boundary width of 2 L0 cells and one L1 cell in between // as a buffer - for (uint i = 5; i < 2*P::xcells_ini-5; ++i) { - for (uint j = 5; j < 2*P::ycells_ini-5; ++j) { - for (uint k = 5; k < 2*P::zcells_ini-5; ++k) { + for (uint i = 2*bw2; i < 2*(P::xcells_ini-bw2); ++i) { + for (uint j = 2*bw2; j < 2*(P::ycells_ini-bw2); ++j) { + for (uint k = 2*bw2; k < 2*(P::zcells_ini-bw2); ++k) { - std::array xyz; - xyz[0] = P::xmin + (i+0.5)*0.5*P::dx_ini; - xyz[1] = P::ymin + (j+0.5)*0.5*P::dy_ini; - xyz[2] = P::zmin + (k+0.5)*0.5*P::dz_ini; - - Real radius2 = (xyz[0]*xyz[0]+xyz[1]*xyz[1]+xyz[2]*xyz[2]); - // Check if cell is within L1 sphere, or within L1 tail slice - if ((radius2 < refine_L2radius*refine_L2radius) || - ((xyz[0] < 0) && (std::abs(xyz[1]) < refine_L2radius) && - (std::abs(xyz[2]) xyz; + xyz[0] = P::xmin + (i+0.5)*0.5*P::dx_ini; + xyz[1] = P::ymin + (j+0.5)*0.5*P::dy_ini; + xyz[2] = P::zmin + (k+0.5)*0.5*P::dz_ini; + + Real radius2 = (xyz[0]*xyz[0]+xyz[1]*xyz[1]+xyz[2]*xyz[2]); + // Check if cell is within L1 sphere, or within L1 tail slice + if ((radius2 < refine_L2radius*refine_L2radius) || + ((xyz[0] < 0) && (std::abs(xyz[1]) < refine_L2radius) && + (std::abs(xyz[2])sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { - if (!mpiGrid.refine_completely_at(xyz)) { - std::cerr << "ERROR: Failed to refine cell " << myCell << endl; - } - } - } + mpiGrid.refine_completely(myCell); + } + } } - } - } + } } refinedCells = mpiGrid.stop_refining(true); if(myRank == MASTER_RANK) std::cout << "Finished second level of refinement" << endl; From d74e5bd1bd73b253be2e6356d6e0f95d4fd0eef6 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 15 Mar 2019 16:00:14 +0200 Subject: [PATCH 249/602] Changed the name of the amr version of the update_remote_mapping_contribution to enable compiling both amr and non-amr version simultaneously. Need to resolve how spatial_cell neighbor fields are defined when running non-amr. Likely going to have to use ifdefs. 
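For the ifdef route, a minimal sketch of what such a guard could look like (an assumption, not code in this commit; the flag name AMR and the count type uint are illustrative, while the field names and the per-sibling indexing 0..7 are the ones the AMR mapping code uses):

    #ifdef AMR  // illustrative build flag
       // AMR translation: one receive buffer per sibling octant, indexed 0..7
       std::array<Realf*, 8> neighbor_block_data;
       std::array<uint, 8> neighbor_number_of_blocks;
    #else
       // non-AMR translation: a single neighbor buffer, as before
       Realf* neighbor_block_data;
       uint neighbor_number_of_blocks;
    #endif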
--- vlasovsolver/cpu_trans_map_amr.cpp | 6 +++--- vlasovsolver/cpu_trans_map_amr.hpp | 10 +++++----- vlasovsolver/vlasovmover.cpp | 12 ++++++------ 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index dfb2638a5..2fb17de5e 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1302,7 +1302,7 @@ int get_sibling_index(dccrg::Dccrg& mpiGr \par dimension: 0,1,2 for x,y,z \par direction: 1 for + dir, -1 for - dir */ -void update_remote_mapping_contribution( +void update_remote_mapping_contribution_amr( dccrg::Dccrg& mpiGrid, const uint dimension, int direction, @@ -1353,7 +1353,7 @@ void update_remote_mapping_contribution( } // MPI_Barrier(MPI_COMM_WORLD); - // cout << "begin update_remote_mapping_contribution, dimension = " << dimension << ", direction = " << direction << endl; + // cout << "begin update_remote_mapping_contribution_amr, dimension = " << dimension << ", direction = " << direction << endl; // MPI_Barrier(MPI_COMM_WORLD); // Initialize remote cells @@ -1664,7 +1664,7 @@ void update_remote_mapping_contribution( } // MPI_Barrier(MPI_COMM_WORLD); - // cout << "end update_remote_mapping_contribution, dimension = " << dimension << ", direction = " << direction << endl; + // cout << "end update_remote_mapping_contribution_amr, dimension = " << dimension << ", direction = " << direction << endl; // MPI_Barrier(MPI_COMM_WORLD); } diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index b2d9281ce..face83b3f 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -196,11 +196,11 @@ bool trans_map_1d_amr(const dccrg::Dccrg& mpiGrid, - const uint dimension, - int direction, - const uint popID); +void update_remote_mapping_contribution_amr(dccrg::Dccrg& mpiGrid, + const uint dimension, + int direction, + const uint popID); #endif diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index 76ed17dbd..e80a8d2f8 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -94,8 +94,8 @@ void calculateSpatialTranslation( trans_timer=phiprof::initializeTimer("update_remote-z","MPI"); phiprof::start("update_remote-z"); - update_remote_mapping_contribution(mpiGrid, 2,+1,popID); - update_remote_mapping_contribution(mpiGrid, 2,-1,popID); + update_remote_mapping_contribution_amr(mpiGrid, 2,+1,popID); + update_remote_mapping_contribution_amr(mpiGrid, 2,-1,popID); phiprof::stop("update_remote-z"); } @@ -117,8 +117,8 @@ void calculateSpatialTranslation( trans_timer=phiprof::initializeTimer("update_remote-x","MPI"); phiprof::start("update_remote-x"); - update_remote_mapping_contribution(mpiGrid, 0,+1,popID); - update_remote_mapping_contribution(mpiGrid, 0,-1,popID); + update_remote_mapping_contribution_amr(mpiGrid, 0,+1,popID); + update_remote_mapping_contribution_amr(mpiGrid, 0,-1,popID); phiprof::stop("update_remote-x"); } @@ -140,8 +140,8 @@ void calculateSpatialTranslation( trans_timer=phiprof::initializeTimer("update_remote-y","MPI"); phiprof::start("update_remote-y"); - update_remote_mapping_contribution(mpiGrid, 1,+1,popID); - update_remote_mapping_contribution(mpiGrid, 1,-1,popID); + update_remote_mapping_contribution_amr(mpiGrid, 1,+1,popID); + update_remote_mapping_contribution_amr(mpiGrid, 1,-1,popID); phiprof::stop("update_remote-y"); } From 506ab4692a9292496d09b41cb44542cf7424f2f9 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Fri, 15 Mar 2019 19:02:05 +0200 
Subject: [PATCH 250/602] Added load balance between refinement levels --- projects/Magnetosphere/Magnetosphere.cpp | 26 +++++++++++++----------- 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index ada1b2386..abf6e0f63 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -513,11 +513,13 @@ namespace projects { } } } - } - refinedCells = mpiGrid.stop_refining(true); - if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << endl; - if(refinedCells.size() > 0) { - std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. " << std::endl; + refinedCells = mpiGrid.stop_refining(true); + if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << endl; + if(refinedCells.size() > 0) { + std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. " << std::endl; + + mpiGrid.balance_load(); + } } if (P::amrMaxSpatialRefLevel > 1) { @@ -547,14 +549,14 @@ namespace projects { } } } - } - refinedCells = mpiGrid.stop_refining(true); - if(myRank == MASTER_RANK) std::cout << "Finished second level of refinement" << endl; - if(refinedCells.size() > 0) { - std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. " << std::endl; - } + refinedCells = mpiGrid.stop_refining(true); + if(myRank == MASTER_RANK) std::cout << "Finished second level of refinement" << endl; + if(refinedCells.size() > 0) { + std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. " << std::endl; + } - mpiGrid.balance_load(); + mpiGrid.balance_load(); + } return true; } From cc49524e07f457e738b1ead338d8bed9eb1ccda1 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Sat, 16 Mar 2019 15:25:58 +0200 Subject: [PATCH 251/602] Bug fix: Moved mpiGrid.balance_load() out of if clause --- projects/Magnetosphere/Magnetosphere.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index abf6e0f63..ec7ba90e5 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -517,9 +517,8 @@ namespace projects { if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << endl; if(refinedCells.size() > 0) { std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. 

" << std::endl; - - mpiGrid.balance_load(); } + mpiGrid.balance_load(); } if (P::amrMaxSpatialRefLevel > 1) { From 48de34dcf5c1dde8f56746dc91c38ea19b6b5223 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Mon, 18 Mar 2019 12:07:35 +0200 Subject: [PATCH 252/602] 1) Fixed order of dx and dy in dimension 1 of check_ghost_cells() 2) added safeguard for invalid path sizes in split() --- vlasovsolver/cpu_trans_map_amr.cpp | 4 ++-- vlasovsolver/cpu_trans_map_amr.hpp | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index dfb2638a5..bee2a7c83 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -841,8 +841,8 @@ void check_ghost_cells(const dccrg::Dccrg dy = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DZ]; break; case 1: - dx = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DX]; - dy = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DZ]; + dx = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DZ]; + dy = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DX]; break; case 2: dx = mpiGrid[ids[0]]->SpatialCell::parameters[CellParams::DX]; diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index b2d9281ce..29d49ef95 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -114,8 +114,10 @@ struct setOfPencils { if (myId == theirId) { std::vector theirPath = this->path.at(theirPencilId); std::vector myPath = this->path.at(myPencilId); - uint theirStep = theirPath.at(myPath.size()); - existingSteps.push_back(theirStep); + if(theirPath.size() > myPath.size()) { + uint theirStep = theirPath.at(myPath.size()); + existingSteps.push_back(theirStep); + } } } } From d18fa925738ac005f9e6b8f2815fe3a46d0c2a7b Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 18 Mar 2019 14:22:52 +0200 Subject: [PATCH 253/602] Modified testAmr.cfg to trigger a bug. --- projects/testAmr/testAmr.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/projects/testAmr/testAmr.cfg b/projects/testAmr/testAmr.cfg index 6b53f7a61..c17bcfabd 100644 --- a/projects/testAmr/testAmr.cfg +++ b/projects/testAmr/testAmr.cfg @@ -100,5 +100,5 @@ rho = 1.0e6 rhoPertAbsAmp = 0.0 [loadBalance] -algorithm = RCB -#algorithm = random \ No newline at end of file +#algorithm = RCB +algorithm = RANDOM \ No newline at end of file From 18350d28d1d98c3979e6249ba269f34ed57d2457 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 18 Mar 2019 15:29:01 +0200 Subject: [PATCH 254/602] Bug fix: Changed ids to myIds in the call to pencils.addPencil to create correct size pencils. --- vlasovsolver/cpu_trans_map_amr.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index 81408ce6a..7bab64d88 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -157,7 +157,7 @@ struct setOfPencils { } else { auto myPath = copy_of_path; myPath.push_back(step); - addPencil(ids, myX, myY, periodic.at(myPencilId), myPath); + addPencil(myIds, myX, myY, periodic.at(myPencilId), myPath); } } } From 8a60e7f87c09dc4375632a92f47b0b52756b38bb Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 19 Mar 2019 11:57:09 +0200 Subject: [PATCH 255/602] Moved initSysBoundaries to be called before refineSpatialCells. Removed MPI_Barriers from around refineSpatialCells, added phiprof region around it. 
--- grid.cpp | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/grid.cpp b/grid.cpp index 159b1998e..324b521d1 100644 --- a/grid.cpp +++ b/grid.cpp @@ -123,11 +123,18 @@ void initializeGrid( .set_geometry(geom_params); - MPI_Barrier(comm); + phiprof::start("Initialize system boundary conditions"); + if(sysBoundaries.initSysBoundaries(project, P::t_min) == false) { + if (myRank == MASTER_RANK) cerr << "Error in initialising the system boundaries." << endl; + exit(1); + } + phiprof::stop("Initialize system boundary conditions"); + + phiprof::start("Refine spatial cells"); if(P::amrMaxSpatialRefLevel > 0 && project.refineSpatialCells(mpiGrid)) { recalculateLocalCellsCache(); } - MPI_Barrier(comm); + phiprof::stop("Refine spatial cells"); // Init velocity mesh on all cells initVelocityGridGeometry(mpiGrid); @@ -147,13 +154,6 @@ void initializeGrid( initSpatialCellCoordinates(mpiGrid); phiprof::stop("Set spatial cell coordinates"); - phiprof::start("Initialize system boundary conditions"); - if(sysBoundaries.initSysBoundaries(project, P::t_min) == false) { - if (myRank == MASTER_RANK) cerr << "Error in initialising the system boundaries." << endl; - exit(1); - } - phiprof::stop("Initialize system boundary conditions"); - // Initialise system boundary conditions (they need the initialised positions!!) phiprof::start("Classify cells (sys boundary conditions)"); if(sysBoundaries.classifyCells(mpiGrid) == false) { From b606040fb207e97f50f5277400d6d9b942ab9b40 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 19 Mar 2019 16:44:11 +0200 Subject: [PATCH 256/602] Replaced explicit definitions of nbrPair in grid.cpp by const auto to make it backwards compatible with DCCRG. Added const to auto's in cpu_trans_map_amr.cpp --- grid.cpp | 4 ++-- vlasovsolver/cpu_trans_map_amr.cpp | 26 +++++++++++++------------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/grid.cpp b/grid.cpp index 324b521d1..4a6615684 100644 --- a/grid.cpp +++ b/grid.cpp @@ -515,7 +515,7 @@ bool adjustVelocityBlocks(dccrg::Dccrg& m const auto* neighbors = mpiGrid.get_neighbors_of(cell_id, NEAREST_NEIGHBORHOOD_ID); vector neighbor_ptrs; neighbor_ptrs.reserve(neighbors->size()); - for ( pair> nbrPair : *neighbors) { + for ( const auto nbrPair : *neighbors) { CellID neighbor_id = nbrPair.first; if (neighbor_id == 0 || neighbor_id == cell_id) { continue; @@ -964,7 +964,7 @@ bool validateMesh(dccrg::Dccrg& mpiGrid,c // Iterate over all spatial neighbors // for (size_t n=0; nsize(); ++n) { - for (pair > nbrPair : *neighbors) { + for (const auto nbrPair : *neighbors) { // CellID nbrCellID = (*neighbors)[n]; CellID nbrCellID = nbrPair.first; const SpatialCell* nbr = mpiGrid[nbrCellID]; diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 7af3e1a64..e8a010e34 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -80,15 +80,15 @@ void computeSpatialSourceCellsForPencil(const dccrg::Dccrg distances; - for (auto nbrPair : *frontNbrPairs) { + for (const auto nbrPair : *frontNbrPairs) { if(nbrPair.second[dimension] < 0) { distances.insert(nbrPair.second[dimension]); } @@ -101,7 +101,7 @@ void computeSpatialSourceCellsForPencil(const dccrg::Dccrg neighbors; - for (auto nbrPair : *frontNbrPairs) { + for (const auto nbrPair : *frontNbrPairs) { int distanceInRefinedCells = nbrPair.second[dimension]; if(distanceInRefinedCells == *it) neighbors.push_back(nbrPair.first); } @@ -118,7 +118,7 @@ void
computeSpatialSourceCellsForPencil(const dccrg::Dccrg 0) { distances.insert(nbrPair.second[dimension]); } @@ -132,7 +132,7 @@ void computeSpatialSourceCellsForPencil(const dccrg::Dccrg neighbors; - for (auto nbrPair : *backNbrPairs) { + for (const auto nbrPair : *backNbrPairs) { int distanceInRefinedCells = nbrPair.second[dimension]; if(distanceInRefinedCells == *it) neighbors.push_back(nbrPair.first); } @@ -197,7 +197,7 @@ void computeSpatialTargetCellsForPencils(const dccrg::Dccrgfront().first << " "; // std::cout << backNbrPairs->back().first << std::endl; vector frontNeighborIds; - for( auto nbrPair: *frontNbrPairs ) { + for( const auto nbrPair: *frontNbrPairs ) { if (nbrPair.second.at(dimension) == -1) { frontNeighborIds.push_back(nbrPair.first); } @@ -208,7 +208,7 @@ void computeSpatialTargetCellsForPencils(const dccrg::Dccrg backNeighborIds; - for( auto nbrPair: *backNbrPairs ) { + for( const auto nbrPair: *backNbrPairs ) { if (nbrPair.second.at(dimension) == 1) { backNeighborIds.push_back(nbrPair.first); } @@ -804,13 +804,13 @@ void check_ghost_cells(const dccrg::Dccrg const auto* frontNeighbors = mpiGrid.get_neighbors_of(ids.front(),neighborhoodId); const auto* backNeighbors = mpiGrid.get_neighbors_of(ids.back() ,neighborhoodId); - for (auto nbrPair: *frontNeighbors) { + for (const auto nbrPair: *frontNeighbors) { //if((nbrPair.second[dimension] + 1) / pow(2,mpiGrid.get_refinement_level(nbrPair.first)) == -offset) { maxNbrRefLvl = max(maxNbrRefLvl,mpiGrid.mapping.get_refinement_level(nbrPair.first)); //} } - for (auto nbrPair: *backNeighbors) { + for (const auto nbrPair: *backNeighbors) { //if((nbrPair.second[dimension] + 1) / pow(2,mpiGrid.get_refinement_level(nbrPair.first)) == offset) { maxNbrRefLvl = max(maxNbrRefLvl,mpiGrid.get_refinement_level(nbrPair.first)); //} @@ -1405,7 +1405,7 @@ void update_remote_mapping_contribution_amr( [&mpiGrid](pair> i){return mpiGrid.is_local(i.first);})) { // ccell adds a neighbor_block_data block for each neighbor in the positive direction to its local data - for (auto nbrPair : *nbrToPairVector) { + for (const auto nbrPair : *nbrToPairVector) { bool initBlocksForEmptySiblings = false; CellID nbr = nbrPair.first; @@ -1443,7 +1443,7 @@ void update_remote_mapping_contribution_amr( auto *allNbrs = mpiGrid.get_neighbors_of(c, FULL_NEIGHBORHOOD_ID); bool faceNeighbor = false; - for (auto nbrPair : *allNbrs) { + for (const auto nbrPair : *allNbrs) { if(nbrPair.first == nbr && abs(nbrPair.second.at(dimension)) == 1) { faceNeighbor = true; } @@ -1477,7 +1477,7 @@ void update_remote_mapping_contribution_amr( [&mpiGrid](pair> i){return mpiGrid.is_local(i.first);})) { // ccell adds a neighbor_block_data block for each neighbor in the positive direction to its local data - for (auto nbrPair : *nbrOfPairVector) { + for (const auto nbrPair : *nbrOfPairVector) { CellID nbr = nbrPair.first; From 8e02118b210ff1addabe31a3ade128335d7cd95b Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 20 Mar 2019 10:15:14 +0200 Subject: [PATCH 257/602] Moved initSysBoundaries back to its original place, was causing segfaults with maxwellian boundary --- grid.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/grid.cpp b/grid.cpp index 4a6615684..addcdc9b4 100644 --- a/grid.cpp +++ b/grid.cpp @@ -123,13 +123,6 @@ void initializeGrid( .set_geometry(geom_params); - phiprof::start("Initialize system boundary conditions"); - if(sysBoundaries.initSysBoundaries(project, P::t_min) == false) { - if (myRank == MASTER_RANK) cerr << "Error in 
initialising the system boundaries." << endl; - exit(1); - } - phiprof::stop("Initialize system boundary conditions"); - phiprof::start("Refine spatial cells"); if(P::amrMaxSpatialRefLevel > 0 && project.refineSpatialCells(mpiGrid)) { recalculateLocalCellsCache(); @@ -154,6 +147,13 @@ void initializeGrid( initSpatialCellCoordinates(mpiGrid); phiprof::stop("Set spatial cell coordinates"); + phiprof::start("Initialize system boundary conditions"); + if(sysBoundaries.initSysBoundaries(project, P::t_min) == false) { + if (myRank == MASTER_RANK) cerr << "Error in initialising the system boundaries." << endl; + exit(1); + } + phiprof::stop("Initialize system boundary conditions"); + // Initialise system boundary conditions (they need the initialised positions!!) phiprof::start("Classify cells (sys boundary conditions)"); if(sysBoundaries.classifyCells(mpiGrid) == false) { From 347ae9ef91f3a1a9e367f8889d933b9fe120975a Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 20 Mar 2019 11:16:57 +0200 Subject: [PATCH 258/602] Added a null pointer check --- vlasovsolver/cpu_trans_map_amr.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index e8a010e34..90acd8fb2 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -239,8 +239,7 @@ void computeSpatialTargetCellsForPencils(const dccrg::DccrgsysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) { + if (targetCells[i] && targetCells[i]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) { targetCells[i] = NULL; } } From 8a37a141858788e93c94e3eb2a6a2cff8f6a2b4b Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Wed, 20 Mar 2019 14:41:58 +0200 Subject: [PATCH 259/602] Switched -DDEBUG to -DNDEBUG --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 74644940d..7747028f8 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,7 @@ FIELDSOLVER ?= londrillo_delzanna COMPFLAGS += -DPROFILE #Add -DNDEBUG to turn debugging off. If debugging is enabled performance will degrade significantly -COMPFLAGS += -DDEBUG +COMPFLAGS += -DNDEBUG # CXXFLAGS += -DDEBUG_SOLVERS # CXXFLAGS += -DDEBUG_IONOSPHERE From 0c7a0c8f700e0b9250f102faf15cdb00ef841c75 Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 20 Mar 2019 14:47:10 +0200 Subject: [PATCH 260/602] Correct misplaced NDEBUG block. --- sysboundary/sysboundarycondition.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sysboundary/sysboundarycondition.cpp b/sysboundary/sysboundarycondition.cpp index 714a7e5d4..9f31e8e70 100644 --- a/sysboundary/sysboundarycondition.cpp +++ b/sysboundary/sysboundarycondition.cpp @@ -797,7 +797,6 @@ namespace SBC { ) { const std::array closestCell = getTheClosestNonsysboundaryCell(technicalGrid, i, j, k); - #ifndef NDEBUG const std::array gid = technicalGrid.getGlobalIndices(i, j, k); const std::array ngid = technicalGrid.getGlobalIndices(closestCell[0], closestCell[1], closestCell[2]); @@ -810,6 +809,8 @@ namespace SBC { // we don't care what happens in them since they have no effect on the Vlasov solver. 
return perBGrid.get(i,j,k)->at(fsgrids::bfield::PERBX+component); } + + #ifndef NDEBUG if ( technicalGrid.get(closestCell[0], closestCell[1], closestCell[2]) == nullptr ) { stringstream ss; From c6c04057217d3d403613b4a44ffa7292ac8cf137 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 26 Mar 2019 10:50:27 +0200 Subject: [PATCH 261/602] Re-ordering --- vlasovsolver/cpu_trans_map_amr.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 90acd8fb2..05b34933b 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1435,10 +1435,7 @@ void update_remote_mapping_contribution_amr( if(pcell && pcell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { if(send_cells.find(nbr) == send_cells.end()) { - // 5a) We have not already sent data from this rank to this cell. - - ccell->neighbor_number_of_blocks.at(sendIndex) = pcell->get_number_of_velocity_blocks(popID); - ccell->neighbor_block_data.at(sendIndex) = pcell->get_data(popID); + // 5a) We have not already sent data from this rank to this cell. auto *allNbrs = mpiGrid.get_neighbors_of(c, FULL_NEIGHBORHOOD_ID); bool faceNeighbor = false; @@ -1450,7 +1447,11 @@ void update_remote_mapping_contribution_amr( if (faceNeighbor) { send_cells.insert(nbr); } + + ccell->neighbor_number_of_blocks.at(sendIndex) = pcell->get_number_of_velocity_blocks(popID); + ccell->neighbor_block_data.at(sendIndex) = pcell->get_data(popID); + } else { ccell->neighbor_number_of_blocks.at(sendIndex) = mpiGrid[nbr]->get_number_of_velocity_blocks(popID); ccell->neighbor_block_data.at(sendIndex) = From 082d5fb86a8ad31bc6d2984a3221664f449f8424 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 26 Mar 2019 14:58:11 +0200 Subject: [PATCH 262/602] Trying to set message size to 0 for all non-face neighbor sent messages. Receiver receives 0 size if there is no remote face neighbor. --- vlasovsolver/cpu_trans_map_amr.cpp | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 05b34933b..0e9c9a143 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1445,13 +1445,11 @@ void update_remote_mapping_contribution_amr( } } if (faceNeighbor) { + ccell->neighbor_number_of_blocks.at(sendIndex) = pcell->get_number_of_velocity_blocks(popID); + ccell->neighbor_block_data.at(sendIndex) = pcell->get_data(popID); send_cells.insert(nbr); } - - - ccell->neighbor_number_of_blocks.at(sendIndex) = pcell->get_number_of_velocity_blocks(popID); - ccell->neighbor_block_data.at(sendIndex) = pcell->get_data(popID); - + } else { ccell->neighbor_number_of_blocks.at(sendIndex) = mpiGrid[nbr]->get_number_of_velocity_blocks(popID); ccell->neighbor_block_data.at(sendIndex) = @@ -1538,7 +1536,8 @@ void update_remote_mapping_contribution_amr( } else { - ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); + //ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); + ncell->neighbor_number_of_blocks.at(recvIndex) = 0; } // ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); From 4f22b8023f140fb82c4c3b416acf626f9f4d10a1 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 26 Mar 2019 15:25:17 +0200 Subject: [PATCH 263/602] Fix so that it doesn't break regular face neighbor communications. 
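In outline, the receive-size rule this patch arrives at is the following (a condensed sketch of the hunk below, not part of the patch itself; variables as in update_remote_mapping_contribution_amr, with c the local cell, nbr the sending remote cell, ncell = mpiGrid[nbr], and findFaceNeighborRemoteTo a hypothetical shorthand for the transverse-index search done inline in the hunk):

   if (abs(nbrPair.second.at(dimension)) == 1) {
      // nbr is a true face neighbor of c: expect a full-size message.
      ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID);
   } else if (SpatialCell* scell = findFaceNeighborRemoteTo(nbr)) {
      // nbr maps its data to a face neighbor of its own that lives on another
      // rank: size the receive buffer by that cell's block count.
      ncell->neighbor_number_of_blocks.at(recvIndex) = scell->get_number_of_velocity_blocks(popID);
   } else {
      // All of nbr's face neighbors are local to nbr: a 0-size message arrives.
      ncell->neighbor_number_of_blocks.at(recvIndex) = 0;
   }
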
--- vlasovsolver/cpu_trans_map_amr.cpp | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 0e9c9a143..5385ed97a 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1505,9 +1505,11 @@ void update_remote_mapping_contribution_amr( recvIndex = get_sibling_index(mpiGrid,nbr); - SpatialCell* scell = NULL; + ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); if (abs(nbrPair.second.at(dimension)) != 1) { + + SpatialCell* scell = NULL; // nbr is not face neighbor to c --> we are not receiving data mapped to c but to its face neighbor. // This happens because DCCRG does not allow defining neighborhoods with face neighbors only. @@ -1528,19 +1530,18 @@ void update_remote_mapping_contribution_amr( } } - } - - if(scell) { - - ncell->neighbor_number_of_blocks.at(recvIndex) = scell->get_number_of_velocity_blocks(popID); - } else { - - //ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); - ncell->neighbor_number_of_blocks.at(recvIndex) = 0; + if(scell) { + // We found a face neighbor that is remote to nbr. Use it's number of blocks to receive the message from nbr. + ncell->neighbor_number_of_blocks.at(recvIndex) = scell->get_number_of_velocity_blocks(popID); + + } else { + // We did not find a face neighbor that is remote to nbr (ie. all nbr's face neighbors are local). Set message size to 0. + ncell->neighbor_number_of_blocks.at(recvIndex) = 0; + } } + - // ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); ncell->neighbor_block_data.at(recvIndex) = (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(recvIndex) * WID3 * sizeof(Realf), 64); receiveBuffers.push_back(ncell->neighbor_block_data.at(recvIndex)); From f9f6e1348ea3ca70cf3697d8fee1bd8feec84f8c Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 27 Mar 2019 16:40:00 +0200 Subject: [PATCH 264/602] Version that should work once we patch get_mpi_datatype() so that sends to non-face neighbors are set to 0 size --- vlasovsolver/cpu_trans_map_amr.cpp | 117 ++++++++++------------------- 1 file changed, 38 insertions(+), 79 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 5385ed97a..e6dc4ba2d 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1389,25 +1389,33 @@ void update_remote_mapping_contribution_amr( if (!ccell) continue; - // Send to neighbors_to - auto* nbrToPairVector = mpiGrid.get_neighbors_to(c, neighborhood); - // Receive from neighbors_of - auto* nbrOfPairVector = mpiGrid.get_neighbors_of(c, neighborhood); + const auto faceNbrs = mpiGrid.get_face_neighbors_of(c); + + vector p_nbrs; + vector n_nbrs; + + for (const auto nbr : faceNbrs) { + if(nbr.second == ((int)dimension + 1) * direction) { + p_nbrs.push_back(nbr.first); + } + + if(nbr.second == -1 * ((int)dimension + 1) * direction) { + n_nbrs.push_back(nbr.first); + } + } uint sendIndex = 0; uint recvIndex = 0; int mySiblingIndex = get_sibling_index(mpiGrid,c); - // Set up sends if any neighbor cells in nbrToPairVector are non-local. - if (!all_of(nbrToPairVector->begin(), nbrToPairVector->end(), - [&mpiGrid](pair> i){return mpiGrid.is_local(i.first);})) { + // Set up sends if any neighbor cells in p_nbrs are non-local. 
+ if (!all_of(p_nbrs.begin(), p_nbrs.end(), [&mpiGrid](CellID i){return mpiGrid.is_local(i);})) { // ccell adds a neighbor_block_data block for each neighbor in the positive direction to its local data - for (const auto nbrPair : *nbrToPairVector) { + for (const auto nbr : p_nbrs) { bool initBlocksForEmptySiblings = false; - CellID nbr = nbrPair.first; //Send data in nbr target array that we just mapped to, if // 1) it is a valid target, @@ -1433,25 +1441,22 @@ void update_remote_mapping_contribution_amr( // 4) it exists and is not a boundary cell, if(pcell && pcell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + + ccell->neighbor_number_of_blocks.at(sendIndex) = pcell->get_number_of_velocity_blocks(popID); if(send_cells.find(nbr) == send_cells.end()) { - // 5a) We have not already sent data from this rank to this cell. + // 5 We have not already sent data from this rank to this cell. - auto *allNbrs = mpiGrid.get_neighbors_of(c, FULL_NEIGHBORHOOD_ID); - bool faceNeighbor = false; - for (const auto nbrPair : *allNbrs) { - if(nbrPair.first == nbr && abs(nbrPair.second.at(dimension)) == 1) { - faceNeighbor = true; - } - } - if (faceNeighbor) { - ccell->neighbor_number_of_blocks.at(sendIndex) = pcell->get_number_of_velocity_blocks(popID); - ccell->neighbor_block_data.at(sendIndex) = pcell->get_data(popID); - send_cells.insert(nbr); - } + ccell->neighbor_block_data.at(sendIndex) = pcell->get_data(popID); + send_cells.insert(nbr); } else { - ccell->neighbor_number_of_blocks.at(sendIndex) = mpiGrid[nbr]->get_number_of_velocity_blocks(popID); + + // The receiving cell can't know which cell is sending the data from this rank. + // Therefore, we have to send 0's from other cells in the case where multiple cells + // from one rank are sending to the same remote cell so that all sent cells can be + // summed for the correct result. + ccell->neighbor_block_data.at(sendIndex) = (Realf*) aligned_malloc(ccell->neighbor_number_of_blocks.at(sendIndex) * WID3 * sizeof(Realf), 64); sendBuffers.push_back(ccell->neighbor_block_data.at(sendIndex)); @@ -1470,14 +1475,11 @@ void update_remote_mapping_contribution_amr( } // closes if(!all_of(nbrs_to.begin(), nbrs_to.end(),[&mpiGrid](CellID i){return mpiGrid.is_local(i);})) - // Set up receives if any neighbor cells in nbrOfPairVector are non-local. - if (!all_of(nbrOfPairVector->begin(), nbrOfPairVector->end(), - [&mpiGrid](pair> i){return mpiGrid.is_local(i.first);})) { + // Set up receives if any neighbor cells in n_nbrs are non-local. + if (!all_of(n_nbrs.begin(), n_nbrs.end(), [&mpiGrid](CellID i){return mpiGrid.is_local(i);})) { // ccell adds a neighbor_block_data block for each neighbor in the positive direction to its local data - for (const auto nbrPair : *nbrOfPairVector) { - - CellID nbr = nbrPair.first; + for (const auto nbr : n_nbrs) { if (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && ccell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { @@ -1505,43 +1507,7 @@ void update_remote_mapping_contribution_amr( recvIndex = get_sibling_index(mpiGrid,nbr); - ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); - - if (abs(nbrPair.second.at(dimension)) != 1) { - - SpatialCell* scell = NULL; - - // nbr is not face neighbor to c --> we are not receiving data mapped to c but to its face neighbor. - // This happens because DCCRG does not allow defining neighborhoods with face neighbors only. 
- // Figure out who is the face neighbor that nbr maps its data to, then get its number of blocks, - // if it is a remote neighbor to nbr. - - auto myIndices = mpiGrid.mapping.get_indices(nbr); - for (auto localNbrPair : *nbrOfPairVector) { - auto nbrIndices = mpiGrid.mapping.get_indices(localNbrPair.first); - int i1 = (dimension + 1) % 3; - int i2 = (dimension + 2) % 3; - if(myIndices.at(i1) == nbrIndices.at(i1) - && myIndices.at(i2) == nbrIndices.at(i2) - && abs(localNbrPair.second.at(dimension)) == 1 - && mpiGrid.get_process(nbr) != mpiGrid.get_process(localNbrPair.first)) { - - scell = mpiGrid[localNbrPair.first]; - - } - } - - if(scell) { - // We found a face neighbor that is remote to nbr. Use it's number of blocks to receive the message from nbr. - ncell->neighbor_number_of_blocks.at(recvIndex) = scell->get_number_of_velocity_blocks(popID); - - } else { - // We did not find a face neighbor that is remote to nbr (ie. all nbr's face neighbors are local). Set message size to 0. - ncell->neighbor_number_of_blocks.at(recvIndex) = 0; - } - } - - + ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); ncell->neighbor_block_data.at(recvIndex) = (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(recvIndex) * WID3 * sizeof(Realf), 64); receiveBuffers.push_back(ncell->neighbor_block_data.at(recvIndex)); @@ -1554,7 +1520,7 @@ void update_remote_mapping_contribution_amr( auto myIndices = mpiGrid.mapping.get_indices(c); // Allocate memory for each sibling to receive all the data sent by coarser ncell. - // nbrs_to of the sender will only include the face neighbors, only allocate blocks for those. + // only allocate blocks for face neighbors. for (uint i_sib = 0; i_sib < MAX_NEIGHBORS_PER_DIM; ++i_sib) { auto sibling = mySiblings.at(i_sib); @@ -1570,20 +1536,13 @@ void update_remote_mapping_contribution_amr( ncell->neighbor_block_data.at(i_sib) = (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(i_sib) * WID3 * sizeof(Realf), 64); receiveBuffers.push_back(ncell->neighbor_block_data.at(i_sib)); - } - } - + } + } } - - // Only nearest neighbors (nbrpair.second(dimension) == 1 are added to the - // block data of the receiving cells - if (abs(nbrPair.second.at(dimension)) == 1) { - receive_cells.push_back(c); - receive_origin_cells.push_back(nbr); - receive_origin_index.push_back(recvIndex); - - } + receive_cells.push_back(c); + receive_origin_cells.push_back(nbr); + receive_origin_index.push_back(recvIndex); } // closes (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && ...) From 1a88e2887a79bccbf90b51a7220f3c156aa6eb54 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 28 Mar 2019 10:16:30 +0200 Subject: [PATCH 265/602] Record the ranks that possess face neighbors of each cell in a std::set container. 
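The set is rebuilt at every load balance and is meant to gate communication later in the series: when the translation buffers are attached to a cell's MPI datatype, ranks that own no face neighbor of the cell only ever need zero-size padding. A minimal usage sketch (receiver_rank as in SpatialCell::get_mpi_datatype; the actual gating lands a few patches further on):

   // Attach the neighbor block buffers only if the receiving rank owns at
   // least one face neighbor of this cell.
   if (cell->face_neighbor_processes.count(receiver_rank) > 0) {
      // ... push displacements/lengths for neighbor_block_data ...
   }
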
--- grid.cpp | 31 +++++++++++++++++++++++++++++++ spatial_cell.hpp | 1 + 2 files changed, 32 insertions(+) diff --git a/grid.cpp b/grid.cpp index 394c265a3..8104b65e3 100644 --- a/grid.cpp +++ b/grid.cpp @@ -320,6 +320,32 @@ void initSpatialCellCoordinates(dccrg::Dccrg& mpiGrid ) { + + const auto cells = mpiGrid.get_cells(); + + for (const auto cellid : cells) { + + if (cellid == INVALID_CELLID) continue; + + SpatialCell* cell = mpiGrid[cellid]; + + if (!cell) continue; + + cell->face_neighbor_processes.clear(); + + const auto faceNeighbors = mpiGrid.get_face_neighbors_of(cellid); + + for (const auto nbr : faceNeighbors) { + + cell->face_neighbor_processes.insert(mpiGrid.get_process(nbr.first)); + + } + } +} void balanceLoad(dccrg::Dccrg& mpiGrid, SysBoundary& sysBoundaries){ // Invalidate cached cell lists @@ -467,6 +493,11 @@ void balanceLoad(dccrg::Dccrg& mpiGrid, S } } + // Record ranks of face neighbors + phiprof::start("set face neighbor ranks"); + setFaceNeighborRanks( mpiGrid ); + phiprof::stop("set face neighbor ranks"); + phiprof::stop("Init solvers"); phiprof::stop("Balancing load"); } diff --git a/spatial_cell.hpp b/spatial_cell.hpp index 4f3396b70..39f47eab3 100644 --- a/spatial_cell.hpp +++ b/spatial_cell.hpp @@ -336,6 +336,7 @@ namespace spatial_cell { std::array neighbor_block_data; /**< Pointers for translation operator. We can point to neighbor * cell block data. We do not allocate memory for the pointer.*/ std::array neighbor_number_of_blocks; + std::set face_neighbor_processes; uint sysBoundaryFlag; /**< What type of system boundary does the cell belong to. * Enumerated in the sysboundarytype namespace's enum.*/ uint sysBoundaryLayer; /**< Layers counted from closest systemBoundary. If 0 then it has not From e5623e8dccd7a3b9586602d5ada94d19a1562d9d Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 28 Mar 2019 10:23:21 +0200 Subject: [PATCH 266/602] Added references to const auto's --- grid.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/grid.cpp b/grid.cpp index 8104b65e3..b6c3527d3 100644 --- a/grid.cpp +++ b/grid.cpp @@ -325,9 +325,9 @@ Record for each cell which processes own one or more of its face neighbors */ void setFaceNeighborRanks( dccrg::Dccrg& mpiGrid ) { - const auto cells = mpiGrid.get_cells(); + const auto& cells = mpiGrid.get_cells(); - for (const auto cellid : cells) { + for (const auto& cellid : cells) { if (cellid == INVALID_CELLID) continue; @@ -337,9 +337,9 @@ void setFaceNeighborRanks( dccrg::Dccrg& cell->face_neighbor_processes.clear(); - const auto faceNeighbors = mpiGrid.get_face_neighbors_of(cellid); + const auto& faceNeighbors = mpiGrid.get_face_neighbors_of(cellid); - for (const auto nbr : faceNeighbors) { + for (const auto& nbr : faceNeighbors) { cell->face_neighbor_processes.insert(mpiGrid.get_process(nbr.first)); From b7bf052c9bbaad56bf2d43c15f4311067ee0fc4b Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 28 Mar 2019 14:04:39 +0200 Subject: [PATCH 267/602] Added call to getFaceNeighborRanks to initial load balance. Changed internal variable names to be more consistent. --- grid.cpp | 5 +++-- grid.h | 2 ++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/grid.cpp b/grid.cpp index b6c3527d3..4fe271768 100644 --- a/grid.cpp +++ b/grid.cpp @@ -138,6 +138,7 @@ void initializeGrid( if (myRank == MASTER_RANK) logFile << "(INIT): Starting initial load balance." 
<< endl << writeVerbose; mpiGrid.balance_load(); recalculateLocalCellsCache(); + setFaceNeighborRanks( mpiGrid ); phiprof::stop("Initial load-balancing"); if (myRank == MASTER_RANK) logFile << "(INIT): Set initial state." << endl << writeVerbose; @@ -335,13 +336,13 @@ void setFaceNeighborRanks( dccrg::Dccrg& if (!cell) continue; - cell->face_neighbor_processes.clear(); + cell->face_neighbor_ranks.clear(); const auto& faceNeighbors = mpiGrid.get_face_neighbors_of(cellid); for (const auto& nbr : faceNeighbors) { - cell->face_neighbor_processes.insert(mpiGrid.get_process(nbr.first)); + cell->face_neighbor_ranks.insert(mpiGrid.get_process(nbr.first)); } } diff --git a/grid.h b/grid.h index 041f5150a..efe795011 100644 --- a/grid.h +++ b/grid.h @@ -104,4 +104,6 @@ void shrink_to_fit_grid_data(dccrg::Dccrg * should be aborted.*/ bool validateMesh(dccrg::Dccrg& mpiGrid,const uint popID); +void setFaceNeighborRanks( dccrg::Dccrg& mpiGrid ); + #endif From feed0c657447178a6429da6a5e91e6ccfb463b04 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 28 Mar 2019 14:08:05 +0200 Subject: [PATCH 268/602] Variable name change on spatial_cell.cpp side --- spatial_cell.cpp | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/spatial_cell.cpp b/spatial_cell.cpp index 1ea7081de..c11048661 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -649,9 +649,16 @@ namespace spatial_cell { * neighbor. The values of neighbor_block_data * and neighbor_number_of_blocks should be set in * solver.*/ - for ( int i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { - displacements.push_back((uint8_t*) this->neighbor_block_data[i] - (uint8_t*) this); - block_lengths.push_back(sizeof(Realf) * VELOCITY_BLOCK_LENGTH * this->neighbor_number_of_blocks[i]); + + // Transfer only to ranks that contain face neighbors + // this->neighbor_number_of_blocks has been initialized to 0, on other ranks it can stay that way. + if (this->face_neighbor_ranks.find(receiver_rank) != this->face_neighbor_ranks.end()) { + + for ( int i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { + displacements.push_back((uint8_t*) this->neighbor_block_data[i] - (uint8_t*) this); + block_lengths.push_back(sizeof(Realf) * VELOCITY_BLOCK_LENGTH * this->neighbor_number_of_blocks[i]); + } + } } From 573a5de51508482072d87e26905a574b7eb7e0b7 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 28 Mar 2019 14:08:17 +0200 Subject: [PATCH 269/602] Commented out explicit copy operator that had re-appeared somehow. --- spatial_cell.hpp | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/spatial_cell.hpp b/spatial_cell.hpp index 39f47eab3..3e58d61cd 100644 --- a/spatial_cell.hpp +++ b/spatial_cell.hpp @@ -336,7 +336,7 @@ namespace spatial_cell { std::array neighbor_block_data; /**< Pointers for translation operator. We can point to neighbor * cell block data. We do not allocate memory for the pointer.*/ std::array neighbor_number_of_blocks; - std::set face_neighbor_processes; + std::set face_neighbor_ranks; uint sysBoundaryFlag; /**< What type of system boundary does the cell belong to. * Enumerated in the sysboundarytype namespace's enum.*/ uint sysBoundaryLayer; /**< Layers counted from closest systemBoundary. 
If 0 then it has not @@ -351,7 +351,7 @@ namespace spatial_cell { static uint64_t mpi_transfer_type; /**< Which data is transferred by the mpi datatype given by spatial cells.*/ static bool mpiTransferAtSysBoundaries; /**< Do we only transfer data at boundaries (true), or in the whole system (false).*/ - SpatialCell& operator=(const SpatialCell& other); + //SpatialCell& operator=(const SpatialCell& other); private: //SpatialCell& operator=(const SpatialCell&); @@ -1896,22 +1896,23 @@ namespace spatial_cell { return populations[popID].vmesh.hasGrandParent(blockGID); } - inline SpatialCell& SpatialCell::operator=(const SpatialCell& other) { - this->sysBoundaryFlag = other.sysBoundaryFlag; - this->sysBoundaryLayer = other.sysBoundaryLayer; - this->sysBoundaryLayerNew = other.sysBoundaryLayerNew; - this->velocity_block_with_content_list = other.velocity_block_with_content_list; - this->velocity_block_with_no_content_list = other.velocity_block_with_no_content_list; - this->initialized = other.initialized; - this->mpiTransferEnabled = other.mpiTransferEnabled; - this->parameters = other.parameters; - this->derivatives = other.derivatives; - this->derivativesBVOL = other.derivativesBVOL; - this->null_block_data = other.null_block_data; - this->populations = other.populations; + // inline SpatialCell& SpatialCell::operator=(const SpatialCell& other) { + // this->sysBoundaryFlag = other.sysBoundaryFlag; + // this->sysBoundaryLayer = other.sysBoundaryLayer; + // this->sysBoundaryLayerNew = other.sysBoundaryLayerNew; + // this->velocity_block_with_content_list = other.velocity_block_with_content_list; + // this->velocity_block_with_no_content_list = other.velocity_block_with_no_content_list; + // this->initialized = other.initialized; + // this->mpiTransferEnabled = other.mpiTransferEnabled; + // this->parameters = other.parameters; + // this->derivatives = other.derivatives; + // this->derivativesBVOL = other.derivativesBVOL; + // this->null_block_data = other.null_block_data; + // this->populations = other.populations; + // this->face_neighbor_ranks = other.face_neighbor_ranks; - return *this; - } + // return *this; + // } // inline SpatialCell& SpatialCell::operator=(const SpatialCell&) { // return *this; From 8c83512de633d012abf93be595179c9806ec474c Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 28 Mar 2019 14:10:38 +0200 Subject: [PATCH 270/602] Added a TODO comment --- grid.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grid.cpp b/grid.cpp index 4fe271768..8248c404a 100644 --- a/grid.cpp +++ b/grid.cpp @@ -327,7 +327,7 @@ Record for each cell which processes own one or more of its face neighbors void setFaceNeighborRanks( dccrg::Dccrg& mpiGrid ) { const auto& cells = mpiGrid.get_cells(); - + // TODO: Try a #pragma omp parallel for for (const auto& cellid : cells) { if (cellid == INVALID_CELLID) continue; From 9b6a4b19474e8c9183f39e0589665b20b9ba0bf9 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 28 Mar 2019 15:26:14 +0200 Subject: [PATCH 271/602] Initializing activePopID to 0 instead of -1. Initializing N_blocks = 0. Modified debugging output to work with the changes made earlier. Off by default. 
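Zero-initializing the block counts is what makes the 0-size message scheme safe: the per-neighbor (displacement, block length) pairs are assembled into a derived MPI datatype, and an entry whose length is zero contributes nothing to the transfer. Roughly (a sketch of the mechanism, assuming displacements is a std::vector<MPI_Aint> and block_lengths a std::vector<int> as elsewhere in spatial_cell):

   MPI_Datatype datatype;
   MPI_Type_create_hindexed(static_cast<int>(displacements.size()),
                            block_lengths.data(),   // zero for slots with no data
                            displacements.data(),   // byte offsets from the cell base
                            MPI_BYTE, &datatype);
   MPI_Type_commit(&datatype);
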
--- spatial_cell.cpp | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/spatial_cell.cpp b/spatial_cell.cpp index c11048661..9fb9a738e 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -34,7 +34,7 @@ using namespace std; namespace spatial_cell { - int SpatialCell::activePopID = -1; + int SpatialCell::activePopID = 0; uint64_t SpatialCell::mpi_transfer_type = 0; bool SpatialCell::mpiTransferAtSysBoundaries = false; @@ -74,6 +74,7 @@ namespace spatial_cell { const species::Species& spec = getObjectWrapper().particleSpecies[popID]; populations[popID].vmesh.initialize(spec.velocityMesh); populations[popID].velocityBlockMinValue = spec.sparseMinValue; + populations[popID].N_blocks = 0; } } @@ -838,11 +839,13 @@ namespace spatial_cell { int myRank; MPI_Type_size(datatype,&mpiSize); MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - cout << myRank << " get_mpi_datatype: " << cellID << " " << sender_rank << " " << receiver_rank << " " << mpiSize << ", Nblocks = " << populations[activePopID].N_blocks << ", nbr Nblocks ="; - for (uint i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { - cout << " " << this->neighbor_number_of_blocks[i]; + if (this->face_neighbor_ranks.find(receiver_rank) != this->face_neighbor_ranks.end()) { + cout << myRank << " get_mpi_datatype: " << cellID << " " << sender_rank << " " << receiver_rank << " " << mpiSize << ", Nblocks = " << populations[activePopID].N_blocks << ", nbr Nblocks ="; + for (uint i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { + cout << " " << this->neighbor_number_of_blocks[i]; + } + cout << endl; } - cout << endl; } return std::make_tuple(address,count,datatype); From e11676d15f4bdd325928ebc79cfb709777f4daea Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 29 Mar 2019 11:19:30 +0200 Subject: [PATCH 272/602] Receiving cells are remote and do not have face_neighbor_ranks set, therefore we always set up buffers for receiving cells. The value of neighbor nblocks is set to 0 if there are no face neighbors in the solver. --- spatial_cell.cpp | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/spatial_cell.cpp b/spatial_cell.cpp index 9fb9a738e..8d85c757a 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -651,9 +651,9 @@ namespace spatial_cell { * and neighbor_number_of_blocks should be set in * solver.*/ - // Transfer only to ranks that contain face neighbors + // Send this data only to ranks that contain face neighbors // this->neighbor_number_of_blocks has been initialized to 0, on other ranks it can stay that way. 
- if (this->face_neighbor_ranks.find(receiver_rank) != this->face_neighbor_ranks.end()) { + if ( receiving || this->face_neighbor_ranks.find(receiver_rank) != this->face_neighbor_ranks.end()) { for ( int i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { displacements.push_back((uint8_t*) this->neighbor_block_data[i] - (uint8_t*) this); @@ -833,19 +833,25 @@ namespace spatial_cell { datatype = MPI_BYTE; } - const bool printMpiDatatype = false; + const bool printMpiDatatype = true; if(printMpiDatatype) { int mpiSize; int myRank; MPI_Type_size(datatype,&mpiSize); MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - if (this->face_neighbor_ranks.find(receiver_rank) != this->face_neighbor_ranks.end()) { - cout << myRank << " get_mpi_datatype: " << cellID << " " << sender_rank << " " << receiver_rank << " " << mpiSize << ", Nblocks = " << populations[activePopID].N_blocks << ", nbr Nblocks ="; - for (uint i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { + cout << myRank << " get_mpi_datatype: " << cellID << " " << sender_rank << " " << receiver_rank << " " << mpiSize << ", Nblocks = " << populations[activePopID].N_blocks << ", nbr Nblocks ="; + for (uint i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { + if ( receiving || this->face_neighbor_ranks.find(receiver_rank) != this->face_neighbor_ranks.end()) { cout << " " << this->neighbor_number_of_blocks[i]; + } else { + cout << " " << 0; } - cout << endl; } + cout << " face_neighbor_ranks ="; + for (const auto& rank : this->face_neighbor_ranks) { + cout << " " << rank; + } + cout << endl; } return std::make_tuple(address,count,datatype); From 6ae8ea9a7e7db5952b847245594089eac1fee001 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 29 Mar 2019 12:08:04 +0200 Subject: [PATCH 273/602] Store neighbor_face_ranks in a std::map container with neighborhood id as a key. Only compare against the neighborhood passed as an argument to get_mpi_datatype when deciding whether to send neighbor block data. --- grid.cpp | 27 ++++++++++++++++++++++++++- spatial_cell.cpp | 8 +++++--- spatial_cell.hpp | 2 +- 3 files changed, 32 insertions(+), 5 deletions(-) diff --git a/grid.cpp b/grid.cpp index 8248c404a..6f0a0e576 100644 --- a/grid.cpp +++ b/grid.cpp @@ -342,7 +342,32 @@ void setFaceNeighborRanks( dccrg::Dccrg& for (const auto& nbr : faceNeighbors) { - cell->face_neighbor_ranks.insert(mpiGrid.get_process(nbr.first)); + int neighborhood; + + // We store rank numbers into a map that has neighborhood ids as its key values. + + switch (nbr.second) { + case -3: + neighborhood = SHIFT_M_Z_NEIGHBORHOOD_ID; + break; + case -2: + neighborhood = SHIFT_M_Y_NEIGHBORHOOD_ID; + break; + case -1: + neighborhood = SHIFT_M_X_NEIGHBORHOOD_ID; + break; + case +1: + neighborhood = SHIFT_P_X_NEIGHBORHOOD_ID; + break; + case +2: + neighborhood = SHIFT_P_Y_NEIGHBORHOOD_ID; + break; + case +3: + neighborhood = SHIFT_P_Z_NEIGHBORHOOD_ID; + break; + } + + cell->face_neighbor_ranks[neighborhood].insert(mpiGrid.get_process(nbr.first)); } } diff --git a/spatial_cell.cpp b/spatial_cell.cpp index 8d85c757a..95518c57f 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -653,7 +653,8 @@ namespace spatial_cell { // Send this data only to ranks that contain face neighbors // this->neighbor_number_of_blocks has been initialized to 0, on other ranks it can stay that way. 
- if ( receiving || this->face_neighbor_ranks.find(receiver_rank) != this->face_neighbor_ranks.end()) { + const set& ranks = this->face_neighbor_ranks[neighborhood]; + if ( receiving || ranks.find(receiver_rank) != ranks.end()) { for ( int i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { displacements.push_back((uint8_t*) this->neighbor_block_data[i] - (uint8_t*) this); @@ -841,14 +842,15 @@ namespace spatial_cell { MPI_Comm_rank(MPI_COMM_WORLD,&myRank); cout << myRank << " get_mpi_datatype: " << cellID << " " << sender_rank << " " << receiver_rank << " " << mpiSize << ", Nblocks = " << populations[activePopID].N_blocks << ", nbr Nblocks ="; for (uint i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { - if ( receiving || this->face_neighbor_ranks.find(receiver_rank) != this->face_neighbor_ranks.end()) { + const set& ranks = this->face_neighbor_ranks[neighborhood]; + if ( receiving || ranks.find(receiver_rank) != ranks.end()) { cout << " " << this->neighbor_number_of_blocks[i]; } else { cout << " " << 0; } } cout << " face_neighbor_ranks ="; - for (const auto& rank : this->face_neighbor_ranks) { + for (const auto& rank : this->face_neighbor_ranks[neighborhood]) { cout << " " << rank; } cout << endl; diff --git a/spatial_cell.hpp b/spatial_cell.hpp index 3e58d61cd..6bc7e00f0 100644 --- a/spatial_cell.hpp +++ b/spatial_cell.hpp @@ -336,7 +336,7 @@ namespace spatial_cell { std::array neighbor_block_data; /**< Pointers for translation operator. We can point to neighbor * cell block data. We do not allocate memory for the pointer.*/ std::array neighbor_number_of_blocks; - std::set face_neighbor_ranks; + std::map> face_neighbor_ranks; uint sysBoundaryFlag; /**< What type of system boundary does the cell belong to. * Enumerated in the sysboundarytype namespace's enum.*/ uint sysBoundaryLayer; /**< Layers counted from closest systemBoundary. If 0 then it has not From 310adc153472d51d3c0abd8fc867a43082ab3fab Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 29 Mar 2019 12:24:20 +0200 Subject: [PATCH 274/602] Disable debugging printouts --- spatial_cell.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spatial_cell.cpp b/spatial_cell.cpp index 95518c57f..0a3fcc2b3 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -834,7 +834,7 @@ namespace spatial_cell { datatype = MPI_BYTE; } - const bool printMpiDatatype = true; + const bool printMpiDatatype = false; if(printMpiDatatype) { int mpiSize; int myRank; From e74c265a12290916e683cea7dc5353d890ec8694 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 1 Apr 2019 10:48:37 +0300 Subject: [PATCH 275/602] Modified debugging output. 
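The output being renamed belongs to a simple conservation check: each rank sums a moment over its local cells, then the sums are reduced to the master rank. The surrounding pattern, for context (nSum, myRank and MASTER_RANK as in the hunk below; the final print is a sketch, assuming Real maps to MPI_DOUBLE as in the reduce):

   cout << "Rank " << myRank << ", Local sum = " << nSum << endl;
   Real globalSum = 0.0;
   MPI_Reduce(&nSum, &globalSum, 1, MPI_DOUBLE, MPI_SUM, MASTER_RANK, MPI_COMM_WORLD);
   if (myRank == MASTER_RANK) cout << "Global sum = " << globalSum << endl;
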
--- vlasiator.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 71dfef411..2af5cc8e9 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -697,7 +697,7 @@ int main(int argn,char* args[]) { if(printCells) cout << "Cell " << cell << " rho = " << rho << " x: " << x << " y: " << y << " z: " << z << endl; } if(printSums) { - cout << "Rank " << myRank << ", nSum = " << nSum << endl; + cout << "Rank " << myRank << ", Local sum = " << nSum << endl; Real globalSum = 0.0; MPI_Reduce(&nSum, &globalSum, 1, MPI_DOUBLE, MPI_SUM, MASTER_RANK, MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD); From c66eb2ea7481a5ff9e41ccd061bd5fc324c3f52d Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 1 Apr 2019 11:15:25 +0300 Subject: [PATCH 276/602] Updated 2nd refinement level criteria --- projects/Flowthrough/Flowthrough.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/projects/Flowthrough/Flowthrough.cpp b/projects/Flowthrough/Flowthrough.cpp index 909418c50..88899ab09 100644 --- a/projects/Flowthrough/Flowthrough.cpp +++ b/projects/Flowthrough/Flowthrough.cpp @@ -267,14 +267,14 @@ namespace projects { if(mpiGrid.get_maximum_refinement_level() > 1) { - for (int i = 0; i < P::amrBoxHalfWidthX; ++i) { - for (int j = 0; j < P::amrBoxHalfWidthY; ++j) { - for (int k = 0; k < P::amrBoxHalfWidthZ; ++k) { + for (int i = 0; i < 2 * P::amrBoxHalfWidthX; ++i) { + for (int j = 0; j < 2 * P::amrBoxHalfWidthY; ++j) { + for (int k = 0; k < 2 * P::amrBoxHalfWidthZ; ++k) { std::array xyz; - xyz[0] = P::amrBoxCenterX + (0.5 + i - 0.5 * P::amrBoxHalfWidthX) * P::dx_ini; - xyz[1] = P::amrBoxCenterY + (0.5 + j - 0.5 * P::amrBoxHalfWidthY) * P::dy_ini; - xyz[2] = P::amrBoxCenterZ + (0.5 + k - 0.5 * P::amrBoxHalfWidthZ) * P::dz_ini; + xyz[0] = P::amrBoxCenterX + 0.5 * (0.5 + i - P::amrBoxHalfWidthX) * P::dx_ini; + xyz[1] = P::amrBoxCenterY + 0.5 * (0.5 + j - P::amrBoxHalfWidthY) * P::dy_ini; + xyz[2] = P::amrBoxCenterZ + 0.5 * (0.5 + k - P::amrBoxHalfWidthZ) * P::dz_ini; CellID myCell = mpiGrid.get_existing_cell(xyz); if (mpiGrid.refine_completely_at(xyz)) { From ea703f533890ef8c0e2bdd969934d78103b70046 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 2 Apr 2019 11:31:56 +0300 Subject: [PATCH 277/602] Added #ifdef NDEBUG's around printouts --- projects/Flowthrough/Flowthrough.cpp | 10 +++++++++- projects/Magnetosphere/Magnetosphere.cpp | 4 ++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/projects/Flowthrough/Flowthrough.cpp b/projects/Flowthrough/Flowthrough.cpp index 88899ab09..744ca522a 100644 --- a/projects/Flowthrough/Flowthrough.cpp +++ b/projects/Flowthrough/Flowthrough.cpp @@ -248,13 +248,16 @@ namespace projects { CellID myCell = mpiGrid.get_existing_cell(xyz); if (mpiGrid.refine_completely_at(xyz)) { +#ifndef NDEBUG std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; +#endif } } } } std::vector refinedCells = mpiGrid.stop_refining(true); if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << endl; +#ifndef NDEBUG if(refinedCells.size() > 0) { std::cout << "Refined cells produced by rank " << myRank << " are: "; for (auto cellid : refinedCells) { @@ -262,6 +265,7 @@ namespace projects { } std::cout << endl; } +#endif mpiGrid.balance_load(); @@ -278,7 +282,9 @@ namespace projects { CellID myCell = mpiGrid.get_existing_cell(xyz); if (mpiGrid.refine_completely_at(xyz)) { +#ifndef NDEBUG std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; +#endif } } } @@ 
-286,13 +292,15 @@ namespace projects { std::vector refinedCells = mpiGrid.stop_refining(true); if(myRank == MASTER_RANK) std::cout << "Finished second level of refinement" << endl; +#ifndef NDEBUG if(refinedCells.size() > 0) { std::cout << "Refined cells produced by rank " << myRank << " are: "; for (auto cellid : refinedCells) { std::cout << cellid << " "; } std::cout << endl; - } + } +#endif mpiGrid.balance_load(); } diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index ec7ba90e5..9ba4b7d85 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -515,9 +515,11 @@ namespace projects { } refinedCells = mpiGrid.stop_refining(true); if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << endl; +#ifndef NDEBUG if(refinedCells.size() > 0) { std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. " << std::endl; } +#endif mpiGrid.balance_load(); } @@ -550,9 +552,11 @@ namespace projects { } refinedCells = mpiGrid.stop_refining(true); if(myRank == MASTER_RANK) std::cout << "Finished second level of refinement" << endl; +#ifndef NDEBUG if(refinedCells.size() > 0) { std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. " << std::endl; } +#endif mpiGrid.balance_load(); } From 49143d4545de6a24376a86868efe1dbe76f09f3f Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 2 Apr 2019 12:02:15 +0300 Subject: [PATCH 278/602] Bug fix: In setOfPencils.split(), when searching for members of this pencil in other pencils to avoid adding duplicate paths, we were only checking the last step of the path, which allowed false positives on refinement levels >1. Now checking all steps of the path. --- vlasovsolver/cpu_trans_map_amr.cpp | 20 -------------------- vlasovsolver/cpu_trans_map_amr.hpp | 13 +++++++++++-- 2 files changed, 11 insertions(+), 22 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 43b336f23..ea5394dae 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1576,30 +1576,10 @@ void update_remote_mapping_contribution_amr( Realf *blockData = receive_cell->get_data(popID); Realf *neighborData = origin_cell->neighbor_block_data[receive_origin_index[c]]; - // cout << "Rank " << myRank << ", dim " << dimension << ", dir " << direction; - // cout << ". Neighbor data of remote cell " << receive_origin_cells[c] << " is added to local cell " << receive_cells[c]; - // cout << " with index " << receive_origin_index[c]; - - Realf checksum = 0.0; - //#pragma omp for for(uint vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * receive_cell->get_number_of_velocity_blocks(popID); ++vCell) { blockData[vCell] += neighborData[vCell]; - checksum += neighborData[vCell]; } - - //cout << ". Sum is " << checksum << endl; - - // array allChecksums = {}; - // cout << ". Sums are "; - // for (uint i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { - // neighborData = origin_cell->neighbor_block_data[i]; - // for(uint vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * receive_cell->get_number_of_velocity_blocks(popID); ++vCell) { - // allChecksums[i] += neighborData[vCell]; - // } - // cout << allChecksums[i] << " "; - // } - // cout << endl; } // send cell data is set to zero. 
This is to avoid double copy if diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index 7bab64d88..6b73522a5 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -115,8 +115,17 @@ struct setOfPencils { std::vector theirPath = this->path.at(theirPencilId); std::vector myPath = this->path.at(myPencilId); if(theirPath.size() > myPath.size()) { - uint theirStep = theirPath.at(myPath.size()); - existingSteps.push_back(theirStep); + bool samePath = true; + for (uint i = 0; i < myPath.size(); ++i) { + if(myPath.at(i) != theirPath.at(i)) { + samePath = false; + } + } + + if(samePath) { + uint theirStep = theirPath.at(myPath.size()); + existingSteps.push_back(theirStep); + } } } } From c1eb0e0ec3f111fa31b9958d31d59245169588d8 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 2 Apr 2019 13:40:57 +0300 Subject: [PATCH 279/602] Fixed bug in testAmr density setup. --- projects/testAmr/testAmr.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index 07cd41cea..98f6481c0 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -172,8 +172,8 @@ namespace projects { break; case TestCase: rhoFactor = 1.0; - if (x < 0.31 * (P::xmax - P::xmin) && - y < 0.31 * (P::ymax - P::ymin)) { + if (x < P::xmin + 0.31 * (P::xmax - P::xmin) && + y < P::ymin + 0.31 * (P::ymax - P::ymin)) { rhoFactor = 3.0; } break; From 650ddbb4f7f985818bb7647dd9958ad29e575fd5 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Tue, 2 Apr 2019 15:59:55 +0300 Subject: [PATCH 280/602] Enable load balance --- vlasiator.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 2af5cc8e9..19c888e56 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -898,7 +898,7 @@ int main(int argn,char* args[]) { //Re-loadbalance if needed //TODO - add LB measure and do LB if it exceeds threshold #warning Re-loadbalance has been disabled temporarily for amr debugging - if(((P::tstep % P::rebalanceInterval == 0 && P::tstep > P::tstep_min) || overrideRebalanceNow) && false) { + if(((P::tstep % P::rebalanceInterval == 0 && P::tstep > P::tstep_min) || overrideRebalanceNow)) { logFile << "(LB): Start load balance, tstep = " << P::tstep << " t = " << P::t << endl << writeVerbose; balanceLoad(mpiGrid, sysBoundaries); addTimedBarrier("barrier-end-load-balance"); From 60b6560bf91b294dfc381bba6d23db3eb462b819 Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 3 Apr 2019 11:51:21 +0300 Subject: [PATCH 281/602] Activate timed barriers temporarily. REMOVE LATER --- vlasiator.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 2af5cc8e9..dab553559 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -82,10 +82,10 @@ bool globalflags::balanceLoad = 0; ObjectWrapper objectWrapper; void addTimedBarrier(string name){ -#ifdef NDEBUG -//let's not do a barrier - return; -#endif +//#ifdef NDEBUG +// //let's not do a barrier +// return; +//#endif int bt=phiprof::initializeTimer(name,"Barriers","MPI"); phiprof::start(bt); MPI_Barrier(MPI_COMM_WORLD); From e9e05ad6cfd667493c8753059e5d2bfe72e85ed4 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 5 Apr 2019 16:42:26 +0300 Subject: [PATCH 282/602] Uncommented update_remote_contribution function in cpu_trans_map.cpp, modified the neighbor block data field accesses to work with the new array structures. 
Modified vlasovmover.cpp to select between amr and non-amr trans_map_1d - function according to the max refinement level. --- vlasovsolver/cpu_trans_map.cpp | 33 +++++++++++++++++++++------------ vlasovsolver/vlasovmover.cpp | 20 ++++++++++++++++---- 2 files changed, 37 insertions(+), 16 deletions(-) diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index 19bfb8606..80e59b3eb 100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -638,7 +638,6 @@ bool trans_map_1d(const dccrg::Dccrg& mpi \par direction: 1 for + dir, -1 for - dir */ -/* void update_remote_mapping_contribution( dccrg::Dccrg& mpiGrid, const uint dimension, @@ -661,8 +660,14 @@ void update_remote_mapping_contribution( for (size_t c=0; cneighbor_block_data = ccell->get_data(popID); - ccell->neighbor_number_of_blocks = 0; + for (uint i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { + if(i == 0) { + ccell->neighbor_block_data.at(i) = ccell->get_data(popID); + } else { + ccell->neighbor_block_data.at(i) = NULL; + } + ccell->neighbor_number_of_blocks.at(i) = 0; + } } //TODO: prepare arrays, make parallel by avoidin push_back and by checking also for other stuff @@ -670,8 +675,14 @@ void update_remote_mapping_contribution( SpatialCell *ccell = mpiGrid[local_cells[c]]; //default values, to avoid any extra sends and receives - ccell->neighbor_block_data = ccell->get_data(popID); - ccell->neighbor_number_of_blocks = 0; + for (uint i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { + if(i == 0) { + ccell->neighbor_block_data.at(i) = ccell->get_data(popID); + } else { + ccell->neighbor_block_data.at(i) = NULL; + } + ccell->neighbor_number_of_blocks.at(i) = 0; + } CellID p_ngbr,m_ngbr; // switch (dimension) { // case 0: @@ -728,8 +739,8 @@ void update_remote_mapping_contribution( //mapped to if 1) it is a valid target, //2) is remote cell, 3) if the source cell in center was //translated - ccell->neighbor_block_data = pcell->get_data(popID); - ccell->neighbor_number_of_blocks = pcell->get_number_of_velocity_blocks(popID); + ccell->neighbor_block_data[0] = pcell->get_data(popID); + ccell->neighbor_number_of_blocks[0] = pcell->get_number_of_velocity_blocks(popID); send_cells.push_back(p_ngbr); } if (m_ngbr != INVALID_CELLID && @@ -738,11 +749,11 @@ void update_remote_mapping_contribution( //Receive data that mcell mapped to ccell to this local cell //data array, if 1) m is a valid source cell, 2) center cell is to be updated (normal cell) 3) m is remote //we will here allocate a receive buffer, since we need to aggregate values - mcell->neighbor_number_of_blocks = ccell->get_number_of_velocity_blocks(popID); - mcell->neighbor_block_data = (Realf*) aligned_malloc(mcell->neighbor_number_of_blocks * WID3 * sizeof(Realf), 64); + mcell->neighbor_number_of_blocks[0] = ccell->get_number_of_velocity_blocks(popID); + mcell->neighbor_block_data[0] = (Realf*) aligned_malloc(mcell->neighbor_number_of_blocks[0] * WID3 * sizeof(Realf), 64); receive_cells.push_back(local_cells[c]); - receiveBuffers.push_back(mcell->neighbor_block_data); + receiveBuffers.push_back(mcell->neighbor_block_data[0]); } } @@ -799,5 +810,3 @@ void update_remote_mapping_contribution( } } -*/ - diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index e80a8d2f8..026a8b7d6 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -89,7 +89,11 @@ void calculateSpatialTranslation( phiprof::stop(trans_timer); phiprof::start("compute-mapping-z"); - trans_map_1d_amr(mpiGrid,local_propagated_cells, 
remoteTargetCellsz, 2, dt,popID); // map along z// + if(P::amrMaxSpatialRefLevel = 0) { + trans_map_1d(mpiGrid,local_propagated_cells, remoteTargetCellsz, 2, dt,popID); // map along z// + } else { + trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsz, 2, dt,popID); // map along z// + } phiprof::stop("compute-mapping-z"); trans_timer=phiprof::initializeTimer("update_remote-z","MPI"); @@ -112,7 +116,11 @@ void calculateSpatialTranslation( phiprof::stop(trans_timer); phiprof::start("compute-mapping-x"); - trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsx, 0,dt,popID); // map along x// + if(P::amrMaxSpatialRefLevel = 0) { + trans_map_1d(mpiGrid,local_propagated_cells, remoteTargetCellsx, 0,dt,popID); // map along x// + } else { + trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsx, 0,dt,popID); // map along x// + } phiprof::stop("compute-mapping-x"); trans_timer=phiprof::initializeTimer("update_remote-x","MPI"); @@ -134,8 +142,12 @@ void calculateSpatialTranslation( mpiGrid.update_copies_of_remote_neighbors(VLASOV_SOLVER_Y_NEIGHBORHOOD_ID); phiprof::stop(trans_timer); - phiprof::start("compute-mapping-y"); - trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsy, 1,dt,popID); // map along y// + phiprof::start("compute-mapping-y"); + if(P::amrMaxSpatialRefLevel = 0) { + trans_map_1d(mpiGrid,local_propagated_cells, remoteTargetCellsy, 1,dt,popID); // map along y// + } else { + trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsy, 1,dt,popID); // map along y// + } phiprof::stop("compute-mapping-y"); trans_timer=phiprof::initializeTimer("update_remote-y","MPI"); From b302f1936879fc42329e00e8bd9a8f15c77126f2 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 5 Apr 2019 17:15:05 +0300 Subject: [PATCH 283/602] Added amr-awareness to update_remote_mapping_contribution calls. 
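Each translation step now selects the solver pair at run time. The per-dimension pattern repeated in the hunks below is, in outline (shown with the comparison written as ==; the hunks themselves still carry an assignment here, corrected in the next patch of the series):

   if (P::amrMaxSpatialRefLevel == 0) {
      update_remote_mapping_contribution(mpiGrid, dim, +1, popID);
      update_remote_mapping_contribution(mpiGrid, dim, -1, popID);
   } else {
      update_remote_mapping_contribution_amr(mpiGrid, dim, +1, popID);
      update_remote_mapping_contribution_amr(mpiGrid, dim, -1, popID);
   }

with dim = 0, 1, 2 for x, y and z respectively.
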
--- vlasovsolver/vlasovmover.cpp | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index 026a8b7d6..515c7f06d 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -98,8 +98,13 @@ void calculateSpatialTranslation( trans_timer=phiprof::initializeTimer("update_remote-z","MPI"); phiprof::start("update_remote-z"); - update_remote_mapping_contribution_amr(mpiGrid, 2,+1,popID); - update_remote_mapping_contribution_amr(mpiGrid, 2,-1,popID); + if(P::amrMaxSpatialRefLevel = 0) { + update_remote_mapping_contribution(mpiGrid, 2,+1,popID); + update_remote_mapping_contribution(mpiGrid, 2,-1,popID); + } else { + update_remote_mapping_contribution_amr(mpiGrid, 2,+1,popID); + update_remote_mapping_contribution_amr(mpiGrid, 2,-1,popID); + } phiprof::stop("update_remote-z"); } @@ -125,8 +130,13 @@ void calculateSpatialTranslation( trans_timer=phiprof::initializeTimer("update_remote-x","MPI"); phiprof::start("update_remote-x"); - update_remote_mapping_contribution_amr(mpiGrid, 0,+1,popID); - update_remote_mapping_contribution_amr(mpiGrid, 0,-1,popID); + if(P::amrMaxSpatialRefLevel = 0) { + update_remote_mapping_contribution(mpiGrid, 0,+1,popID); + update_remote_mapping_contribution(mpiGrid, 0,-1,popID); + } else { + update_remote_mapping_contribution_amr(mpiGrid, 0,+1,popID); + update_remote_mapping_contribution_amr(mpiGrid, 0,-1,popID); + } phiprof::stop("update_remote-x"); } @@ -152,8 +162,13 @@ void calculateSpatialTranslation( trans_timer=phiprof::initializeTimer("update_remote-y","MPI"); phiprof::start("update_remote-y"); - update_remote_mapping_contribution_amr(mpiGrid, 1,+1,popID); - update_remote_mapping_contribution_amr(mpiGrid, 1,-1,popID); + if(P::amrMaxSpatialRefLevel = 0) { + update_remote_mapping_contribution(mpiGrid, 1,+1,popID); + update_remote_mapping_contribution(mpiGrid, 1,-1,popID); + } else { + update_remote_mapping_contribution_amr(mpiGrid, 1,+1,popID); + update_remote_mapping_contribution_amr(mpiGrid, 1,-1,popID); + } phiprof::stop("update_remote-y"); } From 9936923d514abc559c81f8ee91b429061e2e0d7e Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 5 Apr 2019 17:17:35 +0300 Subject: [PATCH 284/602] Uncommented update_remote_neighbor_contribution in header of cpu_trans_map. Fixed logical conditions in if statements. 
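The logical condition being fixed is the classic assignment-in-condition slip: in C++ the expression assigns and then evaluates to the assigned value, so

   if (P::amrMaxSpatialRefLevel = 0) { ... }   // condition is always 0, i.e. false

silently forced the else (AMR) branch on every call regardless of the actual refinement level. The hunks below restore the comparison:

   if (P::amrMaxSpatialRefLevel == 0) { ... }  // taken on unrefined runs
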
--- vlasovsolver/cpu_trans_map.hpp | 10 +++++----- vlasovsolver/vlasovmover.cpp | 12 ++++++------ 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/vlasovsolver/cpu_trans_map.hpp b/vlasovsolver/cpu_trans_map.hpp index 7ecefcd96..3f993cf7b 100644 --- a/vlasovsolver/cpu_trans_map.hpp +++ b/vlasovsolver/cpu_trans_map.hpp @@ -51,11 +51,11 @@ bool trans_map_1d(const dccrg::Dccrg& mpiGrid, -// const uint dimension, -// int direction, -// const uint popID); +void update_remote_mapping_contribution(dccrg::Dccrg& mpiGrid, + const uint dimension, + int direction, + const uint popID); void compute_spatial_source_neighbors(const dccrg::Dccrg& mpiGrid, diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index 515c7f06d..c17cc3184 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -89,7 +89,7 @@ void calculateSpatialTranslation( phiprof::stop(trans_timer); phiprof::start("compute-mapping-z"); - if(P::amrMaxSpatialRefLevel = 0) { + if(P::amrMaxSpatialRefLevel == 0) { trans_map_1d(mpiGrid,local_propagated_cells, remoteTargetCellsz, 2, dt,popID); // map along z// } else { trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsz, 2, dt,popID); // map along z// @@ -98,7 +98,7 @@ void calculateSpatialTranslation( trans_timer=phiprof::initializeTimer("update_remote-z","MPI"); phiprof::start("update_remote-z"); - if(P::amrMaxSpatialRefLevel = 0) { + if(P::amrMaxSpatialRefLevel == 0) { update_remote_mapping_contribution(mpiGrid, 2,+1,popID); update_remote_mapping_contribution(mpiGrid, 2,-1,popID); } else { @@ -121,7 +121,7 @@ void calculateSpatialTranslation( phiprof::stop(trans_timer); phiprof::start("compute-mapping-x"); - if(P::amrMaxSpatialRefLevel = 0) { + if(P::amrMaxSpatialRefLevel == 0) { trans_map_1d(mpiGrid,local_propagated_cells, remoteTargetCellsx, 0,dt,popID); // map along x// } else { trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsx, 0,dt,popID); // map along x// @@ -130,7 +130,7 @@ void calculateSpatialTranslation( trans_timer=phiprof::initializeTimer("update_remote-x","MPI"); phiprof::start("update_remote-x"); - if(P::amrMaxSpatialRefLevel = 0) { + if(P::amrMaxSpatialRefLevel == 0) { update_remote_mapping_contribution(mpiGrid, 0,+1,popID); update_remote_mapping_contribution(mpiGrid, 0,-1,popID); } else { @@ -153,7 +153,7 @@ void calculateSpatialTranslation( phiprof::stop(trans_timer); phiprof::start("compute-mapping-y"); - if(P::amrMaxSpatialRefLevel = 0) { + if(P::amrMaxSpatialRefLevel == 0) { trans_map_1d(mpiGrid,local_propagated_cells, remoteTargetCellsy, 1,dt,popID); // map along y// } else { trans_map_1d_amr(mpiGrid,local_propagated_cells, remoteTargetCellsy, 1,dt,popID); // map along y// @@ -162,7 +162,7 @@ void calculateSpatialTranslation( trans_timer=phiprof::initializeTimer("update_remote-y","MPI"); phiprof::start("update_remote-y"); - if(P::amrMaxSpatialRefLevel = 0) { + if(P::amrMaxSpatialRefLevel == 0) { update_remote_mapping_contribution(mpiGrid, 1,+1,popID); update_remote_mapping_contribution(mpiGrid, 1,-1,popID); } else { From e3f0e08f03fe2e732a93dc17c73cfcc89462c7c7 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Mon, 8 Apr 2019 11:29:57 +0300 Subject: [PATCH 285/602] Setting m_ngbr and p_ngbr correctly depending on direction --- vlasovsolver/cpu_trans_map.cpp | 64 ++++++++++++---------------------- 1 file changed, 23 insertions(+), 41 deletions(-) diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index 80e59b3eb..c38b222db 100644 --- 
a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -653,6 +653,10 @@ void update_remote_mapping_contribution( int myRank; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + + // MPI_Barrier(MPI_COMM_WORLD); + // cout << "begin update_remote_mapping_contribution, dimension = " << dimension << ", direction = " << direction << endl; + // MPI_Barrier(MPI_COMM_WORLD); //normalize if(direction > 0) direction = 1; @@ -683,48 +687,21 @@ void update_remote_mapping_contribution( } ccell->neighbor_number_of_blocks.at(i) = 0; } - CellID p_ngbr,m_ngbr; - // switch (dimension) { - // case 0: - // //p_ngbr is target, if in boundaries then it is not updated - // p_ngbr=get_spatial_neighbor(mpiGrid, local_cells[c], false, direction, 0, 0); - // //m_ngbr is source, first boundary layer is propagated so that it flows into system - // m_ngbr=get_spatial_neighbor(mpiGrid, local_cells[c], true, -direction, 0, 0); - // break; - // case 1: - // //p_ngbr is target, if in boundaries then it is not update - // p_ngbr=get_spatial_neighbor(mpiGrid, local_cells[c], false, 0, direction, 0); - // //m_ngbr is source, first boundary layer is propagated so that it flows into system - // m_ngbr=get_spatial_neighbor(mpiGrid, local_cells[c], true, 0, -direction, 0); - // break; - // case 2: - // //p_ngbr is target, if in boundaries then it is not update - // p_ngbr=get_spatial_neighbor(mpiGrid, local_cells[c], false, 0, 0, direction); - // //m_ngbr is source, first boundary layer is propagated so that it flows into system - // m_ngbr=get_spatial_neighbor(mpiGrid, local_cells[c], true, 0, 0, -direction); - // break; - // default: - // cerr << "Dimension wrong at (impossible!) "<< __FILE__ <<":" << __LINE__<front().first; - p_ngbr = NbrPairVector->back().first; + CellID p_ngbr = INVALID_CELLID; + CellID m_ngbr = INVALID_CELLID; + const auto faceNbrs = mpiGrid.get_face_neighbors_of(local_cells[c]); + + for (const auto nbr : faceNbrs) { + if(nbr.second == ((int)dimension + 1) * direction) { + p_ngbr = nbr.first; + } + + if(nbr.second == -1 * ((int)dimension + 1) * direction) { + m_ngbr = nbr.first; + } + } + //internal cell, not much to do if (mpiGrid.is_local(p_ngbr) && mpiGrid.is_local(m_ngbr)) continue; @@ -808,5 +785,10 @@ void update_remote_mapping_contribution( for (size_t c=0; c < receiveBuffers.size(); ++c) { aligned_free(receiveBuffers[c]); } + + // MPI_Barrier(MPI_COMM_WORLD); + // cout << "end update_remote_mapping_contribution, dimension = " << dimension << ", direction = " << direction << endl; + // MPI_Barrier(MPI_COMM_WORLD); + } From 630d8515bcf0dabafb99e2cdc28d28956b101aaa Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Mon, 8 Apr 2019 13:01:35 +0300 Subject: [PATCH 286/602] Populating the face_neighbor_ranks data set only when refinement level > 0 --- grid.cpp | 4 +++- spatial_cell.cpp | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/grid.cpp b/grid.cpp index 6f0a0e576..a742d8ab3 100644 --- a/grid.cpp +++ b/grid.cpp @@ -138,7 +138,9 @@ void initializeGrid( if (myRank == MASTER_RANK) logFile << "(INIT): Starting initial load balance." << endl << writeVerbose; mpiGrid.balance_load(); recalculateLocalCellsCache(); - setFaceNeighborRanks( mpiGrid ); + if(P::amrMaxSpatialRefLevel > 0) { + setFaceNeighborRanks( mpiGrid ); + } phiprof::stop("Initial load-balancing"); if (myRank == MASTER_RANK) logFile << "(INIT): Set initial state." 
<< endl << writeVerbose; diff --git a/spatial_cell.cpp b/spatial_cell.cpp index 0a3fcc2b3..65d3b95a9 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -654,7 +654,7 @@ namespace spatial_cell { // Send this data only to ranks that contain face neighbors // this->neighbor_number_of_blocks has been initialized to 0, on other ranks it can stay that way. const set& ranks = this->face_neighbor_ranks[neighborhood]; - if ( receiving || ranks.find(receiver_rank) != ranks.end()) { + if ( P::amrMaxSpatialRefLevel == 0 || receiving || ranks.find(receiver_rank) != ranks.end()) { for ( int i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { displacements.push_back((uint8_t*) this->neighbor_block_data[i] - (uint8_t*) this); From 7eb2034310a10c83120095f01783a7706c9dd4ec Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Mon, 8 Apr 2019 12:33:24 +0300 Subject: [PATCH 287/602] Update fsgrid coupling calls to share coupling information. --- vlasiator.cpp | 112 +++++++++++--------------------------------------- 1 file changed, 23 insertions(+), 89 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 19c888e56..72a8e8980 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -399,22 +399,24 @@ int main(int argn,char* args[]) { convert(P::zcells_ini) * pow(2,P::amrMaxSpatialRefLevel)}; std::array periodicity{mpiGrid.topology.is_periodic(0), - mpiGrid.topology.is_periodic(1), - mpiGrid.topology.is_periodic(2)}; + mpiGrid.topology.is_periodic(1), + mpiGrid.topology.is_periodic(2)}; - FsGrid< std::array, 2> perBGrid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> perBDt2Grid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> EGrid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> EDt2Grid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> EHallGrid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> EGradPeGrid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> momentsGrid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> momentsDt2Grid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> dPerBGrid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> dMomentsGrid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> BgBGrid(fsGridDimensions, comm, periodicity); - FsGrid< std::array, 2> volGrid(fsGridDimensions, comm, periodicity); - FsGrid< fsgrids::technical, 2> technicalGrid(fsGridDimensions, comm, periodicity); + FsGridCouplingInformation gridCoupling; + FsGrid< std::array, 2> perBGrid(fsGridDimensions, comm, periodicity,gridCoupling); + FsGrid< std::array, 2> perBDt2Grid(fsGridDimensions, comm, periodicity,gridCoupling); + FsGrid< std::array, 2> EGrid(fsGridDimensions, comm, periodicity,gridCoupling); + FsGrid< std::array, 2> EDt2Grid(fsGridDimensions, comm, periodicity,gridCoupling); + FsGrid< std::array, 2> EHallGrid(fsGridDimensions, comm, periodicity,gridCoupling); + FsGrid< std::array, 2> EGradPeGrid(fsGridDimensions, comm, periodicity,gridCoupling); + FsGrid< std::array, 2> momentsGrid(fsGridDimensions, comm, periodicity,gridCoupling); + FsGrid< std::array, 2> momentsDt2Grid(fsGridDimensions, comm, periodicity,gridCoupling); + FsGrid< std::array, 2> dPerBGrid(fsGridDimensions, comm, periodicity,gridCoupling); + FsGrid< std::array, 2> dMomentsGrid(fsGridDimensions, comm, periodicity,gridCoupling); + FsGrid< std::array, 2> BgBGrid(fsGridDimensions, comm, periodicity,gridCoupling); + FsGrid< std::array, 2> volGrid(fsGridDimensions, comm, periodicity,gridCoupling); + FsGrid< fsgrids::technical, 2> 
technicalGrid(fsGridDimensions, comm, periodicity,gridCoupling); + // Set DX,DY and DZ // TODO: This is currently just taking the values from cell 1, and assuming them to be // constant throughout the simulation. @@ -431,19 +433,10 @@ int main(int argn,char* args[]) { phiprof::start("Initial fsgrid coupling"); const std::vector& cells = getLocalCells(); - perBGrid. setupForGridCoupling(); - perBDt2Grid. setupForGridCoupling(); - EGrid. setupForGridCoupling(); - EDt2Grid. setupForGridCoupling(); - EHallGrid. setupForGridCoupling(); - EGradPeGrid. setupForGridCoupling(); - momentsGrid. setupForGridCoupling(); - momentsDt2Grid.setupForGridCoupling(); - dPerBGrid. setupForGridCoupling(); - dMomentsGrid. setupForGridCoupling(); - BgBGrid. setupForGridCoupling(); - volGrid. setupForGridCoupling(); - technicalGrid. setupForGridCoupling(); + // Couple FSGrids to mpiGrid. Note that the coupling information is shared + // between them. + technicalGrid.setupForGridCoupling(cells.size()); + // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. @@ -452,35 +445,12 @@ int main(int argn,char* args[]) { for (auto fsgridId : fsgridIds) { - perBGrid. setGridCoupling(fsgridId, myRank); - perBDt2Grid. setGridCoupling(fsgridId, myRank); - EGrid. setGridCoupling(fsgridId, myRank); - EDt2Grid. setGridCoupling(fsgridId, myRank); - EHallGrid. setGridCoupling(fsgridId, myRank); - EGradPeGrid. setGridCoupling(fsgridId, myRank); - momentsGrid. setGridCoupling(fsgridId, myRank); - momentsDt2Grid.setGridCoupling(fsgridId, myRank); - dPerBGrid. setGridCoupling(fsgridId, myRank); - dMomentsGrid. setGridCoupling(fsgridId, myRank); - BgBGrid. setGridCoupling(fsgridId, myRank); - volGrid. setGridCoupling(fsgridId, myRank); technicalGrid. setGridCoupling(fsgridId, myRank); } } - perBGrid. finishGridCoupling(); - perBDt2Grid. finishGridCoupling(); - EGrid. finishGridCoupling(); - EDt2Grid. finishGridCoupling(); - EHallGrid. finishGridCoupling(); - EGradPeGrid. finishGridCoupling(); - momentsGrid. finishGridCoupling(); - momentsDt2Grid.finishGridCoupling(); - dPerBGrid. finishGridCoupling(); - dMomentsGrid. finishGridCoupling(); - BgBGrid. finishGridCoupling(); - volGrid. finishGridCoupling(); technicalGrid. finishGridCoupling(); + phiprof::stop("Initial fsgrid coupling"); // Transfer initial field configuration into the FsGrids @@ -918,19 +888,7 @@ int main(int argn,char* args[]) { // for(auto id : cells) cout << id << " "; // cout << endl; - perBGrid. setupForGridCoupling(); - perBDt2Grid. setupForGridCoupling(); - EGrid. setupForGridCoupling(); - EDt2Grid. setupForGridCoupling(); - EHallGrid. setupForGridCoupling(); - EGradPeGrid. setupForGridCoupling(); - momentsGrid. setupForGridCoupling(); - momentsDt2Grid.setupForGridCoupling(); - dPerBGrid. setupForGridCoupling(); - dMomentsGrid. setupForGridCoupling(); - BgBGrid. setupForGridCoupling(); - volGrid. setupForGridCoupling(); - technicalGrid. setupForGridCoupling(); + technicalGrid. setupForGridCoupling(cells.size()); // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. @@ -938,35 +896,11 @@ int main(int argn,char* args[]) { const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); for (auto& fsgridId : fsgridIds) { - perBGrid. 
setGridCoupling(fsgridId, myRank); - perBDt2Grid. setGridCoupling(fsgridId, myRank); - EGrid. setGridCoupling(fsgridId, myRank); - EDt2Grid. setGridCoupling(fsgridId, myRank); - EHallGrid. setGridCoupling(fsgridId, myRank); - EGradPeGrid. setGridCoupling(fsgridId, myRank); - momentsGrid. setGridCoupling(fsgridId, myRank); - momentsDt2Grid.setGridCoupling(fsgridId, myRank); - dPerBGrid. setGridCoupling(fsgridId, myRank); - dMomentsGrid. setGridCoupling(fsgridId, myRank); - BgBGrid. setGridCoupling(fsgridId, myRank); - volGrid. setGridCoupling(fsgridId, myRank); technicalGrid. setGridCoupling(fsgridId, myRank); } } // cout << endl; - perBGrid. finishGridCoupling(); - perBDt2Grid. finishGridCoupling(); - EGrid. finishGridCoupling(); - EDt2Grid. finishGridCoupling(); - EHallGrid. finishGridCoupling(); - EGradPeGrid. finishGridCoupling(); - momentsGrid. finishGridCoupling(); - momentsDt2Grid.finishGridCoupling(); - dPerBGrid. finishGridCoupling(); - dMomentsGrid. finishGridCoupling(); - BgBGrid. finishGridCoupling(); - volGrid. finishGridCoupling(); technicalGrid. finishGridCoupling(); phiprof::stop("fsgrid-recouple-after-lb"); From 6797df0965cf0a73c119ac5dc9417d03eaa27dbc Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 10 Apr 2019 11:19:16 +0300 Subject: [PATCH 288/602] Threading of setupTechnicalFsGrid. --- fieldsolver/gridGlue.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 2ff2ba607..c08b85521 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -439,6 +439,7 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m for(uint layer = 1; layer <= MAX_NUMBER_OF_BOUNDARY_LAYERS; ++layer) { // loop through all cells in grid +#pragma omp parallel for collapse(3) for (int x = 0; x < localSize[0]; ++x) { for (int y = 0; y < localSize[1]; ++y) { for (int z = 0; z < localSize[2]; ++z) { From 9253fc43926d2943191bfade18259adb5516ce54 Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 10 Apr 2019 11:37:03 +0300 Subject: [PATCH 289/602] More threading in setupTechnicalFsGrid. --- fieldsolver/gridGlue.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index c08b85521..1cf3c3126 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -424,6 +424,7 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m // Add layer calculation here. Include diagonals +-1. // Initialize boundary layer flags to 0. 
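Both threading patches (288 and 289) drop the same directive in front of a triply nested loop over the local fsgrid box, as the added line below shows. The collapse(3) clause merges the three perfectly nested loops into a single iteration space, so OpenMP can balance work even when one dimension of the local domain is only a few cells wide; it is safe here because each (x,y,z) iteration writes only to its own cell. A self-contained illustration of the pattern, with a plain vector standing in for the fsgrid storage:

#include <array>
#include <vector>

int main() {
   const std::array<int,3> localSize = {4, 4, 64};   // e.g. a thin local fsgrid box
   std::vector<int> boundaryLayer(localSize[0] * localSize[1] * localSize[2]);

   // collapse(3) exposes 4*4*64 = 1024 iterations to the scheduler instead of 4.
   #pragma omp parallel for collapse(3)
   for (int x = 0; x < localSize[0]; ++x) {
      for (int y = 0; y < localSize[1]; ++y) {
         for (int z = 0; z < localSize[2]; ++z) {
            // Each iteration touches a distinct element, so no synchronisation is needed.
            boundaryLayer[(x * localSize[1] + y) * localSize[2] + z] = 0;
         }
      }
   }
   return 0;
}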
+#pragma omp parallel for collapse(3) for (int x = 0; x < localSize[0]; ++x) { for (int y = 0; y < localSize[1]; ++y) { for (int z = 0; z < localSize[2]; ++z) { From 8b69df83dc1c1edca7ab23ecaad9934b61a6729b Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 10 Apr 2019 14:06:05 +0300 Subject: [PATCH 290/602] Don't need this anymore --- projects/testAmr/nullPointersOnSisu.cfg | 95 ------------------------- 1 file changed, 95 deletions(-) delete mode 100644 projects/testAmr/nullPointersOnSisu.cfg diff --git a/projects/testAmr/nullPointersOnSisu.cfg b/projects/testAmr/nullPointersOnSisu.cfg deleted file mode 100644 index 1a4aa0e63..000000000 --- a/projects/testAmr/nullPointersOnSisu.cfg +++ /dev/null @@ -1,95 +0,0 @@ -dynamic_timestep = 1 -project = testAmr -ParticlePopulations = proton -propagate_field = 0 -propagate_vlasov_acceleration = 0 -propagate_vlasov_translation = 1 - -[proton_properties] -mass = 1 -mass_units = PROTON -charge = 1 - -[io] -diagnostic_write_interval = -1 -write_initial_state = 0 - -system_write_t_interval = -0.01 -system_write_file_name = fullf -system_write_distribution_stride = 0 -system_write_distribution_xline_stride = 0 -system_write_distribution_yline_stride = 0 -system_write_distribution_zline_stride = 0 - - -[gridbuilder] -x_length = 15 -y_length = 15 -z_length = 10 -x_min = 0.0 -x_max = 1.0e6 -y_min = 0.0 -y_max = 1.0e6 -z_min = 0.0 -z_max = 1.0e6 -timestep_max = 100 - -[proton_vspace] -vx_min = -2.0e6 -vx_max = +2.0e6 -vy_min = -2.0e6 -vy_max = +2.0e6 -vz_min = -2.0e6 -vz_max = +2.0e6 -vx_length = 1 -vy_length = 1 -vz_length = 1 -max_refinement_level = 1 -[proton_sparse] -minValue = 1.0e-16 - -[boundaries] -periodic_x = yes -periodic_y = yes -periodic_z = yes - -[variables] -output = populations_Rho -output = B -output = Pressure -output = populations_V -output = E -output = MPIrank -output = populations_Blocks -#output = VelocitySubSteps - -diagnostic = populations_Blocks -#diagnostic = Pressure -#diagnostic = populations_Rho -#diagnostic = populations_RhoLossAdjust -#diagnostic = populations_RhoLossVelBoundary - -[testAmr] -#magnitude of 1.82206867e-10 gives a period of 360s, useful for testing... -Bx = 1.2e-10 -By = 0.8e-10 -Bz = 1.1135233442526334e-10 -magXPertAbsAmp = 0 -magYPertAbsAmp = 0 -magZPertAbsAmp = 0 -densityModel = testcase -nVelocitySamples = 3 - -[proton_testAmr] -n = 1 -Vx = 5e5 -Vy = 5e5 -Vz = 0.0 -Tx = 500000.0 -Ty = 500000.0 -Tz = 500000.0 -rho = 1.0e6 -rhoPertAbsAmp = 0.0 - -[loadBalance] -algorithm = RCB \ No newline at end of file From 01a784e250af9256714b081aa41ae27b65ac0e38 Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 10 Apr 2019 16:04:23 +0300 Subject: [PATCH 291/602] Background field initialised on fsgrid. NOT DONE: copying from fsgrid into mpiGrid instead of the reverse. Compiles at least.
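Patch 291 below continues the consolidation that patch 287 began above: instead of thirteen grids each maintaining their own dccrg-to-fsgrid coupling tables, every FsGrid instance references a single shared FsGridCouplingInformation object, so the rank mapping is built once through technicalGrid and becomes visible to all grids. A minimal sketch of that sharing, with SharedCoupling and SimpleGrid as hypothetical stand-ins for the fsgrid types:

#include <cstdint>
#include <map>

struct SharedCoupling {                          // stands in for FsGridCouplingInformation
   std::map<int64_t, int> cellToRank;            // global fsgrid cell id -> owning MPI rank
};

struct SimpleGrid {                              // stands in for FsGrid
   SharedCoupling* coupling;                     // all grids point at the SAME object
   explicit SimpleGrid(SharedCoupling& c) : coupling(&c) {}
   void setGridCoupling(int64_t id, int rank) { coupling->cellToRank[id] = rank; }
};

int main() {
   SharedCoupling info;
   SimpleGrid technical(info), perB(info), moments(info);
   technical.setGridCoupling(42, 3);             // one call fills the shared table,
   return perB.coupling->cellToRank.at(42);      // so perB and moments see rank 3 too
}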
--- MAKE/Makefile.sisu_gcc | 2 +- Makefile | 74 +++--- backgroundfield/backgroundfield.cpp | 169 ++++++------- backgroundfield/backgroundfield.h | 11 +- grid.cpp | 11 +- poisson_solver/poisson_solver.cpp | 8 +- projects/Diffusion/Diffusion.cpp | 7 +- projects/Diffusion/Diffusion.h | 5 +- projects/Dispersion/Dispersion.cpp | 7 +- projects/Dispersion/Dispersion.h | 5 +- projects/Distributions/Distributions.cpp | 7 +- projects/Distributions/Distributions.h | 5 +- projects/ElectricSail/electric_sail.cpp | 163 +++++++------ projects/ElectricSail/electric_sail.h | 5 +- projects/Flowthrough/Flowthrough.cpp | 7 +- projects/Flowthrough/Flowthrough.h | 5 +- projects/Fluctuations/Fluctuations.cpp | 7 +- projects/Fluctuations/Fluctuations.h | 5 +- projects/Harris/Harris.cpp | 7 +- projects/Harris/Harris.h | 5 +- projects/IPShock/IPShock.cpp | 7 +- projects/IPShock/IPShock.h | 5 +- projects/Larmor/Larmor.cpp | 7 +- projects/Larmor/Larmor.h | 5 +- projects/Magnetosphere/Magnetosphere.cpp | 225 ++++++++++-------- projects/Magnetosphere/Magnetosphere.h | 5 +- projects/MultiPeak/MultiPeak.cpp | 7 +- projects/MultiPeak/MultiPeak.h | 5 +- projects/Poisson/poisson_test.cpp | 5 +- projects/Poisson/poisson_test.h | 5 +- projects/Shocktest/Shocktest.cpp | 8 +- projects/Shocktest/Shocktest.h | 5 +- projects/Template/Template.cpp | 11 +- projects/Template/Template.h | 5 +- projects/VelocityBox/VelocityBox.cpp | 9 +- projects/VelocityBox/VelocityBox.h | 5 +- projects/project.cpp | 5 +- projects/project.h | 13 +- projects/testAmr/testAmr.cpp | 7 +- projects/testAmr/testAmr.h | 5 +- projects/testHall/testHall.cpp | 2 +- projects/testHall/testHall.h | 2 +- projects/test_fp/test_fp.cpp | 7 +- projects/test_fp/test_fp.h | 5 +- projects/test_trans/test_trans.cpp | 7 +- projects/test_trans/test_trans.h | 5 +- .../verificationLarmor/verificationLarmor.cpp | 7 +- .../verificationLarmor/verificationLarmor.h | 5 +- vlasiator.cpp | 53 +++-- 49 files changed, 555 insertions(+), 402 deletions(-) diff --git a/MAKE/Makefile.sisu_gcc b/MAKE/Makefile.sisu_gcc index ff5dab612..1fe1d172e 100644 --- a/MAKE/Makefile.sisu_gcc +++ b/MAKE/Makefile.sisu_gcc @@ -85,6 +85,6 @@ INC_EIGEN = -I$(LIBRARY_PREFIX)/eigen/ INC_DCCRG = -I$(LIBRARY_PREFIX)/dccrg_new_neighbours/ INC_VECTORCLASS = -I$(LIBRARY_PREFIX)/vectorclass #INC_FSGRID = -I$(LIBRARY_PREFIX)/fsgrid -INC_FSGRID = -I/homeappl/home/koskelat/lib/fsgrid/ +INC_FSGRID = -I/homeappl/home/kempf/fsgrid/ INC_DCCRG = -I/homeappl/home/koskelat/lib/dccrg/ diff --git a/Makefile b/Makefile index 7747028f8..72b1fe278 100644 --- a/Makefile +++ b/Makefile @@ -240,7 +240,7 @@ version.o: version.cpp ${CMP} ${CXXFLAGS} ${FLAGS} -c version.cpp amr_refinement_criteria.o: ${DEPS_COMMON} velocity_blocks.h amr_refinement_criteria.h amr_refinement_criteria.cpp object_factory.h - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c amr_refinement_criteria.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c amr_refinement_criteria.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_FSGRID} memoryallocation.o: memoryallocation.cpp ${CMP} ${CXXFLAGS} ${FLAGS} -c memoryallocation.cpp ${INC_PAPI} @@ -258,16 +258,16 @@ quadr.o: backgroundfield/quadr.cpp backgroundfield/quadr.hpp ${CMP} ${CXXFLAGS} ${FLAGS} -c backgroundfield/quadr.cpp backgroundfield.o: ${DEPS_COMMON} backgroundfield/backgroundfield.cpp backgroundfield/backgroundfield.h backgroundfield/fieldfunction.hpp backgroundfield/functions.hpp backgroundfield/integratefunction.hpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c 
backgroundfield/backgroundfield.cpp ${INC_DCCRG} ${INC_ZOLTAN} + ${CMP} ${CXXFLAGS} ${FLAGS} -c backgroundfield/backgroundfield.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_FSGRID} integratefunction.o: ${DEPS_COMMON} backgroundfield/integratefunction.cpp backgroundfield/integratefunction.hpp backgroundfield/functions.hpp backgroundfield/quadr.cpp backgroundfield/quadr.hpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c backgroundfield/integratefunction.cpp + ${CMP} ${CXXFLAGS} ${FLAGS} -c backgroundfield/integratefunction.cpp datareducer.o: ${DEPS_COMMON} spatial_cell.hpp datareduction/datareducer.h datareduction/datareductionoperator.h datareduction/datareducer.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c datareduction/datareducer.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_MPI} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c datareduction/datareducer.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_MPI} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} ${INC_FSGRID} datareductionoperator.o: ${DEPS_COMMON} ${DEPS_CELL} parameters.h datareduction/datareductionoperator.h datareduction/datareductionoperator.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c datareduction/datareductionoperator.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_MPI} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c datareduction/datareductionoperator.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_MPI} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} ${INC_FSGRID} dro_populations.o: ${DEPS_COMMON} ${DEPS_CELL} parameters.h datareduction/datareductionoperator.h datareduction/datareductionoperator.cpp datareduction/dro_populations.h datareduction/dro_populations.cpp ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c datareduction/dro_populations.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_MPI} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} @@ -283,7 +283,7 @@ ionosphere.o: ${DEPS_SYSBOUND} sysboundary/ionosphere.h sysboundary/ionosphere.c mesh_data_container.o: ${DEPS_COMMON} mesh_data_container.h mesh_data.h - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c mesh_data_container.cpp ${INC_VLSV} ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c mesh_data_container.cpp ${INC_VLSV} ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_FSGRID} project_boundary.o: ${DEPS_SYSBOUND} sysboundary/project_boundary.h sysboundary/project_boundary.cpp ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/project_boundary.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} @@ -308,82 +308,82 @@ read_gaussian_population.o: definitions.h readparameters.h projects/read_gaussia ${CMP} ${CXXFLAGS} ${FLAGS} -c projects/read_gaussian_population.cpp Alfven.o: ${DEPS_COMMON} projects/Alfven/Alfven.h projects/Alfven/Alfven.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Alfven/Alfven.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Alfven/Alfven.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} Diffusion.o: ${DEPS_COMMON} projects/Diffusion/Diffusion.h projects/Diffusion/Diffusion.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Diffusion/Diffusion.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Diffusion/Diffusion.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} Dispersion.o: ${DEPS_COMMON} projects/Dispersion/Dispersion.h projects/Dispersion/Dispersion.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Dispersion/Dispersion.cpp 
${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Dispersion/Dispersion.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} Distributions.o: ${DEPS_COMMON} projects/Distributions/Distributions.h projects/Distributions/Distributions.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Distributions/Distributions.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Distributions/Distributions.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} electric_sail.o: ${DEPS_COMMON} projects/read_gaussian_population.h projects/ElectricSail/electric_sail.h projects/ElectricSail/electric_sail.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/ElectricSail/electric_sail.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/ElectricSail/electric_sail.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} Firehose.o: ${DEPS_COMMON} projects/Firehose/Firehose.h projects/Firehose/Firehose.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Firehose/Firehose.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Firehose/Firehose.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} Flowthrough.o: ${DEPS_COMMON} projects/Flowthrough/Flowthrough.h projects/Flowthrough/Flowthrough.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Flowthrough/Flowthrough.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Flowthrough/Flowthrough.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} Fluctuations.o: ${DEPS_COMMON} projects/Fluctuations/Fluctuations.h projects/Fluctuations/Fluctuations.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Fluctuations/Fluctuations.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Fluctuations/Fluctuations.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} Harris.o: ${DEPS_COMMON} projects/Harris/Harris.h projects/Harris/Harris.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Harris/Harris.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Harris/Harris.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} KHB.o: ${DEPS_COMMON} projects/KHB/KHB.h projects/KHB/KHB.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/KHB/KHB.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/KHB/KHB.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} Larmor.o: ${DEPS_COMMON} projects/Larmor/Larmor.h projects/Larmor/Larmor.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Larmor/Larmor.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Larmor/Larmor.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} Magnetosphere.o: ${DEPS_COMMON} projects/Magnetosphere/Magnetosphere.h projects/Magnetosphere/Magnetosphere.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Magnetosphere/Magnetosphere.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Magnetosphere/Magnetosphere.cpp ${INC_DCCRG} 
${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} MultiPeak.o: ${DEPS_COMMON} projects/MultiPeak/MultiPeak.h projects/MultiPeak/MultiPeak.cpp projects/projectTriAxisSearch.h projects/projectTriAxisSearch.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/MultiPeak/MultiPeak.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/MultiPeak/MultiPeak.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} VelocityBox.o: ${DEPS_COMMON} projects/VelocityBox/VelocityBox.h projects/VelocityBox/VelocityBox.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/VelocityBox/VelocityBox.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/VelocityBox/VelocityBox.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} Riemann1.o: ${DEPS_COMMON} projects/Riemann1/Riemann1.h projects/Riemann1/Riemann1.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Riemann1/Riemann1.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Riemann1/Riemann1.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} Shock.o: ${DEPS_COMMON} projects/Shock/Shock.h projects/Shock/Shock.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Shock/Shock.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Shock/Shock.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} IPShock.o: ${DEPS_COMMON} projects/IPShock/IPShock.h projects/IPShock/IPShock.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/IPShock/IPShock.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VECTORCLASS} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/IPShock/IPShock.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} ${INC_VECTORCLASS} Template.o: ${DEPS_COMMON} projects/Template/Template.h projects/Template/Template.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Template/Template.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Template/Template.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} test_fp.o: ${DEPS_COMMON} projects/test_fp/test_fp.h projects/test_fp/test_fp.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c projects/test_fp/test_fp.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} -c projects/test_fp/test_fp.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} testAmr.o: ${DEPS_COMMON} projects/testAmr/testAmr.h projects/testAmr/testAmr.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/testAmr/testAmr.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/testAmr/testAmr.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} testHall.o: ${DEPS_COMMON} projects/testHall/testHall.h projects/testHall/testHall.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/testHall/testHall.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/testHall/testHall.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} test_trans.o: ${DEPS_COMMON} projects/test_trans/test_trans.h projects/test_trans/test_trans.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/test_trans/test_trans.cpp ${INC_DCCRG} 
${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/test_trans/test_trans.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} verificationLarmor.o: ${DEPS_COMMON} projects/verificationLarmor/verificationLarmor.h projects/verificationLarmor/verificationLarmor.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/verificationLarmor/verificationLarmor.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/verificationLarmor/verificationLarmor.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} Shocktest.o: ${DEPS_COMMON} projects/Shocktest/Shocktest.h projects/Shocktest/Shocktest.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Shocktest/Shocktest.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Shocktest/Shocktest.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} project.o: ${DEPS_COMMON} $(DEPS_PROJECTS) - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/project.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VECTORCLASS} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/project.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VECTORCLASS} ${INC_FSGRID} projectTriAxisSearch.o: ${DEPS_COMMON} $(DEPS_PROJECTS) projects/projectTriAxisSearch.h projects/projectTriAxisSearch.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/projectTriAxisSearch.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/projectTriAxisSearch.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} poisson_solver.o: ${DEPS_COMMON} ${DEPS_CELL} poisson_solver/poisson_solver.h poisson_solver/poisson_solver.cpp $(CMP) $(CXXFLAGS) $(FLAGS) ${MATHFLAGS} -c poisson_solver/poisson_solver.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} @@ -401,7 +401,7 @@ poisson_test.o: ${DEPS_COMMON} ${DEPS_CELL} projects/project.h projects/project. 
$(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c projects/Poisson/poisson_test.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} spatial_cell.o: ${DEPS_CELL} spatial_cell.cpp - $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c spatial_cell.cpp $(INC_BOOST) ${INC_DCCRG} ${INC_EIGEN} ${INC_ZOLTAN} ${INC_VECTORCLASS} + $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c spatial_cell.cpp $(INC_BOOST) ${INC_DCCRG} ${INC_EIGEN} ${INC_ZOLTAN} ${INC_VECTORCLASS} ${INC_FSGRID} ifeq ($(MESH),AMR) vlasovmover.o: ${DEPS_VLSVMOVER_AMR} @@ -424,7 +424,7 @@ cpu_acc_load_blocks.o: ${DEPS_CPU_ACC_LOAD_BLOCKS} ${CMP} ${CXXFLAGS} ${FLAG_OPENMP} ${MATHFLAGS} ${FLAGS} -c vlasovsolver/cpu_acc_load_blocks.cpp ${INC_VECTORCLASS} cpu_acc_transform.o: ${DEPS_CPU_ACC_TRANSFORM} - ${CMP} ${CXXFLAGS} ${FLAG_OPENMP} ${MATHFLAGS} ${FLAGS} -c vlasovsolver/cpu_acc_transform.cpp ${INC_EIGEN} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_BOOST} + ${CMP} ${CXXFLAGS} ${FLAG_OPENMP} ${MATHFLAGS} ${FLAGS} -c vlasovsolver/cpu_acc_transform.cpp ${INC_EIGEN} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_BOOST} ${INC_FSGRID} cpu_trans_map.o: ${DEPS_CPU_TRANS_MAP} ${CMP} ${CXXFLAGS} ${FLAG_OPENMP} ${MATHFLAGS} ${FLAGS} -c vlasovsolver/cpu_trans_map.cpp ${INC_EIGEN} ${INC_DCCRG} ${INC_FSGRID} ${INC_PROFILE} ${INC_VECTORCLASS} ${INC_ZOLTAN} ${INC_VLSV} ${INC_BOOST} @@ -477,7 +477,7 @@ grid.o: ${DEPS_COMMON} parameters.h ${DEPS_PROJECTS} ${DEPS_CELL} grid.cpp grid ${CMP} ${CXXFLAGS} ${FLAG_OPENMP} ${FLAGS} -c grid.cpp ${INC_MPI} ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_EIGEN} ${INC_ZOLTAN} ${INC_PROFILE} ${INC_VLSV} ${INC_PAPI} ioread.o: ${DEPS_COMMON} parameters.h ${DEPS_CELL} ioread.cpp ioread.h - ${CMP} ${CXXFLAGS} ${FLAG_OPENMP} ${FLAGS} -c ioread.cpp ${INC_MPI} ${INC_DCCRG} ${INC_BOOST} ${INC_EIGEN} ${INC_ZOLTAN} ${INC_PROFILE} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${FLAG_OPENMP} ${FLAGS} -c ioread.cpp ${INC_MPI} ${INC_DCCRG} ${INC_BOOST} ${INC_EIGEN} ${INC_ZOLTAN} ${INC_PROFILE} ${INC_VLSV} ${INC_FSGRID} iowrite.o: ${DEPS_COMMON} parameters.h ${DEPS_CELL} iowrite.cpp iowrite.h ${CMP} ${CXXFLAGS} ${FLAG_OPENMP} ${FLAGS} -c iowrite.cpp ${INC_MPI} ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_EIGEN} ${INC_ZOLTAN} ${INC_PROFILE} ${INC_VLSV} @@ -489,7 +489,7 @@ common.o: common.h common.cpp $(CMP) $(CXXFLAGS) $(FLAGS) -c common.cpp parameters.o: parameters.h parameters.cpp readparameters.h - $(CMP) $(CXXFLAGS) $(FLAGS) -c parameters.cpp ${INC_BOOST} ${INC_EIGEN} ${INC_DCCRG} ${INC_ZOLTAN} + $(CMP) $(CXXFLAGS) $(FLAGS) -c parameters.cpp ${INC_BOOST} ${INC_EIGEN} ${INC_DCCRG} ${INC_ZOLTAN} ${INC_FSGRID} readparameters.o: readparameters.h readparameters.cpp version.h version.cpp $(CMP) $(CXXFLAGS) $(FLAGS) -c readparameters.cpp ${INC_BOOST} ${INC_EIGEN} @@ -501,7 +501,7 @@ vlscommon.o: $(DEPS_COMMON) vlscommon.h vlscommon.cpp ${CMP} ${CXXFLAGS} ${FLAGS} -c vlscommon.cpp object_wrapper.o: $(DEPS_COMMON) object_wrapper.h object_wrapper.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c object_wrapper.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} + ${CMP} ${CXXFLAGS} ${FLAGS} -c object_wrapper.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_FSGRID} # Make executable vlasiator: $(OBJS) $(OBJS_POISSON) $(OBJS_FSOLVER) diff --git a/backgroundfield/backgroundfield.cpp b/backgroundfield/backgroundfield.cpp index fa6e5c812..3e1e2d5b4 100644 --- a/backgroundfield/backgroundfield.cpp +++ b/backgroundfield/backgroundfield.cpp @@ -31,13 +31,13 @@ //FieldFunction should be initialized void setBackgroundField( FieldFunction& bgFunction, - Real* cellParams, - 
Real* faceDerivatives, - Real* volumeDerivatives, + FsGrid< std::array, 2>& BgBGrid, bool append) { - using namespace CellParams; - using namespace fieldsolver; - using namespace bvolderivatives; + + /*if we do not add a new background to the existing one we first put everything to zero*/ + if(append==false) { + setBackgroundFieldToZero(BgBGrid); + } //these are doubles, as the averaging functions copied from Gumics //use internally doubles. In any case, it should provide more @@ -48,19 +48,6 @@ void setBackgroundField( double dx[3]; unsigned int faceCoord1[3]; unsigned int faceCoord2[3]; - - - start[0] = cellParams[CellParams::XCRD]; - start[1] = cellParams[CellParams::YCRD]; - start[2] = cellParams[CellParams::ZCRD]; - - dx[0] = cellParams[CellParams::DX]; - dx[1] = cellParams[CellParams::DY]; - dx[2] = cellParams[CellParams::DZ]; - - end[0]=start[0]+dx[0]; - end[1]=start[1]+dx[1]; - end[2]=start[2]+dx[2]; //the coordinates of the edges face with a normal in the third coordinate direction, stored here to enable looping faceCoord1[0]=1; @@ -69,77 +56,97 @@ void setBackgroundField( faceCoord2[1]=2; faceCoord1[2]=0; faceCoord2[2]=1; - - /*if we do not add a new background to the existing one we first put everything to zero*/ - if(append==false) { - setBackgroundFieldToZero(cellParams, faceDerivatives, volumeDerivatives); - } - //Face averages - for(unsigned int fComponent=0;fComponent<3;fComponent++){ - bgFunction.setDerivative(0); - bgFunction.setComponent((coordinate)fComponent); - cellParams[CellParams::BGBX+fComponent] += - surfaceAverage( - bgFunction, - (coordinate)fComponent, - accuracy, - start, - dx[faceCoord1[fComponent]], - dx[faceCoord2[fComponent]] - ); - - //Compute derivatives. Note that we scale by dx[] as the arrays are assumed to contain differences, not true derivatives! - bgFunction.setDerivative(1); - bgFunction.setDerivComponent((coordinate)faceCoord1[fComponent]); - faceDerivatives[fieldsolver::dBGBxdy+2*fComponent] += - dx[faceCoord1[fComponent]]* - surfaceAverage(bgFunction,(coordinate)fComponent,accuracy,start,dx[faceCoord1[fComponent]],dx[faceCoord2[fComponent]]); - bgFunction.setDerivComponent((coordinate)faceCoord2[fComponent]); - faceDerivatives[fieldsolver::dBGBxdy+1+2*fComponent] += - dx[faceCoord2[fComponent]]* - surfaceAverage(bgFunction,(coordinate)fComponent,accuracy,start,dx[faceCoord1[fComponent]],dx[faceCoord2[fComponent]]); - } - - //Volume averages - for(unsigned int fComponent=0;fComponent<3;fComponent++){ - bgFunction.setDerivative(0); - bgFunction.setComponent((coordinate)fComponent); - cellParams[CellParams::BGBXVOL+fComponent] += volumeAverage(bgFunction,accuracy,start,end); - - //Compute derivatives. Note that we scale by dx[] as the arrays are assumed to contain differences, not true derivatives! 
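That scaling remark deserves a concrete reading, since it survives unchanged into the fsgrid version below: a stored entry such as dBGBxdy holds the difference of BGBx across one cell, i.e. dy times dBGBx/dy, not the derivative itself, so a consumer that wants the true derivative must divide by the cell size again. A short illustration with made-up numbers:

// Stored background-field "derivatives" are per-cell differences (values made up).
const double dy       = 1.0e5;          // cell size in y [m]
const double dBxdy    = 2.4e-17;        // true derivative [T/m]
const double stored   = dy * dBxdy;     // what a dBGBxdy entry holds: 2.4e-12 T
const double restored = stored / dy;    // consumers divide by dy to recover dBxdy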
- bgFunction.setDerivative(1); - bgFunction.setDerivComponent((coordinate)faceCoord1[fComponent]); - volumeDerivatives[bvolderivatives::dBGBXVOLdy+2*fComponent] += dx[faceCoord1[fComponent]]*volumeAverage(bgFunction,accuracy,start,end); - bgFunction.setDerivComponent((coordinate)faceCoord2[fComponent]); - volumeDerivatives[bvolderivatives::dBGBXVOLdy+1+2*fComponent] += dx[faceCoord2[fComponent]]*volumeAverage(bgFunction,accuracy,start,end); + auto localSize = BgBGrid.getLocalSize(); + + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + std::array start3 = BgBGrid.getPhysicalCoords(x, y, z); + start[0] = start3[0]; + start[1] = start3[1]; + start[2] = start3[2]; + + dx[0] = BgBGrid.DX; + dx[1] = BgBGrid.DY; + dx[2] = BgBGrid.DZ; + + end[0]=start[0]+dx[0]; + end[1]=start[1]+dx[1]; + end[2]=start[2]+dx[2]; + + //Face averages + for(uint fComponent=0; fComponent<3; fComponent++){ + bgFunction.setDerivative(0); + bgFunction.setComponent((coordinate)fComponent); + BgBGrid.get(x,y,z)->at(fsgrids::bgbfield::BGBX+fComponent) += + surfaceAverage(bgFunction, + (coordinate)fComponent, + accuracy, + start, + dx[faceCoord1[fComponent]], + dx[faceCoord2[fComponent]] + ); + + //Compute derivatives. Note that we scale by dx[] as the arrays are assumed to contain differences, not true derivatives! + bgFunction.setDerivative(1); + bgFunction.setDerivComponent((coordinate)faceCoord1[fComponent]); + BgBGrid.get(x,y,z)->at(fsgrids::bgbfield::dBGBxdy+2*fComponent) += + dx[faceCoord1[fComponent]] * + surfaceAverage(bgFunction, + (coordinate)fComponent, + accuracy, + start, + dx[faceCoord1[fComponent]], + dx[faceCoord2[fComponent]] + ); + bgFunction.setDerivComponent((coordinate)faceCoord2[fComponent]); + BgBGrid.get(x,y,z)->at(fsgrids::bgbfield::dBGBxdy+1+2*fComponent) += + dx[faceCoord2[fComponent]] * + surfaceAverage(bgFunction, + (coordinate)fComponent, + accuracy, + start, + dx[faceCoord1[fComponent]], + dx[faceCoord2[fComponent]] + ); + } + + //Volume averages + for(unsigned int fComponent=0;fComponent<3;fComponent++){ + bgFunction.setDerivative(0); + bgFunction.setComponent((coordinate)fComponent); + BgBGrid.get(x,y,z)->at(fsgrids::bgbfield::BGBXVOL+fComponent) += volumeAverage(bgFunction,accuracy,start,end); + + //Compute derivatives. Note that we scale by dx[] as the arrays are assumed to contain differences, not true derivatives! + bgFunction.setDerivative(1); + bgFunction.setDerivComponent((coordinate)faceCoord1[fComponent]); + BgBGrid.get(x,y,z)->at(fsgrids::bgbfield::dBGBXVOLdy+2*fComponent) += dx[faceCoord1[fComponent]] * volumeAverage(bgFunction,accuracy,start,end); + bgFunction.setDerivComponent((coordinate)faceCoord2[fComponent]); + BgBGrid.get(x,y,z)->at(fsgrids::bgbfield::dBGBXVOLdy+1+2*fComponent) += dx[faceCoord2[fComponent]] * volumeAverage(bgFunction,accuracy,start,end); + } + } + } } - //TODO //COmpute divergence and curl of volume averaged field and check that both are zero. 
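The TODO above is straightforward to realise with centred differences on the stored volume averages: for a background dipole, div B should vanish identically and curl B should vanish outside the source region, so both make good consistency checks. A sketch of the divergence half, with B(x,y,z,c) as a hypothetical accessor for volume-averaged component c at cell (x,y,z):

// Hypothetical accessor, e.g. backed by the grid's BGBXVOL..BGBZVOL entries.
double B(int x, int y, int z, int c);

// Centred-difference divergence at an interior cell; should be ~0 for a dipole.
double divB(int x, int y, int z, double dx, double dy, double dz) {
   return (B(x+1,y,z,0) - B(x-1,y,z,0)) / (2.0*dx)
        + (B(x,y+1,z,1) - B(x,y-1,z,1)) / (2.0*dy)
        + (B(x,y,z+1,2) - B(x,y,z-1,2)) / (2.0*dz);
}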
} void setBackgroundFieldToZero( - Real* cellParams, - Real* faceDerivatives, - Real* volumeDerivatives + FsGrid< std::array, 2>& BgBGrid ) { - using namespace CellParams; - using namespace fieldsolver; - using namespace bvolderivatives; - - //Face averages - for(unsigned int fComponent=0;fComponent<3;fComponent++){ - cellParams[CellParams::BGBX+fComponent] = 0.0; - faceDerivatives[fieldsolver::dBGBxdy+2*fComponent] = 0.0; - faceDerivatives[fieldsolver::dBGBxdy+1+2*fComponent] = 0.0; - } + auto localSize = BgBGrid.getLocalSize(); - //Volume averages - for(unsigned int fComponent=0;fComponent<3;fComponent++){ - cellParams[CellParams::BGBXVOL+fComponent] = 0.0; - volumeDerivatives[bvolderivatives::dBGBXVOLdy+2*fComponent] = 0.0; - volumeDerivatives[bvolderivatives::dBGBXVOLdy+1+2*fComponent] =0.0; + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + for (int i = 0; i < fsgrids::bgbfield::N_BGB; ++i) { + BgBGrid.get(x,y,z)->at(i) = 0; + } + } + } } } diff --git a/backgroundfield/backgroundfield.h b/backgroundfield/backgroundfield.h index 49415f99d..7996b8b37 100644 --- a/backgroundfield/backgroundfield.h +++ b/backgroundfield/backgroundfield.h @@ -25,18 +25,17 @@ #include "fieldfunction.hpp" #include "../definitions.h" +#include "../common.h" +#include "fsgrid.hpp" + void setBackgroundField( FieldFunction& bgFunction, - Real* cellParams, - Real* faceDerivatives, - Real* volumeDerivatives, + FsGrid< std::array, 2>& BgBGrid, bool append=false ); void setBackgroundFieldToZero( - Real* cellParams, - Real* faceDerivatives, - Real* volumeDerivatives + FsGrid< std::array, 2>& BgBGrid ); #endif diff --git a/grid.cpp b/grid.cpp index 6f0a0e576..d53586fe4 100644 --- a/grid.cpp +++ b/grid.cpp @@ -183,12 +183,6 @@ void initializeGrid( } phiprof::stop("Read restart"); const vector& cells = getLocalCells(); - //set background field, FIXME should be read in from restart - #pragma omp parallel for schedule(dynamic) - for (size_t i=0; isysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { project.setCell(cell); diff --git a/poisson_solver/poisson_solver.cpp b/poisson_solver/poisson_solver.cpp index d9c54be7e..3527fe20f 100644 --- a/poisson_solver/poisson_solver.cpp +++ b/poisson_solver/poisson_solver.cpp @@ -104,7 +104,9 @@ namespace poisson { spatial_cell::SpatialCell* cell = mpiGrid[cells[c]]; if (Poisson::timeDependentBackground == true) { - getObjectWrapper().project->setCellBackgroundField(cell); +#warning this is not supported at the moment + abort(); +// getObjectWrapper().project->setCellBackgroundField(cell); } cell->parameters[CellParams::PHI] = 0; @@ -119,7 +121,9 @@ namespace poisson { spatial_cell::SpatialCell* cell = mpiGrid[cells[c]]; if (Poisson::timeDependentBackground == true) { - getObjectWrapper().project->setCellBackgroundField(cell); + #warning this is not supported at the moment + abort(); +// getObjectWrapper().project->setCellBackgroundField(cell); } cell->parameters[CellParams::EXVOL] = cell->parameters[CellParams::BGEXVOL]; diff --git a/projects/Diffusion/Diffusion.cpp b/projects/Diffusion/Diffusion.cpp index abc7f3ecd..8e9e447fd 100644 --- a/projects/Diffusion/Diffusion.cpp +++ b/projects/Diffusion/Diffusion.cpp @@ -133,9 +133,12 @@ namespace projects { } - void Diffusion::setCellBackgroundField(SpatialCell* cell) { + void Diffusion::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { ConstantField 
bgField; bgField.initialize(0,0,this->B0); //bg bx, by,bz - setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); + setBackgroundField(bgField, BgBGrid); } } // namespace projects diff --git a/projects/Diffusion/Diffusion.h b/projects/Diffusion/Diffusion.h index ff3270d1d..c1e289d4b 100644 --- a/projects/Diffusion/Diffusion.h +++ b/projects/Diffusion/Diffusion.h @@ -46,7 +46,10 @@ namespace projects { static void addParameters(void); virtual void getParameters(void); /*! set background field, should set it for all cells */ - virtual void setCellBackgroundField(spatial_cell::SpatialCell* cell); + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); protected: Real getDistribValue( diff --git a/projects/Dispersion/Dispersion.cpp b/projects/Dispersion/Dispersion.cpp index c3902f148..4af7db0fa 100644 --- a/projects/Dispersion/Dispersion.cpp +++ b/projects/Dispersion/Dispersion.cpp @@ -254,12 +254,15 @@ namespace projects { } - void Dispersion::setCellBackgroundField(SpatialCell* cell) const { + void Dispersion::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { ConstantField bgField; bgField.initialize(this->B0 * cos(this->angleXY) * cos(this->angleXZ), this->B0 * sin(this->angleXY) * cos(this->angleXZ), this->B0 * sin(this->angleXZ)); - setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); + setBackgroundField(bgField, BgBGrid); } } // namespace projects diff --git a/projects/Dispersion/Dispersion.h b/projects/Dispersion/Dispersion.h index 4af74226b..77ae7987a 100644 --- a/projects/Dispersion/Dispersion.h +++ b/projects/Dispersion/Dispersion.h @@ -51,7 +51,10 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setCellBackgroundField(spatial_cell::SpatialCell* cell) const; + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); virtual void hook( cuint& stage, const dccrg::Dccrg& mpiGrid diff --git a/projects/Distributions/Distributions.cpp b/projects/Distributions/Distributions.cpp index ad76e9da8..4c69c4d56 100644 --- a/projects/Distributions/Distributions.cpp +++ b/projects/Distributions/Distributions.cpp @@ -167,13 +167,16 @@ namespace projects { } } - void Distributions::setCellBackgroundField(SpatialCell* cell) { + void Distributions::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { ConstantField bgField; bgField.initialize(this->Bx, this->By, this->Bz); - setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); + setBackgroundField(bgField, BgBGrid); } vector> Distributions::getV0( diff --git a/projects/Distributions/Distributions.h b/projects/Distributions/Distributions.h index f33a19cdb..9cf799db6 100644 --- a/projects/Distributions/Distributions.h +++ b/projects/Distributions/Distributions.h @@ -36,7 +36,10 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setCellBackgroundField(spatial_cell::SpatialCell* cell); + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); protected: Real getDistribValue( creal& 
x,creal& y, creal& z, diff --git a/projects/ElectricSail/electric_sail.cpp b/projects/ElectricSail/electric_sail.cpp index ce0b3ad84..3a96c97c3 100644 --- a/projects/ElectricSail/electric_sail.cpp +++ b/projects/ElectricSail/electric_sail.cpp @@ -210,86 +210,91 @@ namespace projects { /** * * NOTE: This is only called in grid.cpp:initializeGrid. - * NOTE: This function must be thread-safe. */ - void ElectricSail::setCellBackgroundField(SpatialCell* cell) const { - Real X = cell->parameters[CellParams::XCRD]; - Real Y = cell->parameters[CellParams::YCRD]; - Real Z = cell->parameters[CellParams::ZCRD]; - Real DX = cell->parameters[CellParams::DX]; - Real DY = cell->parameters[CellParams::DY]; - Real DZ = cell->parameters[CellParams::DZ]; - - cell->parameters[CellParams::RHOQ_EXT] = 0; - Real pos[3]; - pos[0] = cell->parameters[CellParams::XCRD] + 0.5*cell->parameters[CellParams::DX]; - pos[1] = cell->parameters[CellParams::YCRD] + 0.5*cell->parameters[CellParams::DY]; - pos[2] = cell->parameters[CellParams::ZCRD] + 0.5*cell->parameters[CellParams::DZ]; - - Real factor = 1.0; - if (timeDependentCharge == true) { - factor = max((Real)0.0,(Real)1.0+(Parameters::t-tetherChargeRiseTime)/tetherChargeRiseTime); - factor = min((Real)1.0,factor); - } - - if (useBackgroundField == false) { - Real rad = sqrt(pos[0]*pos[0]+pos[1]*pos[1]+pos[2]*pos[2]); - Real D3 = cell->parameters[CellParams::DX]*cell->parameters[CellParams::DY]; - if (rad <= 5) cell->parameters[CellParams::RHOQ_EXT] = 0.25*factor*tetherUnitCharge/D3/physicalconstants::EPS_0; - - cell->parameters[CellParams::BGEXVOL] = 0; - cell->parameters[CellParams::BGEYVOL] = 0; - cell->parameters[CellParams::BGEZVOL] = 0; - return; - } - - cell->parameters[CellParams::RHOQ_EXT] = 0; - - const Real EPSILON = 1e-30; - uint N = 1; - int N3_sum = 0; - Real E_vol[3] = {0,0,0}; - - bool ok = false; - do { - Real E_current[3] = {0,0,0}; - - const Real DX_N = DX / N; - const Real DY_N = DY / N; - const Real DZ_N = DZ / N; - - // Sample E using N points - Real E_dummy[3] = {0,0,0}; - for (uint k=0; k= poisson::Poisson::maxIterations) ok = true; - - // Add new E values to accumulated sums - for (int i=0; i<3; ++i) E_vol[i] += E_current[i]; - N3_sum += N*N*N; - ++N; - } while (ok == false); - - // Store the computed volume-average - cell->parameters[CellParams::BGEXVOL] = E_vol[0] / N3_sum; - cell->parameters[CellParams::BGEYVOL] = E_vol[1] / N3_sum; - cell->parameters[CellParams::BGEZVOL] = E_vol[2] / N3_sum; + void ElectricSail::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { +#warning this is not supported at the moment, needs to be ported to fsgrid + std::cerr << "ERROR: ElectricSail::setProjectBackgroundField is not ported to fsgrid! Aborting." 
<< std::endl; + abort(); +// Real X = cell->parameters[CellParams::XCRD]; +// Real Y = cell->parameters[CellParams::YCRD]; +// Real Z = cell->parameters[CellParams::ZCRD]; +// Real DX = cell->parameters[CellParams::DX]; +// Real DY = cell->parameters[CellParams::DY]; +// Real DZ = cell->parameters[CellParams::DZ]; +// +// cell->parameters[CellParams::RHOQ_EXT] = 0; +// Real pos[3]; +// pos[0] = cell->parameters[CellParams::XCRD] + 0.5*cell->parameters[CellParams::DX]; +// pos[1] = cell->parameters[CellParams::YCRD] + 0.5*cell->parameters[CellParams::DY]; +// pos[2] = cell->parameters[CellParams::ZCRD] + 0.5*cell->parameters[CellParams::DZ]; +// +// Real factor = 1.0; +// if (timeDependentCharge == true) { +// factor = max((Real)0.0,(Real)1.0+(Parameters::t-tetherChargeRiseTime)/tetherChargeRiseTime); +// factor = min((Real)1.0,factor); +// } +// +// if (useBackgroundField == false) { +// Real rad = sqrt(pos[0]*pos[0]+pos[1]*pos[1]+pos[2]*pos[2]); +// Real D3 = cell->parameters[CellParams::DX]*cell->parameters[CellParams::DY]; +// if (rad <= 5) cell->parameters[CellParams::RHOQ_EXT] = 0.25*factor*tetherUnitCharge/D3/physicalconstants::EPS_0; +// +// cell->parameters[CellParams::BGEXVOL] = 0; +// cell->parameters[CellParams::BGEYVOL] = 0; +// cell->parameters[CellParams::BGEZVOL] = 0; +// return; +// } +// +// cell->parameters[CellParams::RHOQ_EXT] = 0; +// +// const Real EPSILON = 1e-30; +// uint N = 1; +// int N3_sum = 0; +// Real E_vol[3] = {0,0,0}; +// +// bool ok = false; +// do { +// Real E_current[3] = {0,0,0}; +// +// const Real DX_N = DX / N; +// const Real DY_N = DY / N; +// const Real DZ_N = DZ / N; +// +// // Sample E using N points +// Real E_dummy[3] = {0,0,0}; +// for (uint k=0; k= poisson::Poisson::maxIterations) ok = true; +// +// // Add new E values to accumulated sums +// for (int i=0; i<3; ++i) E_vol[i] += E_current[i]; +// N3_sum += N*N*N; +// ++N; +// } while (ok == false); +// +// // Store the computed volume-average +// cell->parameters[CellParams::BGEXVOL] = E_vol[0] / N3_sum; +// cell->parameters[CellParams::BGEYVOL] = E_vol[1] / N3_sum; +// cell->parameters[CellParams::BGEZVOL] = E_vol[2] / N3_sum; } diff --git a/projects/ElectricSail/electric_sail.h b/projects/ElectricSail/electric_sail.h index 52b6016c4..3ea7020f7 100644 --- a/projects/ElectricSail/electric_sail.h +++ b/projects/ElectricSail/electric_sail.h @@ -52,7 +52,10 @@ namespace projects { Real getCorrectNumberDensity(spatial_cell::SpatialCell* cell,const uint popID) const; virtual void getParameters(); virtual bool initialize(); - virtual void setCellBackgroundField(spatial_cell::SpatialCell* cell) const; + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); protected: uint popID; diff --git a/projects/Flowthrough/Flowthrough.cpp b/projects/Flowthrough/Flowthrough.cpp index 744ca522a..4ee88cf7a 100644 --- a/projects/Flowthrough/Flowthrough.cpp +++ b/projects/Flowthrough/Flowthrough.cpp @@ -209,10 +209,13 @@ namespace projects { cellParams[CellParams::PERBZ] = 0.; } - void Flowthrough::setCellBackgroundField(spatial_cell::SpatialCell* cell) const { + void Flowthrough::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { ConstantField bgField; bgField.initialize(Bx,By,Bz); //bg bx, by,bz - setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); + setBackgroundField(bgField, BgBGrid); } std::vector > Flowthrough::getV0( 
diff --git a/projects/Flowthrough/Flowthrough.h b/projects/Flowthrough/Flowthrough.h index 7d5491269..39f64c2df 100644 --- a/projects/Flowthrough/Flowthrough.h +++ b/projects/Flowthrough/Flowthrough.h @@ -47,7 +47,10 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - void setCellBackgroundField(spatial_cell::SpatialCell* cell) const; + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); protected: Real getDistribValue( diff --git a/projects/Fluctuations/Fluctuations.cpp b/projects/Fluctuations/Fluctuations.cpp index c5b4a09e3..c8d9bda6c 100644 --- a/projects/Fluctuations/Fluctuations.cpp +++ b/projects/Fluctuations/Fluctuations.cpp @@ -178,13 +178,16 @@ namespace projects { cellParams[CellParams::PERBZ] = this->magZPertAbsAmp * (0.5 - getRandomNumber(cell)); } - void Fluctuations::setCellBackgroundField(SpatialCell* cell) const { + void Fluctuations::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { ConstantField bgField; bgField.initialize(this->BX0, this->BY0, this->BZ0); - setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); + setBackgroundField(bgField, BgBGrid); } std::vector > Fluctuations::getV0( diff --git a/projects/Fluctuations/Fluctuations.h b/projects/Fluctuations/Fluctuations.h index c4e92b9c8..31eb6db9d 100644 --- a/projects/Fluctuations/Fluctuations.h +++ b/projects/Fluctuations/Fluctuations.h @@ -48,7 +48,10 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setCellBackgroundField(spatial_cell::SpatialCell* cell) const; + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); virtual std::vector > getV0( creal x, creal y, diff --git a/projects/Harris/Harris.cpp b/projects/Harris/Harris.cpp index ede212799..ed0617708 100644 --- a/projects/Harris/Harris.cpp +++ b/projects/Harris/Harris.cpp @@ -166,8 +166,11 @@ namespace projects { return V0; } - void Harris::setCellBackgroundField(SpatialCell *cell) const { - setBackgroundFieldToZero(cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); + void Harris::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { + setBackgroundFieldToZero(BgBGrid); } } // namespace projects diff --git a/projects/Harris/Harris.h b/projects/Harris/Harris.h index 8c8a64717..1d430e890 100644 --- a/projects/Harris/Harris.h +++ b/projects/Harris/Harris.h @@ -44,7 +44,10 @@ namespace projects { static void addParameters(void); virtual void getParameters(void); virtual void calcCellParameters(spatial_cell::SpatialCell* cell,creal& t); - virtual void setCellBackgroundField(spatial_cell::SpatialCell* cell) const; + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); virtual Real calcPhaseSpaceDensity( creal& x, creal& y, creal& z, creal& dx, creal& dy, creal& dz, diff --git a/projects/IPShock/IPShock.cpp b/projects/IPShock/IPShock.cpp index 6babb2818..677e132c2 100644 --- a/projects/IPShock/IPShock.cpp +++ b/projects/IPShock/IPShock.cpp @@ -491,8 +491,11 @@ namespace projects { return a; } - void IPShock::setCellBackgroundField(spatial_cell::SpatialCell* cell) const { - 
setBackgroundFieldToZero(cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); + void IPShock::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { + setBackgroundFieldToZero(BgBGrid); } }//namespace projects diff --git a/projects/IPShock/IPShock.h b/projects/IPShock/IPShock.h index 4b06efa52..de561b5cf 100644 --- a/projects/IPShock/IPShock.h +++ b/projects/IPShock/IPShock.h @@ -62,7 +62,10 @@ namespace projects { static void addParameters(void); virtual void getParameters(void); - virtual void setCellBackgroundField(spatial_cell::SpatialCell* cell) const; + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); virtual Real calcPhaseSpaceDensity( creal& x, creal& y, creal& z, creal& dx, creal& dy, creal& dz, diff --git a/projects/Larmor/Larmor.cpp b/projects/Larmor/Larmor.cpp index 4d0065c93..4b8a85024 100644 --- a/projects/Larmor/Larmor.cpp +++ b/projects/Larmor/Larmor.cpp @@ -155,13 +155,16 @@ namespace projects { cellParams[CellParams::PERBZ ] = 0.0; } - void Larmor::setCellBackgroundField(SpatialCell* cell) { + void Larmor::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { ConstantField bgField; bgField.initialize(this->BX0, this->BY0, this->BZ0); - setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); + setBackgroundField(bgField, BgBGrid); } } //namespace projects diff --git a/projects/Larmor/Larmor.h b/projects/Larmor/Larmor.h index a721740bd..b4f5d839c 100644 --- a/projects/Larmor/Larmor.h +++ b/projects/Larmor/Larmor.h @@ -38,7 +38,10 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setCellBackgroundField(spatial_cell::SpatialCell* cell); + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); protected: Real getDistribValue( creal& x,creal& y, creal& z, diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index 9ba4b7d85..0a9e5d2c7 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -28,6 +28,7 @@ #include "../../common.h" #include "../../readparameters.h" #include "../../backgroundfield/backgroundfield.h" +#include "../../backgroundfield/constantfield.hpp" #include "../../backgroundfield/dipole.hpp" #include "../../backgroundfield/linedipole.hpp" #include "../../object_wrapper.h" @@ -256,112 +257,134 @@ namespace projects { } /* set 0-centered dipole */ - void Magnetosphere::setCellBackgroundField(SpatialCell *cell) const { - if(cell->sysBoundaryFlag == sysboundarytype::SET_MAXWELLIAN && this->noDipoleInSW) { - setBackgroundFieldToZero(cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); - } - else { - Dipole bgFieldDipole; - LineDipole bgFieldLineDipole; - - // The hardcoded constants of dipole and line dipole moments are obtained - // from Daldorff et al (2014), see - // https://github.com/fmihpc/vlasiator/issues/20 for a derivation of the - // values used here. 
- switch(this->dipoleType) { - case 0: - bgFieldDipole.initialize(8e15 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, 0.0 );//set dipole moment - setBackgroundField(bgFieldDipole,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); - break; - case 1: - bgFieldLineDipole.initialize(126.2e6 *this->dipoleScalingFactor, 0.0, 0.0, 0.0 );//set dipole moment - setBackgroundField(bgFieldLineDipole,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); - break; - case 2: - bgFieldLineDipole.initialize(126.2e6 *this->dipoleScalingFactor, 0.0, 0.0, 0.0 );//set dipole moment - setBackgroundField(bgFieldLineDipole,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); - //Append mirror dipole - bgFieldLineDipole.initialize(126.2e6 *this->dipoleScalingFactor, this->dipoleMirrorLocationX, 0.0, 0.0 ); - setBackgroundField(bgFieldLineDipole,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data(), true); - break; - case 3: - bgFieldDipole.initialize(8e15 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, 0.0 );//set dipole moment - setBackgroundField(bgFieldDipole,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); - //Append mirror dipole - bgFieldDipole.initialize(8e15 *this->dipoleScalingFactor, this->dipoleMirrorLocationX, 0.0, 0.0, 0.0 );//mirror - setBackgroundField(bgFieldDipole,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data(), true); - break; - - default: - setBackgroundFieldToZero(cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); - - } + void Magnetosphere::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { + Dipole bgFieldDipole; + LineDipole bgFieldLineDipole; + + // The hardcoded constants of dipole and line dipole moments are obtained + // from Daldorff et al (2014), see + // https://github.com/fmihpc/vlasiator/issues/20 for a derivation of the + // values used here. + switch(this->dipoleType) { + case 0: + bgFieldDipole.initialize(8e15 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, 0.0 );//set dipole moment + setBackgroundField(bgFieldDipole, BgBGrid); + break; + case 1: + bgFieldLineDipole.initialize(126.2e6 *this->dipoleScalingFactor, 0.0, 0.0, 0.0 );//set dipole moment + setBackgroundField(bgFieldLineDipole, BgBGrid); + break; + case 2: + bgFieldLineDipole.initialize(126.2e6 *this->dipoleScalingFactor, 0.0, 0.0, 0.0 );//set dipole moment + setBackgroundField(bgFieldLineDipole, BgBGrid); + //Append mirror dipole + bgFieldLineDipole.initialize(126.2e6 *this->dipoleScalingFactor, this->dipoleMirrorLocationX, 0.0, 0.0 ); + setBackgroundField(bgFieldLineDipole, BgBGrid, true); + break; + case 3: + bgFieldDipole.initialize(8e15 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, 0.0 );//set dipole moment + setBackgroundField(bgFieldDipole, BgBGrid); + //Append mirror dipole + bgFieldDipole.initialize(8e15 *this->dipoleScalingFactor, this->dipoleMirrorLocationX, 0.0, 0.0, 0.0 );//mirror + setBackgroundField(bgFieldDipole, BgBGrid, true); + break; + + default: + setBackgroundFieldToZero(BgBGrid); + } - - //Force field to zero in the perpendicular direction for 2D (1D) simulations. Otherwise we have unphysical components. 
- if(P::xcells_ini==1) { - cell->parameters[CellParams::BGBX]=0; - cell->parameters[CellParams::BGBXVOL]=0.0; - cell->derivatives[fieldsolver::dBGBydx]=0.0; - cell->derivatives[fieldsolver::dBGBzdx]=0.0; - cell->derivatives[fieldsolver::dBGBxdy]=0.0; - cell->derivatives[fieldsolver::dBGBxdz]=0.0; - cell->derivativesBVOL[bvolderivatives::dBGBYVOLdx]=0.0; - cell->derivativesBVOL[bvolderivatives::dBGBZVOLdx]=0.0; - cell->derivativesBVOL[bvolderivatives::dBGBXVOLdy]=0.0; - cell->derivativesBVOL[bvolderivatives::dBGBXVOLdz]=0.0; - } + auto localSize = BgBGrid.getLocalSize(); - if(P::ycells_ini==1) { - /*2D simulation in x and z. Set By and derivatives along Y, and derivatives of By to zero*/ - cell->parameters[CellParams::BGBY]=0.0; - cell->parameters[CellParams::BGBYVOL]=0.0; - cell->derivatives[fieldsolver::dBGBxdy]=0.0; - cell->derivatives[fieldsolver::dBGBzdy]=0.0; - cell->derivatives[fieldsolver::dBGBydx]=0.0; - cell->derivatives[fieldsolver::dBGBydz]=0.0; - cell->derivativesBVOL[bvolderivatives::dBGBXVOLdy]=0.0; - cell->derivativesBVOL[bvolderivatives::dBGBZVOLdy]=0.0; - cell->derivativesBVOL[bvolderivatives::dBGBYVOLdx]=0.0; - cell->derivativesBVOL[bvolderivatives::dBGBYVOLdz]=0.0; - } - if(P::zcells_ini==1) { - cell->parameters[CellParams::BGBX]=0; - cell->parameters[CellParams::BGBY]=0; - cell->parameters[CellParams::BGBYVOL]=0.0; - cell->parameters[CellParams::BGBXVOL]=0.0; - cell->derivatives[fieldsolver::dBGBxdy]=0.0; - cell->derivatives[fieldsolver::dBGBxdz]=0.0; - cell->derivatives[fieldsolver::dBGBydx]=0.0; - cell->derivatives[fieldsolver::dBGBydz]=0.0; - cell->derivativesBVOL[bvolderivatives::dBGBXVOLdy]=0.0; - cell->derivativesBVOL[bvolderivatives::dBGBXVOLdz]=0.0; - cell->derivativesBVOL[bvolderivatives::dBGBYVOLdx]=0.0; - cell->derivativesBVOL[bvolderivatives::dBGBYVOLdz]=0.0; - } - for(uint component=0; component<3; component++) { - if(this->constBgB[component] != 0.0) { - cell->parameters[CellParams::BGBX+component] += this->constBgB[component]; - cell->parameters[CellParams::BGBXVOL+component] += this->constBgB[component]; +#pragma omp parallel + { + //Force field to zero in the perpendicular direction for 2D (1D) simulations. Otherwise we have unphysical components. + if(P::xcells_ini==1) { +#pragma omp for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::BGBX)=0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::BGBXVOL)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBydx)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBzdx)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBxdy)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBxdz)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBYVOLdx)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBZVOLdx)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBXVOLdy)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBXVOLdz)=0.0; + } + } + } + } + if(P::ycells_ini==1) { + /*2D simulation in x and z. 
Set By and derivatives along Y, and derivatives of By to zero*/ +#pragma omp for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::BGBY)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::BGBYVOL)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBxdy)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBzdy)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBydx)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBydz)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBXVOLdy)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBZVOLdy)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBYVOLdx)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBYVOLdz)=0.0; + } + } + } } + if(P::zcells_ini==1) { +#pragma omp for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::BGBX)=0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::BGBY)=0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::BGBYVOL)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::BGBXVOL)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBxdy)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBxdz)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBydx)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBydz)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBXVOLdy)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBXVOLdz)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBYVOLdx)=0.0; + BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBYVOLdz)=0.0; + } + } + } + } + + // Remove dipole from inflow cells if this is requested + if(this->noDipoleInSW) { +#pragma omp for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + if(technicalGrid.get(x, y, z)->sysBoundaryFlag == sysboundarytype::SET_MAXWELLIAN ) { + for (int i = 0; i < fsgrids::bgbfield::N_BGB; ++i) { + BgBGrid.get(x,y,z)->at(i) = 0; + } + } + } + } + } + } + } // end of omp parallel region + // Superimpose constant background field if needed + if(this->constBgB[0] != 0.0 || this->constBgB[1] != 0.0 || this->constBgB[2] != 0.0) { + ConstantField bgConstantField; + bgConstantField.initialize(this->constBgB[0], this->constBgB[1], this->constBgB[2]); + setBackgroundField(bgConstantField, BgBGrid, true); } - -// // FIXME TESTING HACK to be used when one wants to get the "zero" Hall field from the dipole -// cell->parameters[CellParams::PERBX] = cell->parameters[CellParams::BGBX]; -// cell->parameters[CellParams::PERBXVOL] = cell->parameters[CellParams::BGBXVOL]; -// cell->parameters[CellParams::BGBX] = 0.0; -// cell->parameters[CellParams::BGBXVOL] = 0.0; -// cell->parameters[CellParams::PERBY] = cell->parameters[CellParams::BGBY]; -// cell->parameters[CellParams::PERBYVOL] = cell->parameters[CellParams::BGBYVOL]; -// cell->parameters[CellParams::BGBY] = 0.0; -// cell->parameters[CellParams::BGBYVOL] = 0.0; -// cell->parameters[CellParams::PERBZ] = cell->parameters[CellParams::BGBY]; -// cell->parameters[CellParams::PERBZVOL] = cell->parameters[CellParams::BGBZVOL]; -// cell->parameters[CellParams::BGBZ] = 0.0; -// cell->parameters[CellParams::BGBZVOL] = 0.0; -// // END OF TESTING HACK - } diff --git a/projects/Magnetosphere/Magnetosphere.h 
b/projects/Magnetosphere/Magnetosphere.h index 28d6bdd32..8926291de 100644 --- a/projects/Magnetosphere/Magnetosphere.h +++ b/projects/Magnetosphere/Magnetosphere.h @@ -47,7 +47,10 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setCellBackgroundField(spatial_cell::SpatialCell* cell) const; + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); virtual Real calcPhaseSpaceDensity( creal& x, creal& y, creal& z, creal& dx, creal& dy, creal& dz, diff --git a/projects/MultiPeak/MultiPeak.cpp b/projects/MultiPeak/MultiPeak.cpp index ad0e4f58c..f9a5ad8d3 100644 --- a/projects/MultiPeak/MultiPeak.cpp +++ b/projects/MultiPeak/MultiPeak.cpp @@ -229,13 +229,16 @@ namespace projects { rhoRnd = 0.5 - getRandomNumber(cell); } - void MultiPeak::setCellBackgroundField(SpatialCell* cell) const { + void MultiPeak::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { ConstantField bgField; bgField.initialize(this->Bx, this->By, this->Bz); - setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); + setBackgroundField(bgField, BgBGrid); } std::vector > MultiPeak::getV0( diff --git a/projects/MultiPeak/MultiPeak.h b/projects/MultiPeak/MultiPeak.h index 55356a80a..eec1626e9 100644 --- a/projects/MultiPeak/MultiPeak.h +++ b/projects/MultiPeak/MultiPeak.h @@ -61,7 +61,10 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setCellBackgroundField(spatial_cell::SpatialCell* cell) const; + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); protected: Real getDistribValue( creal& x,creal& y, creal& z, diff --git a/projects/Poisson/poisson_test.cpp b/projects/Poisson/poisson_test.cpp index f7eda5b0b..81e912a36 100644 --- a/projects/Poisson/poisson_test.cpp +++ b/projects/Poisson/poisson_test.cpp @@ -72,7 +72,10 @@ namespace projects { return true; } - void PoissonTest::setCellBackgroundField(SpatialCell* cell) { + void PoissonTest::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { } diff --git a/projects/Poisson/poisson_test.h b/projects/Poisson/poisson_test.h index 3de34ec46..ad579e558 100644 --- a/projects/Poisson/poisson_test.h +++ b/projects/Poisson/poisson_test.h @@ -41,7 +41,10 @@ namespace projects { static void addParameters(); virtual void getParameters(); virtual bool initialize(); - virtual void setCellBackgroundField(spatial_cell::SpatialCell* cell); + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); protected: virtual void calcCellParameters(spatial_cell::SpatialCell* cell,creal& t); diff --git a/projects/Shocktest/Shocktest.cpp b/projects/Shocktest/Shocktest.cpp index c260b43f5..d727be585 100644 --- a/projects/Shocktest/Shocktest.cpp +++ b/projects/Shocktest/Shocktest.cpp @@ -222,11 +222,13 @@ namespace projects { } - /*! 
Base class sets zero background field */ - void Shocktest::setCellBackgroundField(SpatialCell* cell) { + void Shocktest::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { ConstantField bgField; bgField.initialize(0,0,0); //bg bx, by,bz - setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); + setBackgroundField(bgField, BgBGrid); } } // Namespace projects diff --git a/projects/Shocktest/Shocktest.h b/projects/Shocktest/Shocktest.h index 4f8247230..26a29220f 100644 --- a/projects/Shocktest/Shocktest.h +++ b/projects/Shocktest/Shocktest.h @@ -62,7 +62,10 @@ namespace projects { creal& dvx, creal& dvy, creal& dvz, const uint popID ) const; - virtual void setCellBackgroundField(spatial_cell::SpatialCell* cell); + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); virtual void calcCellParameters(spatial_cell::SpatialCell* cell,creal& t); virtual Real calcPhaseSpaceDensity( diff --git a/projects/Template/Template.cpp b/projects/Template/Template.cpp index 04e9fc863..09f42f57a 100644 --- a/projects/Template/Template.cpp +++ b/projects/Template/Template.cpp @@ -71,14 +71,13 @@ namespace projects { exp(- physicalconstants::MASS_PROTON * ((vx-Vx0)*(vx-Vx0) + (vy-Vy0)*(vy-Vy0) + (vz-Vz0)*(vz-Vz0)) / (2.0 * physicalconstants::K_B * T)); } - void Template::setCellBackgroundField(SpatialCell *cell){ + void Template::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { Dipole bgField; bgField.initialize(8e15, 0.0, 0.0, 0.0, 0.0); //set dipole moment and location - if(cell->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { - setBackgroundFieldToZero(cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); - } else { - setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); - } + setBackgroundField(bgField, BgBGrid); } vector> Template::getV0( diff --git a/projects/Template/Template.h b/projects/Template/Template.h index 31ee3647d..f01702637 100644 --- a/projects/Template/Template.h +++ b/projects/Template/Template.h @@ -35,7 +35,10 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setCellBackgroundField(spatial_cell::SpatialCell* cell); + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); virtual Real calcPhaseSpaceDensity( creal& x, creal& y, creal& z, creal& dx, creal& dy, creal& dz, diff --git a/projects/VelocityBox/VelocityBox.cpp b/projects/VelocityBox/VelocityBox.cpp index bd1ed9b02..4fde4ec11 100644 --- a/projects/VelocityBox/VelocityBox.cpp +++ b/projects/VelocityBox/VelocityBox.cpp @@ -109,13 +109,16 @@ namespace projects { cellParams[CellParams::PERBZ ] = 0.0; } - void VelocityBox::setCellBackgroundField(SpatialCell* cell) { - ConstantField bgField; + void VelocityBox::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { + ConstantField bgField; bgField.initialize(this->Bx, this->By, this->Bz); - setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); + setBackgroundField(bgField, BgBGrid); } }// namespace projects diff --git a/projects/VelocityBox/VelocityBox.h 
b/projects/VelocityBox/VelocityBox.h index 5d074411d..90aadfb7a 100644 --- a/projects/VelocityBox/VelocityBox.h +++ b/projects/VelocityBox/VelocityBox.h @@ -36,7 +36,10 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setCellBackgroundField(spatial_cell::SpatialCell* cell); + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); protected: Real getDistribValue(creal& vx, creal& vy, creal& vz, const uint popID) const; virtual void calcCellParameters(spatial_cell::SpatialCell* cell,creal& t); diff --git a/projects/project.cpp b/projects/project.cpp index ba67d6d3a..773f1b805 100644 --- a/projects/project.cpp +++ b/projects/project.cpp @@ -183,7 +183,10 @@ namespace projects { bool Project::initialized() {return baseClassInitialized;} /*! Print a warning message to stderr and abort, one should not use the base class functions. */ - void Project::setCellBackgroundField(SpatialCell* cell) const { + void Project::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { int rank; MPI_Comm_rank(MPI_COMM_WORLD,&rank); if (rank == MASTER_RANK) { diff --git a/projects/project.h b/projects/project.h index ca9f7f46a..5b08a6db6 100644 --- a/projects/project.h +++ b/projects/project.h @@ -26,6 +26,7 @@ #include "../spatial_cell.hpp" #include #include +#include "fsgrid.hpp" namespace projects { class Project { @@ -52,11 +53,15 @@ namespace projects { bool initialized(); - /*! set background field, should set it for all cells. + /*! set background field on the background field fsgrid. * Currently this function is only called during the initialization. - * NOTE: This function is called inside parallel region so it must be declared as const. - * @param cell Pointer to the spatial cell.*/ - virtual void setCellBackgroundField(spatial_cell::SpatialCell* cell) const; + * @param BgBGrid Background field fsgrid + * @param technicalGrid Technical fsgrid + */ + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); /*! Setup data structures for subsequent setCell calls. 
* This will most likely be empty for most projects, except for some advanced diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index 98f6481c0..abaa21215 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -230,13 +230,16 @@ namespace projects { rhoRnd = 0.5 - getRandomNumber(cell); } - void testAmr::setCellBackgroundField(SpatialCell* cell) const { + void testAmr::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { ConstantField bgField; bgField.initialize(this->Bx, this->By, this->Bz); - setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); + setBackgroundField(bgField, BgBGrid); } std::vector > testAmr::getV0( diff --git a/projects/testAmr/testAmr.h b/projects/testAmr/testAmr.h index fe5db8ca2..a7daf55dd 100644 --- a/projects/testAmr/testAmr.h +++ b/projects/testAmr/testAmr.h @@ -61,7 +61,10 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setCellBackgroundField(spatial_cell::SpatialCell* cell) const; + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); protected: Real getDistribValue( creal& x,creal& y, creal& z, diff --git a/projects/testHall/testHall.cpp b/projects/testHall/testHall.cpp index cc996dd91..bcf7bab29 100644 --- a/projects/testHall/testHall.cpp +++ b/projects/testHall/testHall.cpp @@ -92,7 +92,7 @@ namespace projects { exp(- mass * (pow(vx + 0.5 * dvx - this->VX0, 2.0) + pow(vy + 0.5 * dvy - this->VY0, 2.0) + pow(vz + 0.5 * dvz - this->VZ0, 2.0)) / (2.0 * kb * this->TEMPERATURE))); } -// void TestHall::setCellBackgroundField(SpatialCell *cell){ +// void TestHall::setProjectBackgroundField(SpatialCell *cell){ // Dipole bgField; // bgField.initialize(8e15 *this->dipoleScalingFactor,this->dipoleTilt); //set dipole moment // if(cell->sysBoundaryFlag == sysboundarytype::SET_MAXWELLIAN && this->noDipoleInSW) { diff --git a/projects/testHall/testHall.h b/projects/testHall/testHall.h index 37efa5375..5e60b047d 100644 --- a/projects/testHall/testHall.h +++ b/projects/testHall/testHall.h @@ -35,7 +35,7 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); -// virtual void setCellBackgroundField(SpatialCell* cell); +// virtual void setProjectBackgroundField(SpatialCell* cell); virtual void calcCellParameters(spatial_cell::SpatialCell* cell,creal& t); virtual Real calcPhaseSpaceDensity( creal& x, creal& y, creal& z, diff --git a/projects/test_fp/test_fp.cpp b/projects/test_fp/test_fp.cpp index 5dfc2f63a..2be445620 100644 --- a/projects/test_fp/test_fp.cpp +++ b/projects/test_fp/test_fp.cpp @@ -105,8 +105,11 @@ namespace projects { return result; } - void test_fp::setCellBackgroundField(spatial_cell::SpatialCell *cell) const { - setBackgroundFieldToZero(cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); + void test_fp::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { + setBackgroundFieldToZero(BgBGrid); } void test_fp::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { diff --git a/projects/test_fp/test_fp.h b/projects/test_fp/test_fp.h index 8388ab6ee..7400d8b43 100644 --- a/projects/test_fp/test_fp.h +++ b/projects/test_fp/test_fp.h @@ -37,7 +37,10 @@ namespace projects { virtual 
bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setCellBackgroundField(spatial_cell::SpatialCell* cell) const; + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); protected: Real sign(creal value) const; diff --git a/projects/test_trans/test_trans.cpp b/projects/test_trans/test_trans.cpp index b85b7878c..e8617cddf 100644 --- a/projects/test_trans/test_trans.cpp +++ b/projects/test_trans/test_trans.cpp @@ -136,10 +136,13 @@ namespace projects { cellParams[CellParams::PERBZ ] = 0.0; } - void test_trans::setCellBackgroundField(SpatialCell* cell) const { + void test_trans::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { ConstantField bgField; bgField.initialize(0.0,0.0,1e-9); - setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); + setBackgroundField(bgField, BgBGrid); } }// namespace projects diff --git a/projects/test_trans/test_trans.h b/projects/test_trans/test_trans.h index 3714fcca0..2315ed0be 100644 --- a/projects/test_trans/test_trans.h +++ b/projects/test_trans/test_trans.h @@ -37,7 +37,10 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - void setCellBackgroundField(spatial_cell::SpatialCell* cell) const; + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); protected: Real getDistribValue(creal& vx, creal& vy, creal& vz); diff --git a/projects/verificationLarmor/verificationLarmor.cpp b/projects/verificationLarmor/verificationLarmor.cpp index bff19a35e..668a8ff9b 100644 --- a/projects/verificationLarmor/verificationLarmor.cpp +++ b/projects/verificationLarmor/verificationLarmor.cpp @@ -127,13 +127,16 @@ namespace projects { cellParams[CellParams::PERBZ ] = 0.0; } - void verificationLarmor::setCellBackgroundField(SpatialCell* cell) { + void verificationLarmor::setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { ConstantField bgField; bgField.initialize(this->BX0, this->BY0, this->BZ0); - setBackgroundField(bgField,cell->parameters.data(), cell->derivatives.data(),cell->derivativesBVOL.data()); + setBackgroundField(bgField, BgBGrid); } } //namespace projects diff --git a/projects/verificationLarmor/verificationLarmor.h b/projects/verificationLarmor/verificationLarmor.h index 851a15203..7838e4516 100644 --- a/projects/verificationLarmor/verificationLarmor.h +++ b/projects/verificationLarmor/verificationLarmor.h @@ -37,7 +37,10 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setCellBackgroundField(spatial_cell::SpatialCell* cell); + virtual void setProjectBackgroundField( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); protected: Real getDistribValue(creal& vx, creal& vy, creal& vz, const uint popID) const; virtual void calcCellParameters(spatial_cell::SpatialCell* cell,creal& t); diff --git a/vlasiator.cpp b/vlasiator.cpp index f4f51eaf2..0e1b2678a 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -372,27 +372,8 @@ int main(int argn,char* args[]) { // Add AMR refinement criterias: amr_ref_criteria::addRefinementCriteria(); - // Initialize grid. 
After initializeGrid local cells have dist
- // functions, and B fields set. Cells have also been classified for
- // the various sys boundary conditions. All remote cells have been
- // created. All spatial data computed this far is up to date for
- // FULL_NEIGHBORHOOD. Block lists up to date for
- // VLASOV_SOLVER_NEIGHBORHOOD (but dist function has not been communicated)
- phiprof::start("Init grid");
- //dccrg::Dccrg mpiGrid;
- initializeGrid(argn,args,mpiGrid,sysBoundaries,*project);
- isSysBoundaryCondDynamic = sysBoundaries.isDynamic();
-
- phiprof::stop("Init grid");
-
- // Initialize data reduction operators. This should be done elsewhere in order to initialize
- // user-defined operators:
- phiprof::start("Init DROs");
- DataReducer outputReducer, diagnosticReducer;
- initializeDataReducers(&outputReducer, &diagnosticReducer);
- phiprof::stop("Init DROs");
-
 // Initialize simplified Fieldsolver grids.
+ // Needs to be done here already as the background field will be set right away, even before going to initializeGrid
 phiprof::start("Init fieldsolver grids");
 const std::array fsGridDimensions = {convert(P::xcells_ini) * pow(2,P::amrMaxSpatialRefLevel),
 convert(P::ycells_ini) * pow(2,P::amrMaxSpatialRefLevel),
@@ -429,7 +410,39 @@ int main(int argn,char* args[]) {
 perBGrid.DZ = perBDt2Grid.DZ = EGrid.DZ = EDt2Grid.DZ = EHallGrid.DZ = EGradPeGrid.DZ = momentsGrid.DZ = momentsDt2Grid.DZ = dPerBGrid.DZ = dMomentsGrid.DZ = BgBGrid.DZ = volGrid.DZ = technicalGrid.DZ = P::dz_ini * pow(2,-P::amrMaxSpatialRefLevel);
+ // Set the physical start (lower left corner) X, Y, Z
+ perBGrid.physicalGlobalStart = perBDt2Grid.physicalGlobalStart = EGrid.physicalGlobalStart = EDt2Grid.physicalGlobalStart
+ = EHallGrid.physicalGlobalStart = EGradPeGrid.physicalGlobalStart = momentsGrid.physicalGlobalStart
+ = momentsDt2Grid.physicalGlobalStart = dPerBGrid.physicalGlobalStart = dMomentsGrid.physicalGlobalStart
+ = BgBGrid.physicalGlobalStart = volGrid.physicalGlobalStart = technicalGrid.physicalGlobalStart
+ = {P::xmin, P::ymin, P::zmin};
 phiprof::stop("Init fieldsolver grids");
+
+ // Initialize grid. After initializeGrid local cells have dist
+ // functions, and B fields set. Cells have also been classified for
+ // the various sys boundary conditions. All remote cells have been
+ // created. All spatial data computed this far is up to date for
+ // FULL_NEIGHBORHOOD. Block lists up to date for
+ // VLASOV_SOLVER_NEIGHBORHOOD (but dist function has not been communicated)
+ phiprof::start("Init grid");
+
+ phiprof::start("setProjectBackgroundField");
+ project->setProjectBackgroundField(BgBGrid, technicalGrid);
+ phiprof::stop("setProjectBackgroundField");
+
+ //dccrg::Dccrg mpiGrid;
+ initializeGrid(argn,args,mpiGrid,sysBoundaries,*project);
+ isSysBoundaryCondDynamic = sysBoundaries.isDynamic();
+
+ phiprof::stop("Init grid");
+
+ // Initialize data reduction operators. This should be done elsewhere in order to initialize
+ // user-defined operators:
+ phiprof::start("Init DROs");
+ DataReducer outputReducer, diagnosticReducer;
+ initializeDataReducers(&outputReducer, &diagnosticReducer);
+ phiprof::stop("Init DROs");
+
 phiprof::start("Initial fsgrid coupling");
 const std::vector& cells = getLocalCells();

From 76f2a503aaf1aa48be1f335f08073f6e95a54744 Mon Sep 17 00:00:00 2001
From: ykempf
Date: Thu, 11 Apr 2019 09:04:17 +0300
Subject: [PATCH 292/602] Implemented getBgFieldsAndDerivativesFromFsGrid.
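Note on the data flow: this replaces the removed feedBgFieldsIntoFsGrid with a
transfer in the opposite direction. The background field is now produced
directly on the fsgrid by setProjectBackgroundField and averaged back into
DCCRG. The cell count used in the averaging loop,

    nCells = pow(pow(2, maxRefLvl - refLvl), 3)

(schematically; the code queries both levels from mpiGrid.mapping), is the
fan-out of the refinement mapping: a dccrg cell two levels below the maximum
refinement level receives the mean of (2^2)^3 = 64 fsgrid values, while a
fully refined cell maps 1:1 onto a single fsgrid cell.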
--- fieldsolver/gridGlue.cpp | 147 ++++++++++++++++++++++++--------------- fieldsolver/gridGlue.hpp | 27 ++++--- vlasiator.cpp | 2 +- 3 files changed, 103 insertions(+), 73 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 1cf3c3126..38614ed42 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -78,64 +78,6 @@ void feedMomentsIntoFsGrid(dccrg::Dccrg& } -void feedBgFieldsIntoFsGrid(dccrg::Dccrg& mpiGrid, - const std::vector& cells, FsGrid< std::array, 2>& bgBGrid) { - - int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); - bgBGrid.setupForTransferIn(nCells); - - // Setup transfer buffers - std::vector< std::array > transferBuffer(cells.size()); - - // Fill from cellParams - // We only need to read data once per dccrg cell here -#pragma omp parallel for - for(uint i = 0; i < cells.size(); ++i) { - CellID dccrgId = cells[i]; - auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); - auto derivatives = mpiGrid[dccrgId]->derivatives; - auto volumeDerivatives = mpiGrid[dccrgId]->derivativesBVOL; - - // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; - - std::array* thisCellData = &transferBuffer[i]; - - thisCellData->at(fsgrids::bgbfield::BGBX) = cellParams[CellParams::BGBX]; - thisCellData->at(fsgrids::bgbfield::BGBY) = cellParams[CellParams::BGBY]; - thisCellData->at(fsgrids::bgbfield::BGBZ) = cellParams[CellParams::BGBZ]; - thisCellData->at(fsgrids::bgbfield::BGBXVOL) = cellParams[CellParams::BGBXVOL]; - thisCellData->at(fsgrids::bgbfield::BGBYVOL) = cellParams[CellParams::BGBYVOL]; - thisCellData->at(fsgrids::bgbfield::BGBZVOL) = cellParams[CellParams::BGBZVOL]; - - thisCellData->at(fsgrids::bgbfield::dBGBxdy) = derivatives[fieldsolver::dBGBxdy]; - thisCellData->at(fsgrids::bgbfield::dBGBxdz) = derivatives[fieldsolver::dBGBxdz]; - thisCellData->at(fsgrids::bgbfield::dBGBydx) = derivatives[fieldsolver::dBGBydx]; - thisCellData->at(fsgrids::bgbfield::dBGBydz) = derivatives[fieldsolver::dBGBydz]; - thisCellData->at(fsgrids::bgbfield::dBGBzdx) = derivatives[fieldsolver::dBGBzdx]; - thisCellData->at(fsgrids::bgbfield::dBGBzdy) = derivatives[fieldsolver::dBGBzdy]; - - thisCellData->at(fsgrids::bgbfield::dBGBXVOLdy) = volumeDerivatives[bvolderivatives::dBGBXVOLdy]; - thisCellData->at(fsgrids::bgbfield::dBGBXVOLdz) = volumeDerivatives[bvolderivatives::dBGBXVOLdz]; - thisCellData->at(fsgrids::bgbfield::dBGBYVOLdx) = volumeDerivatives[bvolderivatives::dBGBYVOLdx]; - thisCellData->at(fsgrids::bgbfield::dBGBYVOLdz) = volumeDerivatives[bvolderivatives::dBGBYVOLdz]; - thisCellData->at(fsgrids::bgbfield::dBGBZVOLdx) = volumeDerivatives[bvolderivatives::dBGBZVOLdx]; - thisCellData->at(fsgrids::bgbfield::dBGBZVOLdy) = volumeDerivatives[bvolderivatives::dBGBZVOLdy]; - } - - // Copy data into each fsgrid cell overlapping the dccrg cell - for (uint i = 0; i < cells.size(); ++i) { - CellID dccrgId = cells[i]; - const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); - for (auto fsgridId : fsgridIds) { - bgBGrid.transferDataIn(fsgridId, &transferBuffer[i]); - } - } - - // Finish the actual transfer - bgBGrid.finishTransfersIn(); - -} - void getVolumeFieldsFromFsGrid(FsGrid< std::array, 2>& volumeFieldsGrid, dccrg::Dccrg& mpiGrid, const std::vector& cells) { @@ -216,6 +158,95 @@ void getVolumeFieldsFromFsGrid(FsGrid< std::array, 2>& BgBGrid, + dccrg::Dccrg& mpiGrid, + const std::vector& cells +) { + // Setup transfer buffers + int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); + std::vector< std::array 
> transferBuffer(nCells); + std::vector< std::array*> transferBufferPointer; + + // Setup transfer pointers + BgBGrid.setupForTransferOut(nCells); + int k = 0; + for(auto dccrgId : cells) { + const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); + // Store a pointer to the first fsgrid cell that maps to each dccrg Id + transferBufferPointer.push_back(&transferBuffer[k]); + for (auto fsgridId : fsgridIds) { + std::array* thisCellData = &transferBuffer[k++]; + BgBGrid.transferDataOut(fsgridId, thisCellData); + } + } + // Do the transfer + BgBGrid.finishTransfersOut(); + + // Build lists of index pairs to dccrg and fsgrid + std::vector> iCellParams; + iCellParams.reserve(6); + iCellParams.push_back(std::make_pair(CellParams::BGBX, fsgrids::bgbfield::BGBX)); + iCellParams.push_back(std::make_pair(CellParams::BGBY, fsgrids::bgbfield::BGBY)); + iCellParams.push_back(std::make_pair(CellParams::BGBZ, fsgrids::bgbfield::BGBZ)); + iCellParams.push_back(std::make_pair(CellParams::BGBXVOL, fsgrids::bgbfield::BGBXVOL)); + iCellParams.push_back(std::make_pair(CellParams::BGBYVOL, fsgrids::bgbfield::BGBYVOL)); + iCellParams.push_back(std::make_pair(CellParams::BGBZVOL, fsgrids::bgbfield::BGBZVOL)); + std::vector> iDerivatives; + iDerivatives.reserve(6); + iDerivatives.push_back(std::make_pair(fieldsolver::dBGBxdy, fsgrids::bgbfield::dBGBxdy)); + iDerivatives.push_back(std::make_pair(fieldsolver::dBGBxdz, fsgrids::bgbfield::dBGBxdz)); + iDerivatives.push_back(std::make_pair(fieldsolver::dBGBydx, fsgrids::bgbfield::dBGBydx)); + iDerivatives.push_back(std::make_pair(fieldsolver::dBGBydz, fsgrids::bgbfield::dBGBydz)); + iDerivatives.push_back(std::make_pair(fieldsolver::dBGBzdx, fsgrids::bgbfield::dBGBzdx)); + iDerivatives.push_back(std::make_pair(fieldsolver::dBGBzdy, fsgrids::bgbfield::dBGBzdy)); + std::vector> iDerivativesBVOL; + iDerivativesBVOL.reserve(6); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBXVOLdy, fsgrids::bgbfield::dBGBXVOLdy)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBXVOLdz, fsgrids::bgbfield::dBGBXVOLdz)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBYVOLdx, fsgrids::bgbfield::dBGBYVOLdx)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBYVOLdz, fsgrids::bgbfield::dBGBYVOLdz)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBZVOLdx, fsgrids::bgbfield::dBGBZVOLdx)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBZVOLdy, fsgrids::bgbfield::dBGBZVOLdy)); + + // Distribute data from the transfer buffer back into the appropriate mpiGrid places + #pragma omp parallel for + for(uint i = 0; i < cells.size(); ++i) { + + int dccrgId = cells[i]; + auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); + + // Calculate the number of fsgrid cells we need to average into the current dccrg cell + int nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); + + // TODO: Could optimize here by adding a separate branch for nCells == 1 with direct assignment of the value + // Could also do the average in a temporary value and only access grid structure once. 
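// An illustrative sketch of the optimisation suggested in the TODO above
// (not part of this patch; it reuses the surrounding scope: cellParams,
// iCellParams, transferBufferPointer, i and nCells). Accumulating into a
// local temporary touches the dccrg data structures only once per field:
//
//    std::vector<Real> tmp(iCellParams.size(), 0.0);
//    for (int iCell = 0; iCell < nCells; ++iCell) {
//       const std::array<Real, fsgrids::bgbfield::N_BGB>* cellData = transferBufferPointer[i] + iCell;
//       for (size_t j = 0; j < iCellParams.size(); ++j) {
//          tmp[j] += cellData->at(iCellParams[j].second);
//       }
//    }
//    for (size_t j = 0; j < iCellParams.size(); ++j) {
//       cellParams[iCellParams[j].first] = tmp[j] / nCells;
//    }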
+ + // Initialize values to 0 + for (auto j : iCellParams) cellParams[j.first] = 0.0; + for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; + for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] = 0.0; + + for(int iCell = 0; iCell < nCells; ++iCell) { + // The fsgrid cells that cover the i'th dccrg cell are pointed at by + // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. We want to average + // over all of them to get the value for the dccrg cell + std::array* thisCellData = transferBufferPointer[i] + iCell; + + for (auto j : iCellParams) cellParams[j.first] += thisCellData->at(j.second); + for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] += thisCellData->at(j.second); + for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] += thisCellData->at(j.second); + } + + // Divide by the number of cells to get the average + for (auto j : iCellParams) cellParams[j.first] /= nCells; + for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] /= nCells; + for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] /= nCells; + + } +} + + void getDerivativesFromFsGrid(FsGrid< std::array, 2>& dperbGrid, FsGrid< std::array, 2>& dmomentsGrid, FsGrid< std::array, 2>& bgbfieldGrid, diff --git a/fieldsolver/gridGlue.hpp b/fieldsolver/gridGlue.hpp index a3c41a35c..4df65a607 100644 --- a/fieldsolver/gridGlue.hpp +++ b/fieldsolver/gridGlue.hpp @@ -30,6 +30,19 @@ void getVolumeFieldsFromFsGrid(FsGrid< std::array& mpiGrid, const std::vector& cells); +/*! Copy background B fields and store them into DCCRG + * \param mpiGrid The DCCRG grid carrying fields. + * \param cells List of local cells + * \param BgBGrid Background field fsgrid + * + * This function assumes that proper grid coupling has been set up. + */ +void getBgFieldsAndDerivativesFromFsGrid( + FsGrid< std::array, 2>& BgBGrid, + dccrg::Dccrg& mpiGrid, + const std::vector& cells +); + /*! Copy field derivatives from the appropriate FsGrids and store them back into DCCRG * * This should only be neccessary for debugging. @@ -61,20 +74,6 @@ void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid, dccrg::Dccrg& mpiGrid, const std::vector& cells); -/*! Transfer background fields into the appropriate FsGrid structure - * This requires separate handling, since the source data is not lying - * continuous in memory on the DCCRG side. - * - * \param mpiGrid The DCCRG grid carrying fieldparam data - * \param cells List of local cells - * \param targetGrid Fieldsolver grid for these quantities - * - * This function assumes that proper grid coupling has been set up. 
- */ -void feedBgFieldsIntoFsGrid(dccrg::Dccrg& mpiGrid, - const std::vector& cells, - FsGrid< std::array, 2>& BgBGrid); - int getNumberOfCellsOnMaxRefLvl(dccrg::Dccrg& mpiGrid, const std::vector& cells); diff --git a/vlasiator.cpp b/vlasiator.cpp index 0e1b2678a..0b362e9c0 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -469,7 +469,7 @@ int main(int argn,char* args[]) { // Transfer initial field configuration into the FsGrids feedFieldDataIntoFsGrid(mpiGrid,cells,CellParams::PERBX,perBGrid); - feedBgFieldsIntoFsGrid(mpiGrid,cells,BgBGrid); + getBgFieldsAndDerivativesFromFsGrid(BgBGrid, mpiGrid, cells); BgBGrid.updateGhostCells(); setupTechnicalFsGrid(mpiGrid, cells, technicalGrid); From 05a32c87f593916230d2c89912db7c18accf5d2c Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 11 Apr 2019 09:07:59 +0300 Subject: [PATCH 293/602] Took out background fields from getDerivativesFromFsGrid as they don't change in time. --- fieldsolver/gridGlue.cpp | 21 --------------------- fieldsolver/gridGlue.hpp | 1 - vlasiator.cpp | 4 ++-- 3 files changed, 2 insertions(+), 24 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 38614ed42..817233275 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -249,7 +249,6 @@ void getBgFieldsAndDerivativesFromFsGrid( void getDerivativesFromFsGrid(FsGrid< std::array, 2>& dperbGrid, FsGrid< std::array, 2>& dmomentsGrid, - FsGrid< std::array, 2>& bgbfieldGrid, dccrg::Dccrg& mpiGrid, const std::vector& cells) { @@ -257,15 +256,12 @@ void getDerivativesFromFsGrid(FsGrid< std::array, int nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); std::vector< std::array > dperbTransferBuffer(nCellsOnMaxRefLvl); std::vector< std::array > dmomentsTransferBuffer(nCellsOnMaxRefLvl); - std::vector< std::array > bgbfieldTransferBuffer(nCellsOnMaxRefLvl); std::vector< std::array*> dperbTransferBufferPointer; std::vector< std::array*> dmomentsTransferBufferPointer; - std::vector< std::array*> bgbfieldTransferBufferPointer; dperbGrid.setupForTransferOut(nCellsOnMaxRefLvl); dmomentsGrid.setupForTransferOut(nCellsOnMaxRefLvl); - bgbfieldGrid.setupForTransferOut(nCellsOnMaxRefLvl); int k = 0; for (auto dccrgId : cells) { @@ -275,7 +271,6 @@ void getDerivativesFromFsGrid(FsGrid< std::array, // Store a pointer to the first fsgrid cell that maps to each dccrg Id dperbTransferBufferPointer.push_back(&dperbTransferBuffer[k]); dmomentsTransferBufferPointer.push_back(&dmomentsTransferBuffer[k]); - bgbfieldTransferBufferPointer.push_back(&bgbfieldTransferBuffer[k]); for (auto fsgridId : fsgridIds) { @@ -283,19 +278,15 @@ void getDerivativesFromFsGrid(FsGrid< std::array, dperbGrid.transferDataOut(fsgridId, dperbCellData); std::array* dmomentsCellData = &dmomentsTransferBuffer[k]; dmomentsGrid.transferDataOut(fsgridId, dmomentsCellData); - std::array* bgbfieldCellData = &bgbfieldTransferBuffer[k++]; - bgbfieldGrid.transferDataOut(fsgridId, bgbfieldCellData); } } // Do the transfer dperbGrid.finishTransfersOut(); dmomentsGrid.finishTransfersOut(); - bgbfieldGrid.finishTransfersOut(); std::vector> iDmoments; std::vector> iDperb; - std::vector> iBgbfield; iDmoments.reserve(24); iDmoments.push_back(std::make_pair(fieldsolver::drhomdx, fsgrids::dmoments::drhomdx)); iDmoments.push_back(std::make_pair(fieldsolver::drhomdy, fsgrids::dmoments::drhomdy)); @@ -338,14 +329,6 @@ void getDerivativesFromFsGrid(FsGrid< std::array, iDperb.push_back(std::make_pair(fieldsolver::dPERBxdyz, fsgrids::dperb::dPERBxdyz)); 
iDperb.push_back(std::make_pair(fieldsolver::dPERBydxz, fsgrids::dperb::dPERBydxz)); iDperb.push_back(std::make_pair(fieldsolver::dPERBzdxy, fsgrids::dperb::dPERBzdxy)); - - iBgbfield.reserve(6); - iBgbfield.push_back(std::make_pair(fieldsolver::dBGBxdy, fsgrids::bgbfield::dBGBxdy)); - iBgbfield.push_back(std::make_pair(fieldsolver::dBGBxdz, fsgrids::bgbfield::dBGBxdz)); - iBgbfield.push_back(std::make_pair(fieldsolver::dBGBydx, fsgrids::bgbfield::dBGBydx)); - iBgbfield.push_back(std::make_pair(fieldsolver::dBGBydz, fsgrids::bgbfield::dBGBydz)); - iBgbfield.push_back(std::make_pair(fieldsolver::dBGBzdx, fsgrids::bgbfield::dBGBzdx)); - iBgbfield.push_back(std::make_pair(fieldsolver::dBGBzdy, fsgrids::bgbfield::dBGBzdy)); // Distribute data from the transfer buffers back into the appropriate mpiGrid places #pragma omp parallel for @@ -359,7 +342,6 @@ void getDerivativesFromFsGrid(FsGrid< std::array, for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; - for (auto j : iBgbfield) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; for(int iCell = 0; iCell < nCells; ++iCell) { // The fsgrid cells that cover the i'th dccrg cell are pointed at by @@ -368,16 +350,13 @@ void getDerivativesFromFsGrid(FsGrid< std::array, std::array* dperb = dperbTransferBufferPointer[i] + iCell; std::array* dmoments = dmomentsTransferBufferPointer[i] + iCell; - std::array* bgbfield = bgbfieldTransferBufferPointer[i] + iCell; for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] += dmoments->at(j.second); for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] += dperb ->at(j.second); - for (auto j : iBgbfield) mpiGrid[dccrgId]->derivatives[j.first] += bgbfield->at(j.second); } for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] /= nCells; for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] /= nCells; - for (auto j : iBgbfield) mpiGrid[dccrgId]->derivatives[j.first] /= nCells; } } diff --git a/fieldsolver/gridGlue.hpp b/fieldsolver/gridGlue.hpp index 4df65a607..34fd31c3d 100644 --- a/fieldsolver/gridGlue.hpp +++ b/fieldsolver/gridGlue.hpp @@ -49,7 +49,6 @@ void getBgFieldsAndDerivativesFromFsGrid( */ void getDerivativesFromFsGrid(FsGrid< std::array, 2>& dperbGrid, FsGrid< std::array, 2>& dmomentsGrid, - FsGrid< std::array, 2>& bgbfieldGrid, dccrg::Dccrg& mpiGrid, const std::vector& cells); diff --git a/vlasiator.cpp b/vlasiator.cpp index 0b362e9c0..7bda5889e 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -563,7 +563,7 @@ int main(int argn,char* args[]) { getFieldDataFromFsGrid(EGrid,mpiGrid,cells,CellParams::EX); getFieldDataFromFsGrid(EHallGrid,mpiGrid,cells,CellParams::EXHALL_000_100); getFieldDataFromFsGrid(EGradPeGrid,mpiGrid,cells,CellParams::EXGRADPE); - getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, BgBGrid, mpiGrid, cells); + getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, mpiGrid, cells); phiprof::stop("fsgrid-coupling-out"); if (myRank == MASTER_RANK) @@ -791,7 +791,7 @@ int main(int argn,char* args[]) { } if (*it == "derivs") { phiprof::start("fsgrid-coupling-out"); - getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, BgBGrid, mpiGrid, cells); + getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, mpiGrid, cells); phiprof::stop("fsgrid-coupling-out"); } } From da5704972a4f49151fc8f81fd2f353a665deadc0 Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 11 Apr 2019 11:12:13 +0300 Subject: [PATCH 294/602] Reorganised and renamed initializeGrid, to get correct BGB initialisation and to clean 
up a bit the fsgrids initialisation. --- grid.cpp | 45 +++++++++++++++++++++------ grid.h | 10 ++++-- vlasiator.cpp | 84 ++++++++++++++++++++++++--------------------------- 3 files changed, 82 insertions(+), 57 deletions(-) diff --git a/grid.cpp b/grid.cpp index d53586fe4..060ee94e0 100644 --- a/grid.cpp +++ b/grid.cpp @@ -38,6 +38,7 @@ #include "datareduction/datareducer.h" #include "sysboundary/sysboundary.h" #include "fieldsolver/fs_common.h" +#include "fieldsolver/gridGlue.hpp" #include "projects/project.h" #include "iowrite.h" #include "ioread.h" @@ -81,10 +82,16 @@ void writeVelMesh(dccrg::Dccrg& mpiGrid) ++counter; } -void initializeGrid( +void initializeGrids( int argn, char **argc, dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2> & perBGrid, + FsGrid< std::array, 2> & perBDt2Grid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2> & momentsGrid, + FsGrid< std::array, 2> & momentsDt2Grid, + FsGrid< fsgrids::technical, 2>& technicalGrid, SysBoundary& sysBoundaries, Project& project ) { @@ -130,9 +137,11 @@ void initializeGrid( phiprof::stop("Refine spatial cells"); // Init velocity mesh on all cells - initVelocityGridGeometry(mpiGrid); + initVelocityGridGeometry(mpiGrid); initializeStencils(mpiGrid); + const vector& cells = getLocalCells(); + mpiGrid.set_partitioning_option("IMBALANCE_TOL", P::loadBalanceTolerance); phiprof::start("Initial load-balancing"); if (myRank == MASTER_RANK) logFile << "(INIT): Starting initial load balance." << endl << writeVerbose; @@ -161,17 +170,14 @@ void initializeGrid( cerr << "(MAIN) ERROR: System boundary conditions were not set correctly." << endl; exit(1); } - phiprof::stop("Classify cells (sys boundary conditions)"); // Check refined cells do not touch boundary cells phiprof::start("Check boundary refinement"); - if(!sysBoundaries.checkRefinement(mpiGrid)) { cerr << "(MAIN) ERROR: Boundary cells must have identical refinement level " << endl; exit(1); } - phiprof::stop("Check boundary refinement"); if (P::isRestart) { @@ -182,7 +188,6 @@ void initializeGrid( exit(1); } phiprof::stop("Read restart"); - const vector& cells = getLocalCells(); //initial state for sys-boundary cells, will skip those not set to be reapplied at restart phiprof::start("Apply system boundary conditions state"); @@ -201,20 +206,19 @@ void initializeGrid( // -Background field on all cells // -Perturbed fields and ion distribution function in non-sysboundary cells // Each initialization has to be independent to avoid threading problems - const vector& cells = getLocalCells(); // Allow the project to set up data structures for it's setCell calls project.setupBeforeSetCell(cells); + phiprof::start("setCell"); #pragma omp parallel for schedule(dynamic) for (size_t i=0; isysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { project.setCell(cell); } - phiprof::stop("setCell"); } + phiprof::stop("setCell"); // Initial state for sys-boundary cells phiprof::stop("Apply initial state"); @@ -255,6 +259,15 @@ void initializeGrid( phiprof::stop("Init moments"); */ } + + phiprof::start("setupTechnicalFsGrid"); + setupTechnicalFsGrid(mpiGrid, cells, technicalGrid); + technicalGrid.updateGhostCells(); + phiprof::stop("setupTechnicalFsGrid"); + + phiprof::start("setProjectBackgroundField"); + project.setProjectBackgroundField(BgBGrid, technicalGrid); + phiprof::stop("setProjectBackgroundField"); // Init mesh data container if (getObjectWrapper().meshData.initialize("SpatialGrid") == false) { @@ -281,7 +294,19 @@ void initializeGrid( calculateInitialVelocityMoments(mpiGrid); 
phiprof::stop("Init moments"); } - + + phiprof::start("Finish fsgrid setup"); + // Transfer initial field configuration into the FsGrids + feedFieldDataIntoFsGrid(mpiGrid,cells,CellParams::PERBX,perBGrid); + + getBgFieldsAndDerivativesFromFsGrid(BgBGrid, mpiGrid, cells); + BgBGrid.updateGhostCells(); + + // WARNING this means moments and dt2 moments are the same here. + feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,false); + feedMomentsIntoFsGrid(mpiGrid, cells, momentsDt2Grid,false); + phiprof::stop("Finish fsgrid setup"); + phiprof::stop("Set initial state"); } diff --git a/grid.h b/grid.h index efe795011..316c4fb3e 100644 --- a/grid.h +++ b/grid.h @@ -30,12 +30,18 @@ #include /*! - \brief Initialize parallel grid + \brief Initialize DCCRG and fsgrids */ -void initializeGrid( +void initializeGrids( int argn, char **argc, dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2> & perBGrid, + FsGrid< std::array, 2> & perBDt2Grid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2> & momentsGrid, + FsGrid< std::array, 2> & momentsDt2Grid, + FsGrid< fsgrids::technical, 2>& technicalGrid, SysBoundary& sysBoundaries, Project& project ); diff --git a/vlasiator.cpp b/vlasiator.cpp index 7bda5889e..6a69bb635 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -418,70 +418,64 @@ int main(int argn,char* args[]) { = {P::xmin, P::ymin, P::zmin}; phiprof::stop("Init fieldsolver grids"); - // Initialize grid. After initializeGrid local cells have dist - // functions, and B fields set. Cells have also been classified for - // the various sys boundary conditions. All remote cells have been - // created. All spatial date computed this far is up to date for - // FULL_NEIGHBORHOOD. Block lists up to date for - // VLASOV_SOLVER_NEIGHBORHOOD (but dist function has not been communicated) - phiprof::start("Init grid"); - - phiprof::start("setCellBackgroundField"); - project->setProjectBackgroundField(BgBGrid, technicalGrid); - phiprof::stop("setCellBackgroundField"); - - //dccrg::Dccrg mpiGrid; - initializeGrid(argn,args,mpiGrid,sysBoundaries,*project); - isSysBoundaryCondDynamic = sysBoundaries.isDynamic(); - - phiprof::stop("Init grid"); - - // Initialize data reduction operators. This should be done elsewhere in order to initialize - // user-defined operators: - phiprof::start("Init DROs"); - DataReducer outputReducer, diagnosticReducer; - initializeDataReducers(&outputReducer, &diagnosticReducer); - phiprof::stop("Init DROs"); - phiprof::start("Initial fsgrid coupling"); const std::vector& cells = getLocalCells(); // Couple FSGrids to mpiGrid. Note that the coupling information is shared // between them. technicalGrid.setupForGridCoupling(cells.size()); - + // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. for(auto& dccrgId : cells) { const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); - + for (auto fsgridId : fsgridIds) { - technicalGrid. setGridCoupling(fsgridId, myRank); + technicalGrid.setGridCoupling(fsgridId, myRank); } } - technicalGrid. finishGridCoupling(); - + technicalGrid.finishGridCoupling(); phiprof::stop("Initial fsgrid coupling"); + + // Initialize grid. After initializeGrid local cells have dist + // functions, and B fields set. Cells have also been classified for + // the various sys boundary conditions. All remote cells have been + // created. 
All spatial data computed this far is up to date for
+ // FULL_NEIGHBORHOOD. Block lists up to date for
+ // VLASOV_SOLVER_NEIGHBORHOOD (but dist function has not been communicated)
+ phiprof::start("Init grids");
+ initializeGrids(
+ argn,
+ args,
+ mpiGrid,
+ perBGrid,
+ perBDt2Grid,
+ BgBGrid,
+ momentsGrid,
+ momentsDt2Grid,
+ technicalGrid,
+ sysBoundaries,
+ *project
+ );
+ isSysBoundaryCondDynamic = sysBoundaries.isDynamic();
+
+ const std::vector& cells = getLocalCells();
+
+ phiprof::stop("Init grids");
+
+ // Initialize data reduction operators. This should be done elsewhere in order to initialize
+ // user-defined operators:
+ phiprof::start("Init DROs");
+ DataReducer outputReducer, diagnosticReducer;
+ initializeDataReducers(&outputReducer, &diagnosticReducer);
+ phiprof::stop("Init DROs");

 phiprof::start("Init field propagator");
 if (
 initializeFieldPropagator(

From 48f9a1c8331f16f8ec649d721c9aab03bff920d7 Mon Sep 17 00:00:00 2001
From: ykempf
Date: Thu, 11 Apr 2019 12:15:00 +0300
Subject: [PATCH 295/602] Reordered things in order to get a correct
 initialisation without deadlocks and the like.

---
 fieldsolver/gridGlue.cpp | 10 ++++----
 grid.cpp | 55 +++++++++++++++++++++++++++-------------
 vlasiator.cpp | 32 +++--------------------
 3 files changed, 45 insertions(+), 52 deletions(-)

diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp
index 817233275..a6488527f 100644
--- a/fieldsolver/gridGlue.cpp
+++ b/fieldsolver/gridGlue.cpp
@@ -399,7 +399,7 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m
 int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells);
 technicalGrid.setupForTransferIn(nCells);
-
+
 // Setup transfer buffers
 std::vector< fsgrids::technical > transferBuffer(cells.size());
@@ -413,7 +413,7 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m
 // thisCellData->sysBoundaryLayer = mpiGrid[cells[i]]->sysBoundaryLayer;
 thisCellData->maxFsDt = std::numeric_limits::max();
 }
-
+
 for(uint i = 0; i < cells.size(); ++i) {
 const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, cells[i]);
@@ -426,9 +426,9 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m
 technicalGrid.transferDataIn(fsgridId,&transferBuffer[i]);
 }
 }
-
+
 technicalGrid.finishTransfersIn();
-
+
 auto localSize = technicalGrid.getLocalSize();
 // Add layer calculation here. Include diagonals +-1.
@@ -442,7 +442,7 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m
 }
 }
 }
-
+
 // In dccrg initialization the max number of boundary layers is set to 3.
const int MAX_NUMBER_OF_BOUNDARY_LAYERS = 3 * (mpiGrid.get_maximum_refinement_level() + 1); diff --git a/grid.cpp b/grid.cpp index 060ee94e0..51b1fbf50 100644 --- a/grid.cpp +++ b/grid.cpp @@ -97,7 +97,7 @@ void initializeGrids( ) { int myRank; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - + // Init Zoltan: float zoltanVersion; if (Zoltan_Initialize(argn,argc,&zoltanVersion) != ZOLTAN_OK) { @@ -135,19 +135,18 @@ void initializeGrids( recalculateLocalCellsCache(); } phiprof::stop("Refine spatial cells"); - + // Init velocity mesh on all cells initVelocityGridGeometry(mpiGrid); initializeStencils(mpiGrid); - const vector& cells = getLocalCells(); - mpiGrid.set_partitioning_option("IMBALANCE_TOL", P::loadBalanceTolerance); phiprof::start("Initial load-balancing"); if (myRank == MASTER_RANK) logFile << "(INIT): Starting initial load balance." << endl << writeVerbose; mpiGrid.balance_load(); recalculateLocalCellsCache(); setFaceNeighborRanks( mpiGrid ); + const vector& cells = getLocalCells(); phiprof::stop("Initial load-balancing"); if (myRank == MASTER_RANK) logFile << "(INIT): Set initial state." << endl << writeVerbose; @@ -156,7 +155,7 @@ void initializeGrids( phiprof::start("Set spatial cell coordinates"); initSpatialCellCoordinates(mpiGrid); phiprof::stop("Set spatial cell coordinates"); - + phiprof::start("Initialize system boundary conditions"); if(sysBoundaries.initSysBoundaries(project, P::t_min) == false) { if (myRank == MASTER_RANK) cerr << "Error in initialising the system boundaries." << endl; @@ -219,7 +218,7 @@ void initializeGrids( } } phiprof::stop("setCell"); - + // Initial state for sys-boundary cells phiprof::stop("Apply initial state"); phiprof::start("Apply system boundary conditions state"); @@ -228,7 +227,7 @@ void initializeGrids( exit(1); } phiprof::stop("Apply system boundary conditions state"); - + for (size_t i=0; iparameters[CellParams::LBWEIGHTCOUNTER] = 0; } @@ -260,15 +259,6 @@ void initializeGrids( */ } - phiprof::start("setupTechnicalFsGrid"); - setupTechnicalFsGrid(mpiGrid, cells, technicalGrid); - technicalGrid.updateGhostCells(); - phiprof::stop("setupTechnicalFsGrid"); - - phiprof::start("setProjectBackgroundField"); - project.setProjectBackgroundField(BgBGrid, technicalGrid); - phiprof::stop("setProjectBackgroundField"); - // Init mesh data container if (getObjectWrapper().meshData.initialize("SpatialGrid") == false) { cerr << "(Grid) Failed to initialize mesh data container in " << __FILE__ << ":" << __LINE__ << endl; @@ -277,14 +267,14 @@ void initializeGrids( //Balance load before we transfer all data below balanceLoad(mpiGrid, sysBoundaries); - + phiprof::initializeTimer("Fetch Neighbour data","MPI"); phiprof::start("Fetch Neighbour data"); // update complete cell spatial data for full stencil ( SpatialCell::set_mpi_transfer_type(Transfer::ALL_SPATIAL_DATA); mpiGrid.update_copies_of_remote_neighbors(FULL_NEIGHBORHOOD_ID); phiprof::stop("Fetch Neighbour data"); - + if (P::isRestart == false) { // Apply boundary conditions so that we get correct initial moments sysBoundaries.applySysBoundaryVlasovConditions(mpiGrid,Parameters::t); @@ -295,6 +285,35 @@ void initializeGrids( phiprof::stop("Init moments"); } + phiprof::start("Initial fsgrid coupling"); + // Couple FSGrids to mpiGrid. Note that the coupling information is shared + // between them. + technicalGrid.setupForGridCoupling(cells.size()); + + // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. 
+ // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id.
+ for(auto& dccrgId : cells) {
+ const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId);
+
+ for (auto fsgridId : fsgridIds) {
+
+ technicalGrid.setGridCoupling(fsgridId, myRank);
+ }
+ }
+
+ technicalGrid.finishGridCoupling();
+ phiprof::stop("Initial fsgrid coupling");
+
+ phiprof::start("setupTechnicalFsGrid");
+ setupTechnicalFsGrid(mpiGrid, cells, technicalGrid);
+
+ technicalGrid.updateGhostCells();
+ phiprof::stop("setupTechnicalFsGrid");
+
+ phiprof::start("setProjectBackgroundField");
+ project.setProjectBackgroundField(BgBGrid, technicalGrid);
+ phiprof::stop("setProjectBackgroundField");
+
 phiprof::start("Finish fsgrid setup");
 // Transfer initial field configuration into the FsGrids
 feedFieldDataIntoFsGrid(mpiGrid,cells,CellParams::PERBX,perBGrid);
diff --git a/vlasiator.cpp b/vlasiator.cpp
index 6a69bb635..0836ab101 100644
--- a/vlasiator.cpp
+++ b/vlasiator.cpp
@@ -418,28 +418,6 @@ int main(int argn,char* args[]) {
 = {P::xmin, P::ymin, P::zmin};
 phiprof::stop("Init fieldsolver grids");
- phiprof::start("Initial fsgrid coupling");
- const std::vector& cells = getLocalCells();
-
- // Couple FSGrids to mpiGrid. Note that the coupling information is shared
- // between them.
- technicalGrid.setupForGridCoupling(cells.size());
-
-
- // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level.
- // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id.
- for(auto& dccrgId : cells) {
- const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId);
-
- for (auto fsgridId : fsgridIds) {
-
- technicalGrid.setGridCoupling(fsgridId, myRank);
- }
- }
-
- technicalGrid.finishGridCoupling();
- phiprof::stop("Initial fsgrid coupling");
-

From dd1fb4b46c863ad0ed5b50fae0cb4d4db2198566 Mon Sep 17 00:00:00 2001
From: ykempf
Date: Thu, 11 Apr 2019 13:34:13 +0300
Subject: [PATCH 296/602] Optimised BGB setting in Magnetosphere and renamed setProjectBackgroundField to setProjectBField.
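The rename itself is mechanical: every project override of
setProjectBackgroundField becomes setProjectBField with an unchanged
(BgBGrid, technicalGrid) parameter list, and the single call site in
initializeGrids follows suit. As an illustration of the interface a project
now implements (MyProject and its Bx/By/Bz members are placeholder names;
the body mirrors the ConstantField projects touched by this series):

   void MyProject::setProjectBField(
      FsGrid< std::array<Real, fsgrids::bgbfield::N_BGB>, 2>& BgBGrid,
      FsGrid< fsgrids::technical, 2>& technicalGrid
   ) {
      ConstantField bgField;
      bgField.initialize(this->Bx, this->By, this->Bz); // uniform background field
      setBackgroundField(bgField, BgBGrid);             // fill the fsgrid directly
   }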
--- grid.cpp | 6 +- projects/Diffusion/Diffusion.cpp | 2 +- projects/Diffusion/Diffusion.h | 2 +- projects/Dispersion/Dispersion.cpp | 2 +- projects/Dispersion/Dispersion.h | 2 +- projects/Distributions/Distributions.cpp | 2 +- projects/Distributions/Distributions.h | 2 +- projects/ElectricSail/electric_sail.cpp | 4 +- projects/ElectricSail/electric_sail.h | 2 +- projects/Flowthrough/Flowthrough.cpp | 2 +- projects/Flowthrough/Flowthrough.h | 2 +- projects/Fluctuations/Fluctuations.cpp | 2 +- projects/Fluctuations/Fluctuations.h | 2 +- projects/Harris/Harris.cpp | 2 +- projects/Harris/Harris.h | 2 +- projects/IPShock/IPShock.cpp | 2 +- projects/IPShock/IPShock.h | 2 +- projects/Larmor/Larmor.cpp | 2 +- projects/Larmor/Larmor.h | 2 +- projects/Magnetosphere/Magnetosphere.cpp | 69 ++++++++++--------- projects/Magnetosphere/Magnetosphere.h | 2 +- projects/MultiPeak/MultiPeak.cpp | 2 +- projects/MultiPeak/MultiPeak.h | 2 +- projects/Poisson/poisson_test.cpp | 2 +- projects/Poisson/poisson_test.h | 2 +- projects/Shocktest/Shocktest.cpp | 2 +- projects/Shocktest/Shocktest.h | 2 +- projects/Template/Template.cpp | 2 +- projects/Template/Template.h | 2 +- projects/VelocityBox/VelocityBox.cpp | 2 +- projects/VelocityBox/VelocityBox.h | 2 +- projects/project.cpp | 2 +- projects/project.h | 2 +- projects/testAmr/testAmr.cpp | 2 +- projects/testAmr/testAmr.h | 2 +- projects/testHall/testHall.cpp | 2 +- projects/testHall/testHall.h | 2 +- projects/test_fp/test_fp.cpp | 2 +- projects/test_fp/test_fp.h | 2 +- projects/test_trans/test_trans.cpp | 2 +- projects/test_trans/test_trans.h | 2 +- .../verificationLarmor/verificationLarmor.cpp | 2 +- .../verificationLarmor/verificationLarmor.h | 2 +- 43 files changed, 81 insertions(+), 78 deletions(-) diff --git a/grid.cpp b/grid.cpp index 51b1fbf50..01aa76af5 100644 --- a/grid.cpp +++ b/grid.cpp @@ -310,9 +310,9 @@ void initializeGrids( technicalGrid.updateGhostCells(); phiprof::stop("setupTechnicalFsGrid"); - phiprof::start("setProjectBackgroundField"); - project.setProjectBackgroundField(BgBGrid, technicalGrid); - phiprof::stop("setProjectBackgroundField"); + phiprof::start("setProjectBField"); + project.setProjectBField(BgBGrid, technicalGrid); + phiprof::stop("setProjectBField"); phiprof::start("Finish fsgrid setup"); // Transfer initial field configuration into the FsGrids diff --git a/projects/Diffusion/Diffusion.cpp b/projects/Diffusion/Diffusion.cpp index 8e9e447fd..09b218d28 100644 --- a/projects/Diffusion/Diffusion.cpp +++ b/projects/Diffusion/Diffusion.cpp @@ -133,7 +133,7 @@ namespace projects { } - void Diffusion::setProjectBackgroundField( + void Diffusion::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/Diffusion/Diffusion.h b/projects/Diffusion/Diffusion.h index c1e289d4b..cf618752a 100644 --- a/projects/Diffusion/Diffusion.h +++ b/projects/Diffusion/Diffusion.h @@ -46,7 +46,7 @@ namespace projects { static void addParameters(void); virtual void getParameters(void); /*! 
set background field, should set it for all cells */ - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/Dispersion/Dispersion.cpp b/projects/Dispersion/Dispersion.cpp index 4af7db0fa..ee67c19f4 100644 --- a/projects/Dispersion/Dispersion.cpp +++ b/projects/Dispersion/Dispersion.cpp @@ -254,7 +254,7 @@ namespace projects { } - void Dispersion::setProjectBackgroundField( + void Dispersion::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/Dispersion/Dispersion.h b/projects/Dispersion/Dispersion.h index 77ae7987a..34afe8d5b 100644 --- a/projects/Dispersion/Dispersion.h +++ b/projects/Dispersion/Dispersion.h @@ -51,7 +51,7 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/Distributions/Distributions.cpp b/projects/Distributions/Distributions.cpp index 4c69c4d56..d2adb641f 100644 --- a/projects/Distributions/Distributions.cpp +++ b/projects/Distributions/Distributions.cpp @@ -167,7 +167,7 @@ namespace projects { } } - void Distributions::setProjectBackgroundField( + void Distributions::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/Distributions/Distributions.h b/projects/Distributions/Distributions.h index 9cf799db6..12eef5cf9 100644 --- a/projects/Distributions/Distributions.h +++ b/projects/Distributions/Distributions.h @@ -36,7 +36,7 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/ElectricSail/electric_sail.cpp b/projects/ElectricSail/electric_sail.cpp index 3a96c97c3..7bd0ed3d1 100644 --- a/projects/ElectricSail/electric_sail.cpp +++ b/projects/ElectricSail/electric_sail.cpp @@ -211,12 +211,12 @@ namespace projects { * * NOTE: This is only called in grid.cpp:initializeGrid. */ - void ElectricSail::setProjectBackgroundField( + void ElectricSail::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { #warning this is not supported at the moment, needs to be ported to fsgrid - std::cerr << "ERROR: ElectricSail::setProjectBackgroundField is not ported to fsgrid! Aborting." << std::endl; + std::cerr << "ERROR: ElectricSail::setProjectBField is not ported to fsgrid! Aborting." 
<< std::endl; abort(); // Real X = cell->parameters[CellParams::XCRD]; // Real Y = cell->parameters[CellParams::YCRD]; diff --git a/projects/ElectricSail/electric_sail.h b/projects/ElectricSail/electric_sail.h index 3ea7020f7..f98e50603 100644 --- a/projects/ElectricSail/electric_sail.h +++ b/projects/ElectricSail/electric_sail.h @@ -52,7 +52,7 @@ namespace projects { Real getCorrectNumberDensity(spatial_cell::SpatialCell* cell,const uint popID) const; virtual void getParameters(); virtual bool initialize(); - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/Flowthrough/Flowthrough.cpp b/projects/Flowthrough/Flowthrough.cpp index 4ee88cf7a..ad813622e 100644 --- a/projects/Flowthrough/Flowthrough.cpp +++ b/projects/Flowthrough/Flowthrough.cpp @@ -209,7 +209,7 @@ namespace projects { cellParams[CellParams::PERBZ] = 0.; } - void Flowthrough::setProjectBackgroundField( + void Flowthrough::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/Flowthrough/Flowthrough.h b/projects/Flowthrough/Flowthrough.h index 39f64c2df..660915b64 100644 --- a/projects/Flowthrough/Flowthrough.h +++ b/projects/Flowthrough/Flowthrough.h @@ -47,7 +47,7 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/Fluctuations/Fluctuations.cpp b/projects/Fluctuations/Fluctuations.cpp index c8d9bda6c..d5a0efb22 100644 --- a/projects/Fluctuations/Fluctuations.cpp +++ b/projects/Fluctuations/Fluctuations.cpp @@ -178,7 +178,7 @@ namespace projects { cellParams[CellParams::PERBZ] = this->magZPertAbsAmp * (0.5 - getRandomNumber(cell)); } - void Fluctuations::setProjectBackgroundField( + void Fluctuations::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/Fluctuations/Fluctuations.h b/projects/Fluctuations/Fluctuations.h index 31eb6db9d..b1a0e0ea7 100644 --- a/projects/Fluctuations/Fluctuations.h +++ b/projects/Fluctuations/Fluctuations.h @@ -48,7 +48,7 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/Harris/Harris.cpp b/projects/Harris/Harris.cpp index ed0617708..590b724cd 100644 --- a/projects/Harris/Harris.cpp +++ b/projects/Harris/Harris.cpp @@ -166,7 +166,7 @@ namespace projects { return V0; } - void Harris::setProjectBackgroundField( + void Harris::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/Harris/Harris.h b/projects/Harris/Harris.h index 1d430e890..6e122fe5d 100644 --- a/projects/Harris/Harris.h +++ b/projects/Harris/Harris.h @@ -44,7 +44,7 @@ namespace projects { static void addParameters(void); virtual void getParameters(void); virtual void calcCellParameters(spatial_cell::SpatialCell* cell,creal& t); - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git 
a/projects/IPShock/IPShock.cpp b/projects/IPShock/IPShock.cpp index 677e132c2..b74ec22c7 100644 --- a/projects/IPShock/IPShock.cpp +++ b/projects/IPShock/IPShock.cpp @@ -491,7 +491,7 @@ namespace projects { return a; } - void IPShock::setProjectBackgroundField( + void IPShock::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/IPShock/IPShock.h b/projects/IPShock/IPShock.h index de561b5cf..9b43cef35 100644 --- a/projects/IPShock/IPShock.h +++ b/projects/IPShock/IPShock.h @@ -62,7 +62,7 @@ namespace projects { static void addParameters(void); virtual void getParameters(void); - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/Larmor/Larmor.cpp b/projects/Larmor/Larmor.cpp index 4b8a85024..e39948cd4 100644 --- a/projects/Larmor/Larmor.cpp +++ b/projects/Larmor/Larmor.cpp @@ -155,7 +155,7 @@ namespace projects { cellParams[CellParams::PERBZ ] = 0.0; } - void Larmor::setProjectBackgroundField( + void Larmor::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/Larmor/Larmor.h b/projects/Larmor/Larmor.h index b4f5d839c..766de2960 100644 --- a/projects/Larmor/Larmor.h +++ b/projects/Larmor/Larmor.h @@ -38,7 +38,7 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index 0a9e5d2c7..dae24f818 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -257,7 +257,7 @@ namespace projects { } /* set 0-centered dipole */ - void Magnetosphere::setProjectBackgroundField( + void Magnetosphere::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { @@ -307,16 +307,17 @@ namespace projects { for (int x = 0; x < localSize[0]; ++x) { for (int y = 0; y < localSize[1]; ++y) { for (int z = 0; z < localSize[2]; ++z) { - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::BGBX)=0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::BGBXVOL)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBydx)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBzdx)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBxdy)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBxdz)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBYVOLdx)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBZVOLdx)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBXVOLdy)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBXVOLdz)=0.0; + std::array* cell = BgBGrid.get(x, y, z); + cell->at(fsgrids::bgbfield::BGBX)=0; + cell->at(fsgrids::bgbfield::BGBXVOL)=0.0; + cell->at(fsgrids::bgbfield::dBGBydx)=0.0; + cell->at(fsgrids::bgbfield::dBGBzdx)=0.0; + cell->at(fsgrids::bgbfield::dBGBxdy)=0.0; + cell->at(fsgrids::bgbfield::dBGBxdz)=0.0; + cell->at(fsgrids::bgbfield::dBGBYVOLdx)=0.0; + cell->at(fsgrids::bgbfield::dBGBZVOLdx)=0.0; + cell->at(fsgrids::bgbfield::dBGBXVOLdy)=0.0; + cell->at(fsgrids::bgbfield::dBGBXVOLdz)=0.0; } } } @@ -327,16 +328,17 @@ namespace projects { for (int x = 0; x < localSize[0]; ++x) { for (int y = 0; y < localSize[1]; ++y) { for (int z = 0; z 
< localSize[2]; ++z) { - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::BGBY)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::BGBYVOL)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBxdy)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBzdy)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBydx)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBydz)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBXVOLdy)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBZVOLdy)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBYVOLdx)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBYVOLdz)=0.0; + std::array* cell = BgBGrid.get(x, y, z); + cell->at(fsgrids::bgbfield::BGBY)=0.0; + cell->at(fsgrids::bgbfield::BGBYVOL)=0.0; + cell->at(fsgrids::bgbfield::dBGBxdy)=0.0; + cell->at(fsgrids::bgbfield::dBGBzdy)=0.0; + cell->at(fsgrids::bgbfield::dBGBydx)=0.0; + cell->at(fsgrids::bgbfield::dBGBydz)=0.0; + cell->at(fsgrids::bgbfield::dBGBXVOLdy)=0.0; + cell->at(fsgrids::bgbfield::dBGBZVOLdy)=0.0; + cell->at(fsgrids::bgbfield::dBGBYVOLdx)=0.0; + cell->at(fsgrids::bgbfield::dBGBYVOLdz)=0.0; } } } @@ -346,18 +348,19 @@ namespace projects { for (int x = 0; x < localSize[0]; ++x) { for (int y = 0; y < localSize[1]; ++y) { for (int z = 0; z < localSize[2]; ++z) { - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::BGBX)=0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::BGBY)=0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::BGBYVOL)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::BGBXVOL)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBxdy)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBxdz)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBydx)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBydz)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBXVOLdy)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBXVOLdz)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBYVOLdx)=0.0; - BgBGrid.get(x, y, z)->at(fsgrids::bgbfield::dBGBYVOLdz)=0.0; + std::array* cell = BgBGrid.get(x, y, z); + cell->at(fsgrids::bgbfield::BGBX)=0; + cell->at(fsgrids::bgbfield::BGBY)=0; + cell->at(fsgrids::bgbfield::BGBYVOL)=0.0; + cell->at(fsgrids::bgbfield::BGBXVOL)=0.0; + cell->at(fsgrids::bgbfield::dBGBxdy)=0.0; + cell->at(fsgrids::bgbfield::dBGBxdz)=0.0; + cell->at(fsgrids::bgbfield::dBGBydx)=0.0; + cell->at(fsgrids::bgbfield::dBGBydz)=0.0; + cell->at(fsgrids::bgbfield::dBGBXVOLdy)=0.0; + cell->at(fsgrids::bgbfield::dBGBXVOLdz)=0.0; + cell->at(fsgrids::bgbfield::dBGBYVOLdx)=0.0; + cell->at(fsgrids::bgbfield::dBGBYVOLdz)=0.0; } } } diff --git a/projects/Magnetosphere/Magnetosphere.h b/projects/Magnetosphere/Magnetosphere.h index 8926291de..3b91ab062 100644 --- a/projects/Magnetosphere/Magnetosphere.h +++ b/projects/Magnetosphere/Magnetosphere.h @@ -47,7 +47,7 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/MultiPeak/MultiPeak.cpp b/projects/MultiPeak/MultiPeak.cpp index f9a5ad8d3..1d7c540a2 100644 --- a/projects/MultiPeak/MultiPeak.cpp +++ b/projects/MultiPeak/MultiPeak.cpp @@ -229,7 +229,7 @@ namespace projects { rhoRnd = 0.5 - getRandomNumber(cell); } - void MultiPeak::setProjectBackgroundField( + void MultiPeak::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff 
--git a/projects/MultiPeak/MultiPeak.h b/projects/MultiPeak/MultiPeak.h index eec1626e9..26491f86c 100644 --- a/projects/MultiPeak/MultiPeak.h +++ b/projects/MultiPeak/MultiPeak.h @@ -61,7 +61,7 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/Poisson/poisson_test.cpp b/projects/Poisson/poisson_test.cpp index 81e912a36..7b48e726c 100644 --- a/projects/Poisson/poisson_test.cpp +++ b/projects/Poisson/poisson_test.cpp @@ -72,7 +72,7 @@ namespace projects { return true; } - void PoissonTest::setProjectBackgroundField( + void PoissonTest::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/Poisson/poisson_test.h b/projects/Poisson/poisson_test.h index ad579e558..dc2edfef8 100644 --- a/projects/Poisson/poisson_test.h +++ b/projects/Poisson/poisson_test.h @@ -41,7 +41,7 @@ namespace projects { static void addParameters(); virtual void getParameters(); virtual bool initialize(); - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/Shocktest/Shocktest.cpp b/projects/Shocktest/Shocktest.cpp index d727be585..c3e2983a9 100644 --- a/projects/Shocktest/Shocktest.cpp +++ b/projects/Shocktest/Shocktest.cpp @@ -222,7 +222,7 @@ namespace projects { } - void Shocktest::setProjectBackgroundField( + void Shocktest::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/Shocktest/Shocktest.h b/projects/Shocktest/Shocktest.h index 26a29220f..7266ecd53 100644 --- a/projects/Shocktest/Shocktest.h +++ b/projects/Shocktest/Shocktest.h @@ -62,7 +62,7 @@ namespace projects { creal& dvx, creal& dvy, creal& dvz, const uint popID ) const; - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/Template/Template.cpp b/projects/Template/Template.cpp index 09f42f57a..591e2f599 100644 --- a/projects/Template/Template.cpp +++ b/projects/Template/Template.cpp @@ -71,7 +71,7 @@ namespace projects { exp(- physicalconstants::MASS_PROTON * ((vx-Vx0)*(vx-Vx0) + (vy-Vy0)*(vy-Vy0) + (vz-Vz0)*(vz-Vz0)) / (2.0 * physicalconstants::K_B * T)); } - void Template::setProjectBackgroundField( + void Template::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/Template/Template.h b/projects/Template/Template.h index f01702637..9a7000c6e 100644 --- a/projects/Template/Template.h +++ b/projects/Template/Template.h @@ -35,7 +35,7 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/VelocityBox/VelocityBox.cpp b/projects/VelocityBox/VelocityBox.cpp index 4fde4ec11..bd1af27ca 100644 --- a/projects/VelocityBox/VelocityBox.cpp +++ b/projects/VelocityBox/VelocityBox.cpp @@ -109,7 +109,7 @@ namespace projects { cellParams[CellParams::PERBZ ] = 0.0; } - void VelocityBox::setProjectBackgroundField( + void 
VelocityBox::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/VelocityBox/VelocityBox.h b/projects/VelocityBox/VelocityBox.h index 90aadfb7a..2fe236f65 100644 --- a/projects/VelocityBox/VelocityBox.h +++ b/projects/VelocityBox/VelocityBox.h @@ -36,7 +36,7 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/project.cpp b/projects/project.cpp index 773f1b805..2cce25a3c 100644 --- a/projects/project.cpp +++ b/projects/project.cpp @@ -183,7 +183,7 @@ namespace projects { bool Project::initialized() {return baseClassInitialized;} /*! Print a warning message to stderr and abort, one should not use the base class functions. */ - void Project::setProjectBackgroundField( + void Project::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/project.h b/projects/project.h index 5b08a6db6..99ba05e44 100644 --- a/projects/project.h +++ b/projects/project.h @@ -58,7 +58,7 @@ namespace projects { * @param BgBGrid Background field fsgrid * @param technicalGrid Technical fsgrid */ - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index abaa21215..e3d4af737 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -230,7 +230,7 @@ namespace projects { rhoRnd = 0.5 - getRandomNumber(cell); } - void testAmr::setProjectBackgroundField( + void testAmr::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/testAmr/testAmr.h b/projects/testAmr/testAmr.h index a7daf55dd..9a7318b05 100644 --- a/projects/testAmr/testAmr.h +++ b/projects/testAmr/testAmr.h @@ -61,7 +61,7 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/testHall/testHall.cpp b/projects/testHall/testHall.cpp index bcf7bab29..e703c22a4 100644 --- a/projects/testHall/testHall.cpp +++ b/projects/testHall/testHall.cpp @@ -92,7 +92,7 @@ namespace projects { exp(- mass * (pow(vx + 0.5 * dvx - this->VX0, 2.0) + pow(vy + 0.5 * dvy - this->VY0, 2.0) + pow(vz + 0.5 * dvz - this->VZ0, 2.0)) / (2.0 * kb * this->TEMPERATURE))); } -// void TestHall::setProjectBackgroundField(SpatialCell *cell){ +// void TestHall::setProjectBField(SpatialCell *cell){ // Dipole bgField; // bgField.initialize(8e15 *this->dipoleScalingFactor,this->dipoleTilt); //set dipole moment // if(cell->sysBoundaryFlag == sysboundarytype::SET_MAXWELLIAN && this->noDipoleInSW) { diff --git a/projects/testHall/testHall.h b/projects/testHall/testHall.h index 5e60b047d..6de5eecf8 100644 --- a/projects/testHall/testHall.h +++ b/projects/testHall/testHall.h @@ -35,7 +35,7 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); -// virtual void setProjectBackgroundField(SpatialCell* cell); +// virtual void setProjectBField(SpatialCell* cell); 
virtual void calcCellParameters(spatial_cell::SpatialCell* cell,creal& t); virtual Real calcPhaseSpaceDensity( creal& x, creal& y, creal& z, diff --git a/projects/test_fp/test_fp.cpp b/projects/test_fp/test_fp.cpp index 2be445620..44036c2b9 100644 --- a/projects/test_fp/test_fp.cpp +++ b/projects/test_fp/test_fp.cpp @@ -105,7 +105,7 @@ namespace projects { return result; } - void test_fp::setProjectBackgroundField( + void test_fp::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/test_fp/test_fp.h b/projects/test_fp/test_fp.h index 7400d8b43..44cfb8be9 100644 --- a/projects/test_fp/test_fp.h +++ b/projects/test_fp/test_fp.h @@ -37,7 +37,7 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/test_trans/test_trans.cpp b/projects/test_trans/test_trans.cpp index e8617cddf..8d797499a 100644 --- a/projects/test_trans/test_trans.cpp +++ b/projects/test_trans/test_trans.cpp @@ -136,7 +136,7 @@ namespace projects { cellParams[CellParams::PERBZ ] = 0.0; } - void test_trans::setProjectBackgroundField( + void test_trans::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/test_trans/test_trans.h b/projects/test_trans/test_trans.h index 2315ed0be..a2acbfe2a 100644 --- a/projects/test_trans/test_trans.h +++ b/projects/test_trans/test_trans.h @@ -37,7 +37,7 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/verificationLarmor/verificationLarmor.cpp b/projects/verificationLarmor/verificationLarmor.cpp index 668a8ff9b..160bb1822 100644 --- a/projects/verificationLarmor/verificationLarmor.cpp +++ b/projects/verificationLarmor/verificationLarmor.cpp @@ -127,7 +127,7 @@ namespace projects { cellParams[CellParams::PERBZ ] = 0.0; } - void verificationLarmor::setProjectBackgroundField( + void verificationLarmor::setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/verificationLarmor/verificationLarmor.h b/projects/verificationLarmor/verificationLarmor.h index 7838e4516..01afe1417 100644 --- a/projects/verificationLarmor/verificationLarmor.h +++ b/projects/verificationLarmor/verificationLarmor.h @@ -37,7 +37,7 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - virtual void setProjectBackgroundField( + virtual void setProjectBField( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); From b8515cb65139096dae1002e0a12125df9d1e5bd5 Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 11 Apr 2019 13:47:36 +0300 Subject: [PATCH 297/602] setProjectBField for Alfven --- projects/Alfven/Alfven.cpp | 41 ++++++++++++++++++++++++++++++++++++++ projects/Alfven/Alfven.h | 5 +++++ 2 files changed, 46 insertions(+) diff --git a/projects/Alfven/Alfven.cpp b/projects/Alfven/Alfven.cpp index 026db254d..e73022d27 100644 --- a/projects/Alfven/Alfven.cpp +++ b/projects/Alfven/Alfven.cpp @@ -168,4 +168,45 @@ namespace projects { 
cellParams[CellParams::PERBY ] = this->B0 * sin(this->ALPHA) + this->A_MAG * this->B0 * cos(this->ALPHA) * dByavg / nPts; cellParams[CellParams::PERBZ ] = this->B0 * this->A_MAG * dBzavg / nPts; } + + void Alfven::setProjectBField( + FsGrid< std::array, 2> & perBGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { + auto localSize = perBGrid.getLocalSize(); + +#pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + Real dBxavg, dByavg, dBzavg; + dBxavg = dByavg = dBzavg = 0.0; + Real d_x = perBGrid.DX / (this->nSpaceSamples - 1); + Real d_y = perBGrid.DY / (this->nSpaceSamples - 1); + + for (uint i=0; i<this->nSpaceSamples; ++i) { + for (uint j=0; j<this->nSpaceSamples; ++j) { + for (uint k=0; k<this->nSpaceSamples; ++k) { + Real ksi = ((xyz[0] + i * d_x) * cos(this->ALPHA) + (xyz[1] + j * d_y) * sin(this->ALPHA)) / this->WAVELENGTH; + dBxavg += sin(2.0 * M_PI * ksi); + dByavg += sin(2.0 * M_PI * ksi); + dBzavg += cos(2.0 * M_PI * ksi); + } + } + } + + cuint nPts = pow(this->nSpaceSamples, 3.0); + cell->at(fsgrids::bfield::PERBX) = this->B0 * cos(this->ALPHA) - this->A_MAG * this->B0 * sin(this->ALPHA) * dBxavg / nPts; + cell->at(fsgrids::bfield::PERBY) = this->B0 * sin(this->ALPHA) + this->A_MAG * this->B0 * cos(this->ALPHA) * dByavg / nPts; + cell->at(fsgrids::bfield::PERBZ) = this->B0 * this->A_MAG * dBzavg / nPts; + + } + } + } + } + } // namespace projects diff --git a/projects/Alfven/Alfven.h b/projects/Alfven/Alfven.h index 3a7a1e011..69642ca7f 100644 --- a/projects/Alfven/Alfven.h +++ b/projects/Alfven/Alfven.h @@ -43,6 +43,11 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); + virtual void setProjectBField( + FsGrid< std::array, 2> & perBGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); protected: Real getDistribValue( From 4d816ec80da8850a8b00016eb4342e663e64279d Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 11 Apr 2019 13:54:42 +0300 Subject: [PATCH 298/602] setProjectBField for electric_sail, Diffusion and Flowthrough.
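The interface change is the same in all three projects: setProjectBField gains a perturbed-field grid as its first argument, so a project writes its initial PERBX/PERBY/PERBZ directly into fsgrid storage instead of into per-cell dccrg parameters. A compilable sketch of the resulting override shape, with heavily simplified stand-ins (PerBGrid, BgBGrid, TechnicalGrid and their methods are illustrative, not the real FsGrid API):

#include <array>
#include <vector>

enum bfield { PERBX, PERBY, PERBZ, N_BFIELD };

struct PerBGrid {
   std::array<int, 3> size;
   std::vector<std::array<double, N_BFIELD>> cells;
   std::array<int, 3> getLocalSize() const { return size; }
   std::array<double, N_BFIELD>* get(int x, int y, int z) {
      return &cells[x + size[0] * (y + size[1] * z)];
   }
};
struct BgBGrid {};        // background-field grid, set via setBackgroundField
struct TechnicalGrid {};  // boundary flags etc., unused in this sketch

// Override shape after this patch: perturbed B goes straight into perBGrid.
void setProjectBField(PerBGrid& perBGrid, BgBGrid&, TechnicalGrid&) {
   const std::array<int, 3> localSize = perBGrid.getLocalSize();
   for (int x = 0; x < localSize[0]; ++x) {
      for (int y = 0; y < localSize[1]; ++y) {
         for (int z = 0; z < localSize[2]; ++z) {
            std::array<double, N_BFIELD>* cell = perBGrid.get(x, y, z);
            cell->at(PERBX) = 0.0;  // project-specific perturbation goes here
            cell->at(PERBY) = 0.0;
            cell->at(PERBZ) = 0.0;
         }
      }
   }
}

int main() {
   PerBGrid perB{{{2, 2, 2}}, std::vector<std::array<double, N_BFIELD>>(8)};
   BgBGrid bgB;
   TechnicalGrid tech;
   setProjectBField(perB, bgB, tech);
   return 0;
}

A side effect visible in the Diffusion and Flowthrough hunks below is that calcCellParameters shrinks to an empty body, since the perturbed field it used to set on the dccrg side now lives on the fsgrid side.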
--- projects/Diffusion/Diffusion.cpp | 15 ++------------- projects/Diffusion/Diffusion.h | 1 + projects/ElectricSail/electric_sail.cpp | 1 + projects/ElectricSail/electric_sail.h | 1 + projects/Flowthrough/Flowthrough.cpp | 8 ++------ projects/Flowthrough/Flowthrough.h | 1 + 6 files changed, 8 insertions(+), 19 deletions(-) diff --git a/projects/Diffusion/Diffusion.cpp b/projects/Diffusion/Diffusion.cpp index 09b218d28..ea92c60ad 100644 --- a/projects/Diffusion/Diffusion.cpp +++ b/projects/Diffusion/Diffusion.cpp @@ -119,21 +119,10 @@ namespace projects { return avg / (sP.nSpaceSamples*sP.nSpaceSamples*sP.nSpaceSamples) / (sP.nVelocitySamples*sP.nVelocitySamples*sP.nVelocitySamples); } - void Diffusion::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - creal x = cellParams[CellParams::XCRD]; - creal dx = cellParams[CellParams::DX]; - - cellParams[CellParams::EX ] = 0.0; - cellParams[CellParams::EY ] = 0.0; - cellParams[CellParams::EZ ] = 0.0; - cellParams[CellParams::PERBX ] = 0.0; - cellParams[CellParams::PERBY ] = 0.0; - cellParams[CellParams::PERBZ ] = 0.0; - - } + void Diffusion::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { } void Diffusion::setProjectBField( + FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/Diffusion/Diffusion.h b/projects/Diffusion/Diffusion.h index cf618752a..007c7fbab 100644 --- a/projects/Diffusion/Diffusion.h +++ b/projects/Diffusion/Diffusion.h @@ -47,6 +47,7 @@ namespace projects { virtual void getParameters(void); /*! set background field, should set it for all cells */ virtual void setProjectBField( + FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/ElectricSail/electric_sail.cpp b/projects/ElectricSail/electric_sail.cpp index 7bd0ed3d1..725d423e0 100644 --- a/projects/ElectricSail/electric_sail.cpp +++ b/projects/ElectricSail/electric_sail.cpp @@ -212,6 +212,7 @@ namespace projects { * NOTE: This is only called in grid.cpp:initializeGrid. 
*/ void ElectricSail::setProjectBField( + FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/ElectricSail/electric_sail.h b/projects/ElectricSail/electric_sail.h index f98e50603..5a2866150 100644 --- a/projects/ElectricSail/electric_sail.h +++ b/projects/ElectricSail/electric_sail.h @@ -53,6 +53,7 @@ namespace projects { virtual void getParameters(); virtual bool initialize(); virtual void setProjectBField( + FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/Flowthrough/Flowthrough.cpp b/projects/Flowthrough/Flowthrough.cpp index ad813622e..a1541ca0d 100644 --- a/projects/Flowthrough/Flowthrough.cpp +++ b/projects/Flowthrough/Flowthrough.cpp @@ -202,14 +202,10 @@ namespace projects { } } - void Flowthrough::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - cellParams[CellParams::PERBX] = 0.; - cellParams[CellParams::PERBY] = 0.; - cellParams[CellParams::PERBZ] = 0.; - } + void Flowthrough::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { } void Flowthrough::setProjectBField( + FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/Flowthrough/Flowthrough.h b/projects/Flowthrough/Flowthrough.h index 660915b64..e58976a55 100644 --- a/projects/Flowthrough/Flowthrough.h +++ b/projects/Flowthrough/Flowthrough.h @@ -48,6 +48,7 @@ namespace projects { static void addParameters(void); virtual void getParameters(void); virtual void setProjectBField( + FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); From cf61cab50e5565a9deca82cd6aecde75d31d8bbd Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 11 Apr 2019 14:22:05 +0300 Subject: [PATCH 299/602] Random number generator interface change and setProjectBField for Dispersion. 
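The RNG interface drops the SpatialCell argument: setRandomSeed(cellID) now carries all the cell dependence, and getRandomNumber() just draws from the seeded state. This is what lets the fsgrid loops below seed from perBGrid.GlobalIDForCoords(x, y, z), so the perturbations come out reproducible no matter how the domain is decomposed. A minimal sketch of the discipline, using std::mt19937_64 as a stand-in (the real implementation keeps rand_r/random_r buffers behind the same two calls, and cfgSeed here plays the role of the seed read from the cfg file):

#include <cstdint>
#include <iostream>
#include <random>

struct ProjectRng {
   uint64_t cfgSeed = 42;
   std::mt19937_64 engine;
   // Re-seed from the global cell ID before drawing for that cell.
   void setRandomSeed(uint64_t cellID) { engine.seed(cfgSeed + cellID); }
   // Uniform in [0, 1), like getRandomNumber() in the diff.
   double getRandomNumber() {
      return std::uniform_real_distribution<double>(0.0, 1.0)(engine);
   }
};

int main() {
   ProjectRng rng;
   rng.setRandomSeed(4104);
   const double a = rng.getRandomNumber();
   rng.setRandomSeed(4104);   // same cell ID ...
   const double b = rng.getRandomNumber();
   std::cout << (a == b ? "reproducible\n" : "broken\n");  // ... same draw
   return 0;
}

With the old dccrg-based interface the seed was derived from a cell ID computed out of cellParams coordinates; seeding from the fsgrid global ID keeps the same per-cell determinism at the field solver's finer resolution.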
--- projects/Dispersion/Dispersion.cpp | 48 ++++++++++++++---------- projects/Dispersion/Dispersion.h | 1 + projects/Distributions/Distributions.cpp | 8 ++-- projects/Fluctuations/Fluctuations.cpp | 16 ++++---- projects/MultiPeak/MultiPeak.cpp | 8 ++-- projects/project.cpp | 6 +-- projects/project.h | 4 +- projects/testAmr/testAmr.cpp | 8 ++-- 8 files changed, 55 insertions(+), 44 deletions(-) diff --git a/projects/Dispersion/Dispersion.cpp b/projects/Dispersion/Dispersion.cpp index ee67c19f4..e3cbe9535 100644 --- a/projects/Dispersion/Dispersion.cpp +++ b/projects/Dispersion/Dispersion.cpp @@ -231,30 +231,17 @@ namespace projects { (int) ((y - Parameters::ymin) / dy) * Parameters::xcells_ini + (int) ((z - Parameters::zmin) / dz) * Parameters::xcells_ini * Parameters::ycells_ini; - setRandomSeed(cell,cellID); - - cellParams[CellParams::EX ] = 0.0; - cellParams[CellParams::EY ] = 0.0; - cellParams[CellParams::EZ ] = 0.0; + setRandomSeed(cellID); - this->rndRho=getRandomNumber(cell); + this->rndRho=getRandomNumber(); - this->rndVel[0]=getRandomNumber(cell); - this->rndVel[1]=getRandomNumber(cell); - this->rndVel[2]=getRandomNumber(cell); - - Real rndBuffer[3]; - rndBuffer[0]=getRandomNumber(cell); - rndBuffer[1]=getRandomNumber(cell); - rndBuffer[2]=getRandomNumber(cell); - - cellParams[CellParams::PERBX] = this->magXPertAbsAmp * (0.5 - rndBuffer[0]); - cellParams[CellParams::PERBY] = this->magYPertAbsAmp * (0.5 - rndBuffer[1]); - cellParams[CellParams::PERBZ] = this->magZPertAbsAmp * (0.5 - rndBuffer[2]); - + this->rndVel[0]=getRandomNumber(); + this->rndVel[1]=getRandomNumber(); + this->rndVel[2]=getRandomNumber(); } void Dispersion::setProjectBField( + FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { @@ -264,5 +251,28 @@ namespace projects { this->B0 * sin(this->angleXZ)); setBackgroundField(bgField, BgBGrid); + + const auto localSize = BgBGrid.getLocalSize(); + +#pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + std::array* cell = perBGrid.get(x, y, z); + const int64_t cellid = perBGrid.GlobalIDForCoords(x, y, z); + + setRandomSeed(cellid); + + Real rndBuffer[3]; + rndBuffer[0]=getRandomNumber(); + rndBuffer[1]=getRandomNumber(); + rndBuffer[2]=getRandomNumber(); + + cell->at(fsgrids::bfield::PERBX) = this->magXPertAbsAmp * (0.5 - rndBuffer[0]); + cell->at(fsgrids::bfield::PERBY) = this->magYPertAbsAmp * (0.5 - rndBuffer[1]); + cell->at(fsgrids::bfield::PERBZ) = this->magZPertAbsAmp * (0.5 - rndBuffer[2]); + } + } + } } } // namespace projects diff --git a/projects/Dispersion/Dispersion.h b/projects/Dispersion/Dispersion.h index 34afe8d5b..3a1d5b902 100644 --- a/projects/Dispersion/Dispersion.h +++ b/projects/Dispersion/Dispersion.h @@ -52,6 +52,7 @@ namespace projects { static void addParameters(void); virtual void getParameters(void); virtual void setProjectBField( + FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); diff --git a/projects/Distributions/Distributions.cpp b/projects/Distributions/Distributions.cpp index d2adb641f..e26b6f1a3 100644 --- a/projects/Distributions/Distributions.cpp +++ b/projects/Distributions/Distributions.cpp @@ -158,12 +158,12 @@ namespace projects { cellParams[CellParams::PERBZ] = this->dBz*cos(2.0 * M_PI * cellParams[CellParams::XCRD] / this->lambda); } - cellParams[CellParams::PERBX] += this->magXPertAbsAmp 
* (0.5 - getRandomNumber(cell)); - cellParams[CellParams::PERBY] += this->magYPertAbsAmp * (0.5 - getRandomNumber(cell)); - cellParams[CellParams::PERBZ] += this->magZPertAbsAmp * (0.5 - getRandomNumber(cell)); + cellParams[CellParams::PERBX] += this->magXPertAbsAmp * (0.5 - getRandomNumber()); + cellParams[CellParams::PERBY] += this->magYPertAbsAmp * (0.5 - getRandomNumber()); + cellParams[CellParams::PERBZ] += this->magZPertAbsAmp * (0.5 - getRandomNumber()); for (uint i=0; i<2; i++) { - this->rhoRnd[i] = this->rho[i] + this->rhoPertAbsAmp[i] * (0.5 - getRandomNumber(cell)); + this->rhoRnd[i] = this->rho[i] + this->rhoPertAbsAmp[i] * (0.5 - getRandomNumber()); } } diff --git a/projects/Fluctuations/Fluctuations.cpp b/projects/Fluctuations/Fluctuations.cpp index d5a0efb22..9f8558d2b 100644 --- a/projects/Fluctuations/Fluctuations.cpp +++ b/projects/Fluctuations/Fluctuations.cpp @@ -162,20 +162,20 @@ namespace projects { (int) ((y - Parameters::ymin) / dy) * Parameters::xcells_ini + (int) ((z - Parameters::zmin) / dz) * Parameters::xcells_ini * Parameters::ycells_ini; - setRandomSeed(cell,cellID); + setRandomSeed(cellID); cellParams[CellParams::EX ] = 0.0; cellParams[CellParams::EY ] = 0.0; cellParams[CellParams::EZ ] = 0.0; - this->rndRho=getRandomNumber(cell); - this->rndVel[0]=getRandomNumber(cell); - this->rndVel[1]=getRandomNumber(cell); - this->rndVel[2]=getRandomNumber(cell); + this->rndRho=getRandomNumber(); + this->rndVel[0]=getRandomNumber(); + this->rndVel[1]=getRandomNumber(); + this->rndVel[2]=getRandomNumber(); - cellParams[CellParams::PERBX] = this->magXPertAbsAmp * (0.5 - getRandomNumber(cell)); - cellParams[CellParams::PERBY] = this->magYPertAbsAmp * (0.5 - getRandomNumber(cell)); - cellParams[CellParams::PERBZ] = this->magZPertAbsAmp * (0.5 - getRandomNumber(cell)); + cellParams[CellParams::PERBX] = this->magXPertAbsAmp * (0.5 - getRandomNumber()); + cellParams[CellParams::PERBY] = this->magYPertAbsAmp * (0.5 - getRandomNumber()); + cellParams[CellParams::PERBZ] = this->magZPertAbsAmp * (0.5 - getRandomNumber()); } void Fluctuations::setProjectBField( diff --git a/projects/MultiPeak/MultiPeak.cpp b/projects/MultiPeak/MultiPeak.cpp index 1d7c540a2..f8a598d42 100644 --- a/projects/MultiPeak/MultiPeak.cpp +++ b/projects/MultiPeak/MultiPeak.cpp @@ -222,11 +222,11 @@ namespace projects { cellParams[CellParams::PERBZ] = this->dBz*cos(2.0 * M_PI * cellParams[CellParams::XCRD] / this->lambda); } - cellParams[CellParams::PERBX] += this->magXPertAbsAmp * (0.5 - getRandomNumber(cell)); - cellParams[CellParams::PERBY] += this->magYPertAbsAmp * (0.5 - getRandomNumber(cell)); - cellParams[CellParams::PERBZ] += this->magZPertAbsAmp * (0.5 - getRandomNumber(cell)); + cellParams[CellParams::PERBX] += this->magXPertAbsAmp * (0.5 - getRandomNumber()); + cellParams[CellParams::PERBY] += this->magYPertAbsAmp * (0.5 - getRandomNumber()); + cellParams[CellParams::PERBZ] += this->magZPertAbsAmp * (0.5 - getRandomNumber()); - rhoRnd = 0.5 - getRandomNumber(cell); + rhoRnd = 0.5 - getRandomNumber(); } void MultiPeak::setProjectBField( diff --git a/projects/project.cpp b/projects/project.cpp index 2cce25a3c..05e11c297 100644 --- a/projects/project.cpp +++ b/projects/project.cpp @@ -465,7 +465,7 @@ namespace projects { /** Get random number between 0 and 1.0. One should always first initialize the rng. * @param cell Spatial cell. 
* @return Uniformly distributed random number between 0 and 1.*/ - Real Project::getRandomNumber(spatial_cell::SpatialCell* cell) const { + Real Project::getRandomNumber() const { #ifdef _AIX int64_t rndInt; random_r(&rndInt, &rngDataBuffer); @@ -483,7 +483,7 @@ namespace projects { \param seedModifier d. Seed is based on the seed read in from cfg + the seedModifier parameter */ - void Project::setRandomSeed(spatial_cell::SpatialCell* cell,CellID seedModifier) const { + void Project::setRandomSeed(CellID seedModifier) const { memset(&(this->rngDataBuffer), 0, sizeof(this->rngDataBuffer)); #ifdef _AIX initstate_r(this->seed+seedModifier, &(this->rngStateBuffer[0]), 256, NULL, &(this->rngDataBuffer)); @@ -510,7 +510,7 @@ namespace projects { const CellID cellID = (int) ((x - Parameters::xmin) / dx) + (int) ((y - Parameters::ymin) / dy) * Parameters::xcells_ini + (int) ((z - Parameters::zmin) / dz) * Parameters::xcells_ini * Parameters::ycells_ini; - setRandomSeed(cell,cellID); + setRandomSeed(cellID); } /* diff --git a/projects/project.h b/projects/project.h index 99ba05e44..7bd09a442 100644 --- a/projects/project.h +++ b/projects/project.h @@ -146,7 +146,7 @@ namespace projects { /*! Get random number between 0 and 1.0. One should always first initialize the rng. */ - Real getRandomNumber(spatial_cell::SpatialCell* cell) const; + Real getRandomNumber() const; void printPopulations(); @@ -158,7 +158,7 @@ namespace projects { * \param seedModified d. Seed is based on the seed read in from cfg + the seedModifier parameter */ - void setRandomSeed(spatial_cell::SpatialCell* cell,uint64_t seedModifier) const; + void setRandomSeed(uint64_t seedModifier) const; /*! Set random seed (thread-safe) that is always the same for this particular cellID. Can be used to make reproducible diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index e3d4af737..45c2783bb 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -223,11 +223,11 @@ namespace projects { cellParams[CellParams::PERBZ] = this->dBz*cos(2.0 * M_PI * cellParams[CellParams::XCRD] / this->lambda); } - cellParams[CellParams::PERBX] += this->magXPertAbsAmp * (0.5 - getRandomNumber(cell)); - cellParams[CellParams::PERBY] += this->magYPertAbsAmp * (0.5 - getRandomNumber(cell)); - cellParams[CellParams::PERBZ] += this->magZPertAbsAmp * (0.5 - getRandomNumber(cell)); + cellParams[CellParams::PERBX] += this->magXPertAbsAmp * (0.5 - getRandomNumber()); + cellParams[CellParams::PERBY] += this->magYPertAbsAmp * (0.5 - getRandomNumber()); + cellParams[CellParams::PERBZ] += this->magZPertAbsAmp * (0.5 - getRandomNumber()); - rhoRnd = 0.5 - getRandomNumber(cell); + rhoRnd = 0.5 - getRandomNumber(); } void testAmr::setProjectBField( From c53860c15f8599e3670e9ade9efeed1f453fd1fc Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 11 Apr 2019 14:36:24 +0300 Subject: [PATCH 300/602] setProjectBField for Distributions. 
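Distributions follows the same recipe as Dispersion above, with one more ingredient: the sinusoidal part of the perturbation needs physical coordinates, which now come from the grid itself via getPhysicalCoords(x, y, z) rather than from CellParams::XCRD. A sketch of that per-cell pattern under toy types (ToyPerBGrid and its methods are stand-ins mirroring the calls in the diff, not the real FsGrid interface):

#include <array>
#include <cmath>
#include <vector>

enum bf { PERBX, PERBY, PERBZ, N_BF };

struct ToyPerBGrid {
   std::array<int, 3> size{{4, 4, 4}};
   std::array<double, 3> d{{1.0, 1.0, 1.0}};  // cell edge lengths
   std::vector<std::array<double, N_BF>> cells =
      std::vector<std::array<double, N_BF>>(size[0] * size[1] * size[2]);
   std::array<int, 3> getLocalSize() const { return size; }
   std::array<double, 3> getPhysicalCoords(int x, int y, int z) const {
      return {{x * d[0], y * d[1], z * d[2]}};
   }
   std::array<double, N_BF>* get(int x, int y, int z) {
      return &cells[x + size[0] * (y + size[1] * z)];
   }
};

void fillSinusoidalPerB(ToyPerBGrid& perBGrid, double dBx, double dBy,
                        double dBz, double lambda) {
   const std::array<int, 3> localSize = perBGrid.getLocalSize();
#pragma omp parallel for collapse(3)
   for (int x = 0; x < localSize[0]; ++x) {
      for (int y = 0; y < localSize[1]; ++y) {
         for (int z = 0; z < localSize[2]; ++z) {
            const std::array<double, 3> xyz = perBGrid.getPhysicalCoords(x, y, z);
            std::array<double, N_BF>* cell = perBGrid.get(x, y, z);
            if (lambda != 0.0) {
               const double phase = 2.0 * M_PI * xyz[0] / lambda;
               cell->at(PERBX) = dBx * std::cos(phase);
               cell->at(PERBY) = dBy * std::sin(phase);
               cell->at(PERBZ) = dBz * std::cos(phase);
            }
         }
      }
   }
}

int main() {
   ToyPerBGrid perB;
   fillSinusoidalPerB(perB, 1.0e-9, 1.0e-9, 1.0e-9, 10.0);
   return 0;
}

The collapse(3) pragma flattens the triple loop into one parallel iteration space; it is safe here because every (x, y, z) writes to a distinct cell.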
--- projects/Distributions/Distributions.cpp | 36 +++++++++++++++++------- projects/Distributions/Distributions.h | 1 + 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/projects/Distributions/Distributions.cpp b/projects/Distributions/Distributions.cpp index e26b6f1a3..6516115d2 100644 --- a/projects/Distributions/Distributions.cpp +++ b/projects/Distributions/Distributions.cpp @@ -152,22 +152,13 @@ namespace projects { void Distributions::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { Real* cellParams = cell->get_cell_parameters(); setRandomCellSeed(cell,cellParams); - if (this->lambda != 0.0) { - cellParams[CellParams::PERBX] = this->dBx*cos(2.0 * M_PI * cellParams[CellParams::XCRD] / this->lambda); - cellParams[CellParams::PERBY] = this->dBy*sin(2.0 * M_PI * cellParams[CellParams::XCRD] / this->lambda); - cellParams[CellParams::PERBZ] = this->dBz*cos(2.0 * M_PI * cellParams[CellParams::XCRD] / this->lambda); - } - - cellParams[CellParams::PERBX] += this->magXPertAbsAmp * (0.5 - getRandomNumber()); - cellParams[CellParams::PERBY] += this->magYPertAbsAmp * (0.5 - getRandomNumber()); - cellParams[CellParams::PERBZ] += this->magZPertAbsAmp * (0.5 - getRandomNumber()); - for (uint i=0; i<2; i++) { this->rhoRnd[i] = this->rho[i] + this->rhoPertAbsAmp[i] * (0.5 - getRandomNumber()); } } void Distributions::setProjectBField( + FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { @@ -177,6 +168,31 @@ namespace projects { this->Bz); setBackgroundField(bgField, BgBGrid); + + const auto localSize = BgBGrid.getLocalSize(); + +#pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + std::array* cell = perBGrid.get(x, y, z); + const int64_t cellid = perBGrid.GlobalIDForCoords(x, y, z); + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + + setRandomSeed(cellid); + + if (this->lambda != 0.0) { + cell->at(fsgrids::bfield::PERBX) = this->dBx*cos(2.0 * M_PI * xyz[0] / this->lambda); + cell->at(fsgrids::bfield::PERBY) = this->dBy*sin(2.0 * M_PI * xyz[0] / this->lambda); + cell->at(fsgrids::bfield::PERBZ) = this->dBz*cos(2.0 * M_PI * xyz[0] / this->lambda); + } + + cell->at(fsgrids::bfield::PERBX) += this->magXPertAbsAmp * (0.5 - getRandomNumber()); + cell->at(fsgrids::bfield::PERBY) += this->magYPertAbsAmp * (0.5 - getRandomNumber()); + cell->at(fsgrids::bfield::PERBZ) += this->magZPertAbsAmp * (0.5 - getRandomNumber()); + } + } + } } vector> Distributions::getV0( diff --git a/projects/Distributions/Distributions.h b/projects/Distributions/Distributions.h index 12eef5cf9..b594caf9b 100644 --- a/projects/Distributions/Distributions.h +++ b/projects/Distributions/Distributions.h @@ -37,6 +37,7 @@ namespace projects { static void addParameters(void); virtual void getParameters(void); virtual void setProjectBField( + FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); From 7511304ca680dfd5b841ecf303e1efc09b4c0b8f Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 11 Apr 2019 14:40:03 +0300 Subject: [PATCH 301/602] setProjectBField for Firehose --- projects/Firehose/Firehose.cpp | 20 +++++++++++++++----- projects/Firehose/Firehose.h | 6 +++++- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/projects/Firehose/Firehose.cpp b/projects/Firehose/Firehose.cpp index b4e3eb2fb..77641034b 100644 --- a/projects/Firehose/Firehose.cpp 
+++ b/projects/Firehose/Firehose.cpp @@ -27,6 +27,7 @@ #include "../../common.h" #include "../../readparameters.h" #include "../../backgroundfield/backgroundfield.h" +#include "../../backgroundfield/constantfield.hpp" #include "../../object_wrapper.h" #include "Firehose.h" @@ -150,10 +151,19 @@ namespace projects { return avg / pow(sP.nSpaceSamples, 2.0) / pow(sP.nVelocitySamples, 3.0); } - void Firehose::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - cellParams[CellParams::PERBX ] = this->Bx; - cellParams[CellParams::PERBY ] = this->By; - cellParams[CellParams::PERBZ ] = this->Bz; + void Firehose::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { } + + void Firehose::setProjectBField( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { + ConstantField bgField; + bgField.initialize(this->Bx, + this->By, + this->Bz); + + setBackgroundField(bgField, BgBGrid); } + } // namespace projects diff --git a/projects/Firehose/Firehose.h b/projects/Firehose/Firehose.h index 3aad43792..97a6682cc 100644 --- a/projects/Firehose/Firehose.h +++ b/projects/Firehose/Firehose.h @@ -49,7 +49,11 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - + virtual void setProjectBField( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); protected: Real getDistribValue( creal& x,creal& y, From 4e733668244904947fea4ecec5fbc8b88d49b1f6 Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 11 Apr 2019 14:43:33 +0300 Subject: [PATCH 302/602] setProjectBField for Fluctuations --- projects/Fluctuations/Fluctuations.cpp | 27 ++++++++++++++++++-------- projects/Fluctuations/Fluctuations.h | 1 + 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/projects/Fluctuations/Fluctuations.cpp b/projects/Fluctuations/Fluctuations.cpp index 9f8558d2b..b891aa382 100644 --- a/projects/Fluctuations/Fluctuations.cpp +++ b/projects/Fluctuations/Fluctuations.cpp @@ -163,22 +163,15 @@ namespace projects { (int) ((z - Parameters::zmin) / dz) * Parameters::xcells_ini * Parameters::ycells_ini; setRandomSeed(cellID); - - cellParams[CellParams::EX ] = 0.0; - cellParams[CellParams::EY ] = 0.0; - cellParams[CellParams::EZ ] = 0.0; this->rndRho=getRandomNumber(); this->rndVel[0]=getRandomNumber(); this->rndVel[1]=getRandomNumber(); this->rndVel[2]=getRandomNumber(); - - cellParams[CellParams::PERBX] = this->magXPertAbsAmp * (0.5 - getRandomNumber()); - cellParams[CellParams::PERBY] = this->magYPertAbsAmp * (0.5 - getRandomNumber()); - cellParams[CellParams::PERBZ] = this->magZPertAbsAmp * (0.5 - getRandomNumber()); } void Fluctuations::setProjectBField( + FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { @@ -188,6 +181,24 @@ namespace projects { this->BZ0); setBackgroundField(bgField, BgBGrid); + + const auto localSize = BgBGrid.getLocalSize(); + + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + std::array* cell = perBGrid.get(x, y, z); + const int64_t cellid = perBGrid.GlobalIDForCoords(x, y, z); + + setRandomSeed(cellid); + + cell->at(fsgrids::bfield::PERBX) = this->magXPertAbsAmp * (0.5 - getRandomNumber()); + cell->at(fsgrids::bfield::PERBY) = this->magYPertAbsAmp * 
(0.5 - getRandomNumber()); + cell->at(fsgrids::bfield::PERBZ) = this->magZPertAbsAmp * (0.5 - getRandomNumber()); + } + } + } } std::vector > Fluctuations::getV0( diff --git a/projects/Fluctuations/Fluctuations.h b/projects/Fluctuations/Fluctuations.h index b1a0e0ea7..26c64a3cd 100644 --- a/projects/Fluctuations/Fluctuations.h +++ b/projects/Fluctuations/Fluctuations.h @@ -49,6 +49,7 @@ namespace projects { static void addParameters(void); virtual void getParameters(void); virtual void setProjectBField( + FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); From 50d0cb22fd81912ac7d605444284b49272e705ed Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 11 Apr 2019 14:48:05 +0300 Subject: [PATCH 303/602] setProjectBField for Harris --- projects/Harris/Harris.cpp | 37 ++++++++++++++++++------------------- projects/Harris/Harris.h | 1 + 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/projects/Harris/Harris.cpp b/projects/Harris/Harris.cpp index 590b724cd..b23e23881 100644 --- a/projects/Harris/Harris.cpp +++ b/projects/Harris/Harris.cpp @@ -134,25 +134,7 @@ namespace projects { } - void Harris::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - creal x = cellParams[CellParams::XCRD]; - creal dx = cellParams[CellParams::DX]; - creal y = cellParams[CellParams::YCRD]; - creal dy = cellParams[CellParams::DY]; - creal z = cellParams[CellParams::ZCRD]; - creal dz = cellParams[CellParams::DZ]; - - cellParams[CellParams::EX ] = 0.0; - cellParams[CellParams::EY ] = 0.0; - cellParams[CellParams::EZ ] = 0.0; - cellParams[CellParams::PERBX ] = this->BX0 * tanh((y + 0.5 * dy) / this->SCA_LAMBDA); - cellParams[CellParams::PERBY ] = this->BY0 * tanh((z + 0.5 * dz) / this->SCA_LAMBDA); - cellParams[CellParams::PERBZ ] = this->BZ0 * tanh((x + 0.5 * dx) / this->SCA_LAMBDA); - cellParams[CellParams::BGBX ] = 0.0; - cellParams[CellParams::BGBY ] = 0.0; - cellParams[CellParams::BGBZ ] = 0.0; - } + void Harris::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { } vector> Harris::getV0( creal x, @@ -167,10 +149,27 @@ namespace projects { } void Harris::setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { setBackgroundFieldToZero(BgBGrid); + + auto localSize = perBGrid.getLocalSize(); + + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + cell->at(fsgrids::bfield::PERBX) = this->BX0 * tanh((xyz[1] + 0.5 * perBGrid.DY) / this->SCA_LAMBDA); + cell->at(fsgrids::bfield::PERBY) = this->BY0 * tanh((xyz[2] + 0.5 * perBGrid.DZ) / this->SCA_LAMBDA); + cell->at(fsgrids::bfield::PERBZ) = this->BZ0 * tanh((xyz[0] + 0.5 * perBGrid.DX) / this->SCA_LAMBDA); + } + } + } } } // namespace projects diff --git a/projects/Harris/Harris.h b/projects/Harris/Harris.h index 6e122fe5d..3cae54111 100644 --- a/projects/Harris/Harris.h +++ b/projects/Harris/Harris.h @@ -45,6 +45,7 @@ namespace projects { virtual void getParameters(void); virtual void calcCellParameters(spatial_cell::SpatialCell* cell,creal& t); virtual void setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); From 
c70c7f6172b1bdba681588abbc5edb020e62243e Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 11 Apr 2019 15:03:08 +0300 Subject: [PATCH 304/602] setProjectBField for IPShock --- projects/IPShock/IPShock.cpp | 106 +++++++++++++++++------------------ projects/IPShock/IPShock.h | 1 + 2 files changed, 52 insertions(+), 55 deletions(-) diff --git a/projects/IPShock/IPShock.cpp b/projects/IPShock/IPShock.cpp index b74ec22c7..123dd770c 100644 --- a/projects/IPShock/IPShock.cpp +++ b/projects/IPShock/IPShock.cpp @@ -421,61 +421,7 @@ namespace projects { } } - void IPShock::calcCellParameters(spatial_cell::SpatialCell* cell, creal& t) { - // Disable compiler warnings: (unused variables but the function is inherited) - (void)t; - - /* Maintain all values in BPERT for simplicity */ - Real* cellParams = cell->get_cell_parameters(); - - creal x = cellParams[CellParams::XCRD]; - creal dx = cellParams[CellParams::DX]; - creal y = cellParams[CellParams::YCRD]; - creal dy = cellParams[CellParams::DY]; - creal z = cellParams[CellParams::ZCRD]; - creal dz = cellParams[CellParams::DZ]; - - cellParams[CellParams::EX ] = 0.0; - cellParams[CellParams::EY ] = 0.0; - cellParams[CellParams::EZ ] = 0.0; - - Real KB = physicalconstants::K_B; - Real mu0 = physicalconstants::MU_0; - Real adiab = 5./3.; - - // Interpolate density between upstream and downstream - // All other values are calculated from jump conditions - Real MassDensity = 0.; - Real MassDensityU = 0.; - Real EffectiveVu0 = 0.; - for(uint i=0; i< getObjectWrapper().particleSpecies.size(); i++) { - const IPShockSpeciesParameters& sP = speciesParams[i]; - Real mass = getObjectWrapper().particleSpecies[i].mass; - - MassDensity += mass * interpolate(sP.DENSITYu,sP.DENSITYd, x); - MassDensityU += mass * sP.DENSITYu; - EffectiveVu0 += sP.V0u[0] * mass * sP.DENSITYu; - } - EffectiveVu0 /= MassDensityU; - - // Solve tangential components for B and V - Real VX = MassDensityU * EffectiveVu0 / MassDensity; - Real BX = this->B0u[0]; - Real MAsq = std::pow((EffectiveVu0/this->B0u[0]), 2) * MassDensityU * mu0; - Real Btang = this->B0utangential * (MAsq - 1.0)/(MAsq*VX/EffectiveVu0 -1.0); - Real Vtang = VX * Btang / BX; - - /* Reconstruct Y and Z components using cos(phi) values and signs. Tangential variables are always positive. */ - Real BY = abs(Btang) * this->Bucosphi * this->Byusign; - Real BZ = abs(Btang) * sqrt(1. - this->Bucosphi * this->Bucosphi) * this->Bzusign; - //Real VY = Vtang * this->Vucosphi * this->Vyusign; - //Real VZ = Vtang * sqrt(1. 
- this->Vucosphi * this->Vucosphi) * this->Vzusign; - - cellParams[CellParams::PERBX ] = BX; - cellParams[CellParams::PERBY ] = BY; - cellParams[CellParams::PERBZ ] = BZ; - - } + void IPShock::calcCellParameters(spatial_cell::SpatialCell* cell, creal& t) { } Real IPShock::interpolate(Real upstream, Real downstream, Real x) const { Real coord = 0.5 + x/this->Shockwidth; //Now shock will be from 0 to 1 @@ -492,10 +438,60 @@ namespace projects { } void IPShock::setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { setBackgroundFieldToZero(BgBGrid); + + auto localSize = perBGrid.getLocalSize(); + + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + /* Maintain all values in BPERT for simplicity */ + Real KB = physicalconstants::K_B; + Real mu0 = physicalconstants::MU_0; + Real adiab = 5./3.; + + // Interpolate density between upstream and downstream + // All other values are calculated from jump conditions + Real MassDensity = 0.; + Real MassDensityU = 0.; + Real EffectiveVu0 = 0.; + for(uint i=0; i< getObjectWrapper().particleSpecies.size(); i++) { + const IPShockSpeciesParameters& sP = speciesParams[i]; + Real mass = getObjectWrapper().particleSpecies[i].mass; + + MassDensity += mass * interpolate(sP.DENSITYu,sP.DENSITYd, xyz[0]); + MassDensityU += mass * sP.DENSITYu; + EffectiveVu0 += sP.V0u[0] * mass * sP.DENSITYu; + } + EffectiveVu0 /= MassDensityU; + + // Solve tangential components for B and V + Real VX = MassDensityU * EffectiveVu0 / MassDensity; + Real BX = this->B0u[0]; + Real MAsq = std::pow((EffectiveVu0/this->B0u[0]), 2) * MassDensityU * mu0; + Real Btang = this->B0utangential * (MAsq - 1.0)/(MAsq*VX/EffectiveVu0 -1.0); + Real Vtang = VX * Btang / BX; + + /* Reconstruct Y and Z components using cos(phi) values and signs. Tangential variables are always positive. */ + Real BY = abs(Btang) * this->Bucosphi * this->Byusign; + Real BZ = abs(Btang) * sqrt(1. - this->Bucosphi * this->Bucosphi) * this->Bzusign; + //Real VY = Vtang * this->Vucosphi * this->Vyusign; + //Real VZ = Vtang * sqrt(1. 
- this->Vucosphi * this->Vucosphi) * this->Vzusign; + + cell->at(fsgrids::bfield::PERBX) = BX; + cell->at(fsgrids::bfield::PERBY) = BY; + cell->at(fsgrids::bfield::PERBZ) = BZ; + } + } + } } }//namespace projects diff --git a/projects/IPShock/IPShock.h b/projects/IPShock/IPShock.h index 9b43cef35..e4fd60566 100644 --- a/projects/IPShock/IPShock.h +++ b/projects/IPShock/IPShock.h @@ -63,6 +63,7 @@ namespace projects { virtual void getParameters(void); virtual void setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); From fa733a12a54a7e518dc58024f46576940228dc3d Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 11 Apr 2019 15:09:59 +0300 Subject: [PATCH 305/602] setProjectBField for Magnetosphere --- projects/Magnetosphere/Magnetosphere.cpp | 10 +++------- projects/Magnetosphere/Magnetosphere.h | 1 + 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index dae24f818..c8c8c5b07 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -249,15 +249,11 @@ namespace projects { } /*! Magnetosphere does not set any extra perturbed B. */ - void Magnetosphere::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - cellParams[CellParams::PERBX] = 0.0; - cellParams[CellParams::PERBY] = 0.0; - cellParams[CellParams::PERBZ] = 0.0; - } + void Magnetosphere::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { } /* set 0-centered dipole */ void Magnetosphere::setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { @@ -297,7 +293,7 @@ namespace projects { } - auto localSize = BgBGrid.getLocalSize(); + const auto localSize = BgBGrid.getLocalSize(); #pragma omp parallel { diff --git a/projects/Magnetosphere/Magnetosphere.h b/projects/Magnetosphere/Magnetosphere.h index 3b91ab062..34ffe208a 100644 --- a/projects/Magnetosphere/Magnetosphere.h +++ b/projects/Magnetosphere/Magnetosphere.h @@ -48,6 +48,7 @@ namespace projects { static void addParameters(void); virtual void getParameters(void); virtual void setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); From eeb1ec0711326b7f644e729987e4fd71bb48b45b Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 11 Apr 2019 15:12:25 +0300 Subject: [PATCH 306/602] setProjectBField for Larmor --- projects/Larmor/Larmor.cpp | 18 ++---------------- projects/Larmor/Larmor.h | 1 + 2 files changed, 3 insertions(+), 16 deletions(-) diff --git a/projects/Larmor/Larmor.cpp b/projects/Larmor/Larmor.cpp index e39948cd4..ea6ed8bb6 100644 --- a/projects/Larmor/Larmor.cpp +++ b/projects/Larmor/Larmor.cpp @@ -138,24 +138,10 @@ namespace projects { } - void Larmor::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - creal x = cellParams[CellParams::XCRD]; - creal dx = cellParams[CellParams::DX]; - creal y = cellParams[CellParams::YCRD]; - creal dy = cellParams[CellParams::DY]; - creal z = cellParams[CellParams::ZCRD]; - creal dz = cellParams[CellParams::DZ]; - - cellParams[CellParams::EX ] = 0.0; - cellParams[CellParams::EY ] = 0.0; - cellParams[CellParams::EZ ] = 0.0; - cellParams[CellParams::PERBX ] = 0.0; - cellParams[CellParams::PERBY ] = 0.0; - cellParams[CellParams::PERBZ 
] = 0.0; - } + void Larmor::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { } void Larmor::setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/Larmor/Larmor.h b/projects/Larmor/Larmor.h index 766de2960..69dbc48d5 100644 --- a/projects/Larmor/Larmor.h +++ b/projects/Larmor/Larmor.h @@ -39,6 +39,7 @@ namespace projects { static void addParameters(void); virtual void getParameters(void); virtual void setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); From cb5d03fccf7f6f5b45b07f658dccf48cbc092583 Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 11 Apr 2019 15:14:28 +0300 Subject: [PATCH 307/602] setProjectBField for Poisson --- projects/Poisson/poisson_test.cpp | 5 ++--- projects/Poisson/poisson_test.h | 1 + 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/projects/Poisson/poisson_test.cpp b/projects/Poisson/poisson_test.cpp index 7b48e726c..757c13da6 100644 --- a/projects/Poisson/poisson_test.cpp +++ b/projects/Poisson/poisson_test.cpp @@ -73,11 +73,10 @@ namespace projects { } void PoissonTest::setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid - ) { - - } + ) { } void PoissonTest::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { Real* cellParams = cell->get_cell_parameters(); diff --git a/projects/Poisson/poisson_test.h b/projects/Poisson/poisson_test.h index dc2edfef8..23080a5ef 100644 --- a/projects/Poisson/poisson_test.h +++ b/projects/Poisson/poisson_test.h @@ -42,6 +42,7 @@ namespace projects { virtual void getParameters(); virtual bool initialize(); virtual void setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); From ffe1485e295c93e728ff7f67c8efa45bb927668c Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 11 Apr 2019 15:16:45 +0300 Subject: [PATCH 308/602] setProjectBField for Template... 
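Patches 304-318 apply one and the same refactoring to every project: the perturbed-field initialization moves out of calcCellParameters, which becomes an empty stub, and into setProjectBField, which now receives the perBGrid fsgrid and writes the field directly into it. The shared skeleton is sketched below. Note that ExampleProject is a placeholder name, and that the FsGrid template arguments are reconstructed here (the archived diffs render them as "FsGrid< std::array, 2>", with the array parameters lost), so the exact types should be checked against the tree.

void ExampleProject::setProjectBField(
   FsGrid< std::array<Real, fsgrids::bfield::N_BFIELD>, 2> & perBGrid,
   FsGrid< std::array<Real, fsgrids::bgbfield::N_BGB>, 2> & BgBGrid,
   FsGrid< fsgrids::technical, 2> & technicalGrid
) {
   // Zero the background field (or set an analytic one, as MultiPeak does below).
   setBackgroundFieldToZero(BgBGrid);

   const auto localSize = perBGrid.getLocalSize();

   #pragma omp parallel for collapse(3)
   for (int x = 0; x < localSize[0]; ++x) {
      for (int y = 0; y < localSize[1]; ++y) {
         for (int z = 0; z < localSize[2]; ++z) {
            // Physical coordinates of this fsgrid cell, and its field storage.
            const auto xyz = perBGrid.getPhysicalCoords(x, y, z);
            auto* cell = perBGrid.get(x, y, z);

            // Replace the zeros with the project's profile evaluated at xyz.
            cell->at(fsgrids::bfield::PERBX) = 0.0;
            cell->at(fsgrids::bfield::PERBY) = 0.0;
            cell->at(fsgrids::bfield::PERBZ) = 0.0;
         }
      }
   }
}
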
--- projects/Template/Template.cpp | 1 + projects/Template/Template.h | 1 + 2 files changed, 2 insertions(+) diff --git a/projects/Template/Template.cpp b/projects/Template/Template.cpp index 591e2f599..8f61c0872 100644 --- a/projects/Template/Template.cpp +++ b/projects/Template/Template.cpp @@ -72,6 +72,7 @@ namespace projects { } void Template::setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/Template/Template.h b/projects/Template/Template.h index 9a7000c6e..ea7612417 100644 --- a/projects/Template/Template.h +++ b/projects/Template/Template.h @@ -36,6 +36,7 @@ namespace projects { static void addParameters(void); virtual void getParameters(void); virtual void setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); From 75f458fd12ce39032f7575a28d361d97bd9eef6d Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 11 Apr 2019 15:18:06 +0300 Subject: [PATCH 309/602] setProjectBField for test_trans --- projects/test_trans/test_trans.cpp | 11 ++--------- projects/test_trans/test_trans.h | 1 + 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/projects/test_trans/test_trans.cpp b/projects/test_trans/test_trans.cpp index 8d797499a..4ac5a7151 100644 --- a/projects/test_trans/test_trans.cpp +++ b/projects/test_trans/test_trans.cpp @@ -126,17 +126,10 @@ namespace projects { return 0.0; } - void test_trans::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - cellParams[CellParams::EX ] = 0.0; - cellParams[CellParams::EY ] = 0.0; - cellParams[CellParams::EZ ] = 0.0; - cellParams[CellParams::PERBX ] = 0.0; - cellParams[CellParams::PERBY ] = 0.0; - cellParams[CellParams::PERBZ ] = 0.0; - } + void test_trans::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { } void test_trans::setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/test_trans/test_trans.h b/projects/test_trans/test_trans.h index a2acbfe2a..da92bc83d 100644 --- a/projects/test_trans/test_trans.h +++ b/projects/test_trans/test_trans.h @@ -38,6 +38,7 @@ namespace projects { static void addParameters(void); virtual void getParameters(void); virtual void setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); From 3a7eea22066a39bf3e7c6bace77cbeb520b5e5b3 Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 11 Apr 2019 15:19:47 +0300 Subject: [PATCH 310/602] setProjectBField for VelocityBox --- projects/VelocityBox/VelocityBox.cpp | 11 ++--------- projects/VelocityBox/VelocityBox.h | 1 + 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/projects/VelocityBox/VelocityBox.cpp b/projects/VelocityBox/VelocityBox.cpp index bd1af27ca..891ffbe19 100644 --- a/projects/VelocityBox/VelocityBox.cpp +++ b/projects/VelocityBox/VelocityBox.cpp @@ -99,17 +99,10 @@ namespace projects { - void VelocityBox::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - cellParams[CellParams::EX ] = 0.0; - cellParams[CellParams::EY ] = 0.0; - cellParams[CellParams::EZ ] = 0.0; - cellParams[CellParams::PERBX ] = 0.0; - cellParams[CellParams::PERBY ] = 0.0; - cellParams[CellParams::PERBZ ] = 0.0; - } + void 
VelocityBox::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { } void VelocityBox::setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/VelocityBox/VelocityBox.h b/projects/VelocityBox/VelocityBox.h index 2fe236f65..8afbc8134 100644 --- a/projects/VelocityBox/VelocityBox.h +++ b/projects/VelocityBox/VelocityBox.h @@ -37,6 +37,7 @@ namespace projects { static void addParameters(void); virtual void getParameters(void); virtual void setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); From ffa3d47f7ee55e9d117f7d0630ca3237c6b42420 Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 11 Apr 2019 15:21:35 +0300 Subject: [PATCH 311/602] setProjectBField for verificationLarmor --- .../verificationLarmor/verificationLarmor.cpp | 18 ++---------------- .../verificationLarmor/verificationLarmor.h | 1 + 2 files changed, 3 insertions(+), 16 deletions(-) diff --git a/projects/verificationLarmor/verificationLarmor.cpp b/projects/verificationLarmor/verificationLarmor.cpp index 160bb1822..a919f156e 100644 --- a/projects/verificationLarmor/verificationLarmor.cpp +++ b/projects/verificationLarmor/verificationLarmor.cpp @@ -110,24 +110,10 @@ namespace projects { } - void verificationLarmor::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - creal x = cellParams[CellParams::XCRD]; - creal dx = cellParams[CellParams::DX]; - creal y = cellParams[CellParams::YCRD]; - creal dy = cellParams[CellParams::DY]; - creal z = cellParams[CellParams::ZCRD]; - creal dz = cellParams[CellParams::DZ]; - - cellParams[CellParams::EX ] = 0.0; - cellParams[CellParams::EY ] = 0.0; - cellParams[CellParams::EZ ] = 0.0; - cellParams[CellParams::PERBX ] = 0.0; - cellParams[CellParams::PERBY ] = 0.0; - cellParams[CellParams::PERBZ ] = 0.0; - } + void verificationLarmor::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { } void verificationLarmor::setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/verificationLarmor/verificationLarmor.h b/projects/verificationLarmor/verificationLarmor.h index 01afe1417..2bc34b013 100644 --- a/projects/verificationLarmor/verificationLarmor.h +++ b/projects/verificationLarmor/verificationLarmor.h @@ -38,6 +38,7 @@ namespace projects { static void addParameters(void); virtual void getParameters(void); virtual void setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); From d7ae3c610cd6bc79d811269be1b302b3fa5fa853 Mon Sep 17 00:00:00 2001 From: ykempf Date: Mon, 15 Apr 2019 10:09:36 +0300 Subject: [PATCH 312/602] setProjectBField for KHB, support for nSpaceSamples == 1 --- projects/KHB/KHB.cpp | 65 ++++++++++++++++++++++++++++---------------- projects/KHB/KHB.h | 5 ++++ 2 files changed, 46 insertions(+), 24 deletions(-) diff --git a/projects/KHB/KHB.cpp b/projects/KHB/KHB.cpp index fcbc1d638..e64d0a496 100644 --- a/projects/KHB/KHB.cpp +++ b/projects/KHB/KHB.cpp @@ -151,31 +151,48 @@ namespace projects { } - void KHB::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - cellParams[CellParams::EX ] = 0.0; - cellParams[CellParams::EY ] = 0.0; - cellParams[CellParams::EZ ] = 0.0; - creal x = 
cellParams[CellParams::XCRD]; - creal dx = cellParams[CellParams::DX]; - creal z = cellParams[CellParams::ZCRD]; - creal dz = cellParams[CellParams::DZ]; + void KHB::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { } + + void KHB::setProjectBField( + FsGrid< std::array, 2> & perBGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { + setBackgroundFieldToZero(BgBGrid); - Real Bxavg, Byavg, Bzavg; - Bxavg = Byavg = Bzavg = 0.0; - Real d_x = dx / (this->nSpaceSamples - 1); - Real d_z = dz / (this->nSpaceSamples - 1); - for (uint i=0; inSpaceSamples; ++i) - for (uint k=0; knSpaceSamples; ++k) { - Bxavg += profile(this->Bx[this->BOTTOM], this->Bx[this->TOP], x+i*d_x, z+k*d_z); - Byavg += profile(this->By[this->BOTTOM], this->By[this->TOP], x+i*d_x, z+k*d_z); - Bzavg += profile(this->Bz[this->BOTTOM], this->Bz[this->TOP], x+i*d_x, z+k*d_z); - } - cuint nPts = pow(this->nSpaceSamples, 2.0); + auto localSize = perBGrid.getLocalSize(); - cellParams[CellParams::PERBX ] = Bxavg / nPts; - cellParams[CellParams::PERBY ] = Byavg / nPts; - cellParams[CellParams::PERBZ ] = Bzavg / nPts; + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + Real Bxavg, Byavg, Bzavg; + Bxavg = Byavg = Bzavg = 0.0; + if(this->nSpaceSamples > 1) { + Real d_x = perBGrid.DX / (this->nSpaceSamples - 1); + Real d_z = perBGrid.DZ / (this->nSpaceSamples - 1); + for (uint i=0; inSpaceSamples; ++i) { + for (uint k=0; knSpaceSamples; ++k) { + Bxavg += profile(this->Bx[this->BOTTOM], this->Bx[this->TOP], xyz[0]+i*d_x, xyz[2]+k*d_z); + Byavg += profile(this->By[this->BOTTOM], this->By[this->TOP], xyz[0]+i*d_x, xyz[2]+k*d_z); + Bzavg += profile(this->Bz[this->BOTTOM], this->Bz[this->TOP], xyz[0]+i*d_x, xyz[2]+k*d_z); + } + } + cuint nPts = pow(this->nSpaceSamples, 2.0); + cell->at(fsgrids::bfield::PERBX) = Bxavg / nPts; + cell->at(fsgrids::bfield::PERBY) = Byavg / nPts; + cell->at(fsgrids::bfield::PERBZ) = Bzavg / nPts; + } else { + cell->at(fsgrids::bfield::PERBX) = profile(this->Bx[this->BOTTOM], this->Bx[this->TOP], xyz[0]+0.5*perBGrid.DX, xyz[2]+0.5*perBGrid.DZ); + cell->at(fsgrids::bfield::PERBY) = profile(this->By[this->BOTTOM], this->By[this->TOP], xyz[0]+0.5*perBGrid.DX, xyz[2]+0.5*perBGrid.DZ); + cell->at(fsgrids::bfield::PERBZ) = profile(this->Bz[this->BOTTOM], this->Bz[this->TOP], xyz[0]+0.5*perBGrid.DX, xyz[2]+0.5*perBGrid.DZ); + } + } + } + } } - + } // namespace projects diff --git a/projects/KHB/KHB.h b/projects/KHB/KHB.h index 82da2c04c..a11332cc1 100644 --- a/projects/KHB/KHB.h +++ b/projects/KHB/KHB.h @@ -45,6 +45,11 @@ namespace projects { creal& dvx, creal& dvy, creal& dvz, const uint popID ) const; + virtual void setProjectBField( + FsGrid< std::array, 2> & perBGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); protected: Real getDistribValue( creal& x, creal& z, From b7d403ee1812993b1dc2c8fe4bae583620270628 Mon Sep 17 00:00:00 2001 From: ykempf Date: Mon, 15 Apr 2019 10:50:55 +0300 Subject: [PATCH 313/602] setProjectBField for MultiPeak --- projects/MultiPeak/MultiPeak.cpp | 36 ++++++++++++++++++++++---------- projects/MultiPeak/MultiPeak.h | 1 + 2 files changed, 26 insertions(+), 11 deletions(-) diff --git a/projects/MultiPeak/MultiPeak.cpp b/projects/MultiPeak/MultiPeak.cpp index 
f8a598d42..bebaaad7c 100644 --- a/projects/MultiPeak/MultiPeak.cpp +++ b/projects/MultiPeak/MultiPeak.cpp @@ -215,21 +215,11 @@ namespace projects { void MultiPeak::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { Real* cellParams = cell->get_cell_parameters(); setRandomCellSeed(cell,cellParams); - - if (this->lambda != 0.0) { - cellParams[CellParams::PERBX] = this->dBx*cos(2.0 * M_PI * cellParams[CellParams::XCRD] / this->lambda); - cellParams[CellParams::PERBY] = this->dBy*sin(2.0 * M_PI * cellParams[CellParams::XCRD] / this->lambda); - cellParams[CellParams::PERBZ] = this->dBz*cos(2.0 * M_PI * cellParams[CellParams::XCRD] / this->lambda); - } - - cellParams[CellParams::PERBX] += this->magXPertAbsAmp * (0.5 - getRandomNumber()); - cellParams[CellParams::PERBY] += this->magYPertAbsAmp * (0.5 - getRandomNumber()); - cellParams[CellParams::PERBZ] += this->magZPertAbsAmp * (0.5 - getRandomNumber()); - rhoRnd = 0.5 - getRandomNumber(); } void MultiPeak::setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { @@ -239,6 +229,30 @@ namespace projects { this->Bz); setBackgroundField(bgField, BgBGrid); + + auto localSize = perBGrid.getLocalSize(); + + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + const int64_t cellid = perBGrid.GlobalIDForCoords(x, y, z); + setRandomSeed(cellid); + + if (this->lambda != 0.0) { + cell->at(fsgrids::bfield::PERBX) = this->dBx*cos(2.0 * M_PI * xyz[0] / this->lambda); + cell->at(fsgrids::bfield::PERBY) = this->dBy*sin(2.0 * M_PI * xyz[0] / this->lambda); + cell->at(fsgrids::bfield::PERBZ) = this->dBz*cos(2.0 * M_PI * xyz[0] / this->lambda); + } + + cell->at(fsgrids::bfield::PERBX) += this->magXPertAbsAmp * (0.5 - getRandomNumber()); + cell->at(fsgrids::bfield::PERBY) += this->magYPertAbsAmp * (0.5 - getRandomNumber()); + cell->at(fsgrids::bfield::PERBZ) += this->magZPertAbsAmp * (0.5 - getRandomNumber()); + } + } + } } std::vector > MultiPeak::getV0( diff --git a/projects/MultiPeak/MultiPeak.h b/projects/MultiPeak/MultiPeak.h index 26491f86c..1728141bc 100644 --- a/projects/MultiPeak/MultiPeak.h +++ b/projects/MultiPeak/MultiPeak.h @@ -62,6 +62,7 @@ namespace projects { static void addParameters(void); virtual void getParameters(void); virtual void setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); From 2d69169379253f0f1405f273b81f08857a9422df Mon Sep 17 00:00:00 2001 From: ykempf Date: Mon, 15 Apr 2019 10:58:50 +0300 Subject: [PATCH 314/602] setProjectBField and nSpaceSamples == 1 for Riemann1 --- projects/Riemann1/Riemann1.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/projects/Riemann1/Riemann1.h b/projects/Riemann1/Riemann1.h index 4f99a81d6..50f00387c 100644 --- a/projects/Riemann1/Riemann1.h +++ b/projects/Riemann1/Riemann1.h @@ -36,7 +36,11 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); - + virtual void setProjectBField( + FsGrid< std::array, 2> & perBGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); protected: Real getDistribValue( creal& x,creal& y, creal& z, From ea3128ab212329876684f054898eba5cda38219f Mon Sep 17 
00:00:00 2001 From: ykempf Date: Mon, 15 Apr 2019 11:04:29 +0300 Subject: [PATCH 315/602] Including the cpp for Riemann1... --- projects/Riemann1/Riemann1.cpp | 64 +++++++++++++++++++++------------- 1 file changed, 40 insertions(+), 24 deletions(-) diff --git a/projects/Riemann1/Riemann1.cpp b/projects/Riemann1/Riemann1.cpp index 08b370c66..edcc813c0 100644 --- a/projects/Riemann1/Riemann1.cpp +++ b/projects/Riemann1/Riemann1.cpp @@ -97,7 +97,6 @@ namespace projects { exp(- physicalconstants::MASS_PROTON * (pow(vx - this->Vx[side], 2.0) + pow(vy - this->Vy[side], 2.0) + pow(vz - this->Vz[side], 2.0)) / (2.0 * physicalconstants::K_B * this->T[side])); } - Real Riemann1::calcPhaseSpaceDensity(creal& x, creal& y, creal& z, creal& dx, creal& dy, creal& dz, creal& vx, creal& vy, creal& vz, creal& dvx, creal& dvy, creal& dvz,const uint popID) const { creal d_x = dx / (this->nSpaceSamples-1); creal d_y = dy / (this->nSpaceSamples-1); @@ -118,31 +117,48 @@ namespace projects { return avg / pow(this->nSpaceSamples, 3.0) / pow(this->nVelocitySamples, 3.0); } - - void Riemann1::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - creal x = cellParams[CellParams::XCRD]; - creal dx = cellParams[CellParams::DX]; + void Riemann1::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { } + + void Riemann1::setProjectBField( + FsGrid< std::array, 2> & perBGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { + setBackgroundFieldToZero(BgBGrid); - Real Bxavg, Byavg, Bzavg; - Bxavg = Byavg = Bzavg = 0.0; - Real d_x = dx / (this->nSpaceSamples - 1); + auto localSize = perBGrid.getLocalSize(); - for (uint i=0; inSpaceSamples; ++i) - for (uint j=0; jnSpaceSamples; ++j) - for (uint k=0; knSpaceSamples; ++k) { - Bxavg += ((x + i * d_x) < 0.0) ? this->Bx[this->LEFT] : this->Bx[this->RIGHT]; - Byavg += ((x + i * d_x) < 0.0) ? this->By[this->LEFT] : this->By[this->RIGHT]; - Bzavg += ((x + i * d_x) < 0.0) ? this->Bz[this->LEFT] : this->Bz[this->RIGHT]; + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + Real Bxavg, Byavg, Bzavg; + Bxavg = Byavg = Bzavg = 0.0; + if(this->nSpaceSamples > 1) { + Real d_x = perBGrid.DX / (this->nSpaceSamples - 1); + Real d_z = perBGrid.DZ / (this->nSpaceSamples - 1); + for (uint i=0; inSpaceSamples; ++i) { + for (uint k=0; knSpaceSamples; ++k) { + Bxavg += ((xyz[0] + i * d_x) < 0.0) ? this->Bx[this->LEFT] : this->Bx[this->RIGHT]; + Byavg += ((xyz[0] + i * d_x) < 0.0) ? this->By[this->LEFT] : this->By[this->RIGHT]; + Bzavg += ((xyz[0] + i * d_x) < 0.0) ? this->Bz[this->LEFT] : this->Bz[this->RIGHT]; + } + } + cuint nPts = pow(this->nSpaceSamples, 3.0); + + cell->at(fsgrids::bfield::PERBX) = Bxavg / nPts; + cell->at(fsgrids::bfield::PERBY) = Byavg / nPts; + cell->at(fsgrids::bfield::PERBZ) = Bzavg / nPts; + } else { + cell->at(fsgrids::bfield::PERBX) = (xyz[0] < 0.0) ? this->Bx[this->LEFT] : this->Bx[this->RIGHT]; + cell->at(fsgrids::bfield::PERBY) = (xyz[0] < 0.0) ? this->By[this->LEFT] : this->By[this->RIGHT]; + cell->at(fsgrids::bfield::PERBZ) = (xyz[0] < 0.0) ? 
this->Bz[this->LEFT] : this->Bz[this->RIGHT]; + } + } } - cuint nPts = pow(this->nSpaceSamples, 3.0); - - cellParams[CellParams::EX ] = 0.0; - cellParams[CellParams::EY ] = 0.0; - cellParams[CellParams::EZ ] = 0.0; - //FIXME, this field could also be in background field, but a simple bgfield class would then need to be defined, also, first derivatives are not so well defined for a step... - cellParams[CellParams::PERBX ] = Bxavg / nPts; - cellParams[CellParams::PERBY ] = Byavg / nPts; - cellParams[CellParams::PERBZ ] = Bzavg / nPts; + } } } From 08f68e45a0da27f3c12383a251dd42c06bc28bef Mon Sep 17 00:00:00 2001 From: ykempf Date: Mon, 15 Apr 2019 11:05:00 +0300 Subject: [PATCH 316/602] setProjectBField for Shock --- projects/Shock/Shock.cpp | 39 +++++++++++++++++++++++---------------- projects/Shock/Shock.h | 5 +++++ 2 files changed, 28 insertions(+), 16 deletions(-) diff --git a/projects/Shock/Shock.cpp b/projects/Shock/Shock.cpp index d8aa600d8..dd9eb567d 100644 --- a/projects/Shock/Shock.cpp +++ b/projects/Shock/Shock.cpp @@ -95,7 +95,6 @@ namespace projects { //*exp(-pow(x-Parameters::xmax/2.0, 2.0)/pow(this->SCA_X, 2.0))*exp(-pow(y-Parameters::ymax/4.0, 2.0)/pow(this->SCA_Y, 2.0)); } - Real Shock::calcPhaseSpaceDensity(creal& x, creal& y, creal& z, creal& dx, creal& dy, creal& dz, creal& vx, creal& vy, creal& vz, creal& dvx, creal& dvy, creal& dvz,const uint popID) const { const size_t meshID = getObjectWrapper().particleSpecies[popID].velocityMesh; @@ -143,21 +142,29 @@ namespace projects { } } - void Shock::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - creal x = cellParams[CellParams::XCRD]; - creal dx = cellParams[CellParams::DX]; - creal y = cellParams[CellParams::YCRD]; - creal dy = cellParams[CellParams::DY]; - creal z = cellParams[CellParams::ZCRD]; - creal dz = cellParams[CellParams::DZ]; + void Shock::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { } + + void Shock::setProjectBField( + FsGrid< std::array, 2> & perBGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { + setBackgroundFieldToZero(BgBGrid); + + auto localSize = perBGrid.getLocalSize(); - cellParams[CellParams::EX ] = 0.0; - cellParams[CellParams::EY ] = 0.0; - cellParams[CellParams::EZ ] = 0.0; - cellParams[CellParams::PERBX ] = 0.0; - cellParams[CellParams::PERBY ] = 0.0; - cellParams[CellParams::PERBZ ] = this->BZ0*(3.0 + 2.0*tanh((y - Parameters::ymax/2.0)/(this->Sharp_Y*Parameters::ymax))); + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + cell->at(fsgrids::bfield::PERBX) = 0.0; + cell->at(fsgrids::bfield::PERBY) = 0.0; + cell->at(fsgrids::bfield::PERBZ) = this->BZ0*(3.0 + 2.0*tanh((xyz[1] - Parameters::ymax/2.0)/(this->Sharp_Y*Parameters::ymax))); + } + } + } } - }//namespace projects diff --git a/projects/Shock/Shock.h b/projects/Shock/Shock.h index 011cb6df5..b8b1cbf25 100644 --- a/projects/Shock/Shock.h +++ b/projects/Shock/Shock.h @@ -35,6 +35,11 @@ namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); + virtual void setProjectBField( + FsGrid< std::array, 2> & perBGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); protected: Real getDistribValue( From 
541cf209af3f65f1a484783e2aa22e3fb4773cef Mon Sep 17 00:00:00 2001 From: ykempf Date: Mon, 15 Apr 2019 11:09:25 +0300 Subject: [PATCH 317/602] setProjectBField and nSpaceSamples == 1 for Shocktest --- projects/Shocktest/Shocktest.cpp | 73 +++++++++++++++++--------------- projects/Shocktest/Shocktest.h | 1 + 2 files changed, 39 insertions(+), 35 deletions(-) diff --git a/projects/Shocktest/Shocktest.cpp b/projects/Shocktest/Shocktest.cpp index c3e2983a9..be44ee460 100644 --- a/projects/Shocktest/Shocktest.cpp +++ b/projects/Shocktest/Shocktest.cpp @@ -185,50 +185,53 @@ namespace projects { } /** Calculate parameters for the given spatial cell at the given time. - * Here you need to set values for the following array indices: - * CellParams::EX, CellParams::EY, CellParams::EZ, CellParams::BX, CellParams::BY, and CellParams::BZ. - * - * The following array indices contain the coordinates of the "lower left corner" of the cell: - * CellParams::XCRD, CellParams::YCRD, and CellParams::ZCRD. - * The cell size is given in the following array indices: CellParams::DX, CellParams::DY, and CellParams::DZ. * @param cellParams Array containing cell parameters. * @param t The current value of time. This is passed as a convenience. If you need more detailed information * of the state of the simulation, you can read it from Parameters. */ - void Shocktest::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - creal x = cellParams[CellParams::XCRD]; - creal dx = cellParams[CellParams::DX]; - - Real Bxavg, Byavg, Bzavg; - Bxavg = Byavg = Bzavg = 0.0; - Real d_x = dx / (this->nSpaceSamples - 1); - - for (uint i=0; inSpaceSamples; ++i) - for (uint j=0; jnSpaceSamples; ++j) - for (uint k=0; knSpaceSamples; ++k) { - Bxavg += ((x + i * d_x) < 0.0) ? this->Bx[this->LEFT] : this->Bx[this->RIGHT]; - Byavg += ((x + i * d_x) < 0.0) ? this->By[this->LEFT] : this->By[this->RIGHT]; - Bzavg += ((x + i * d_x) < 0.0) ? this->Bz[this->LEFT] : this->Bz[this->RIGHT]; - } - cuint nPts = pow(this->nSpaceSamples, 3.0); - - cellParams[CellParams::EX ] = 0.0; - cellParams[CellParams::EY ] = 0.0; - cellParams[CellParams::EZ ] = 0.0; - cellParams[CellParams::PERBX ] = Bxavg / nPts; - cellParams[CellParams::PERBY ] = Byavg / nPts; - cellParams[CellParams::PERBZ ] = Bzavg / nPts; - } + void Shocktest::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { } - void Shocktest::setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { - ConstantField bgField; - bgField.initialize(0,0,0); //bg bx, by,bz - setBackgroundField(bgField, BgBGrid); + setBackgroundFieldToZero(BgBGrid); + + auto localSize = perBGrid.getLocalSize(); + + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + Real Bxavg, Byavg, Bzavg; + Bxavg = Byavg = Bzavg = 0.0; + if(this->nSpaceSamples > 1) { + Real d_x = perBGrid.DX / (this->nSpaceSamples - 1); + Real d_z = perBGrid.DZ / (this->nSpaceSamples - 1); + for (uint i=0; inSpaceSamples; ++i) { + for (uint k=0; knSpaceSamples; ++k) { + Bxavg += ((xyz[0] + i * d_x) < 0.0) ? this->Bx[this->LEFT] : this->Bx[this->RIGHT]; + Byavg += ((xyz[0] + i * d_x) < 0.0) ? this->By[this->LEFT] : this->By[this->RIGHT]; + Bzavg += ((xyz[0] + i * d_x) < 0.0) ? 
this->Bz[this->LEFT] : this->Bz[this->RIGHT]; + } + } + cuint nPts = pow(this->nSpaceSamples, 3.0); + + cell->at(fsgrids::bfield::PERBX) = Bxavg / nPts; + cell->at(fsgrids::bfield::PERBY) = Byavg / nPts; + cell->at(fsgrids::bfield::PERBZ) = Bzavg / nPts; + } else { + cell->at(fsgrids::bfield::PERBX) = (xyz[0] < 0.0) ? this->Bx[this->LEFT] : this->Bx[this->RIGHT]; + cell->at(fsgrids::bfield::PERBY) = (xyz[0] < 0.0) ? this->By[this->LEFT] : this->By[this->RIGHT]; + cell->at(fsgrids::bfield::PERBZ) = (xyz[0] < 0.0) ? this->Bz[this->LEFT] : this->Bz[this->RIGHT]; + } + } + } + } } } // Namespace projects diff --git a/projects/Shocktest/Shocktest.h b/projects/Shocktest/Shocktest.h index 7266ecd53..a9ceff1ab 100644 --- a/projects/Shocktest/Shocktest.h +++ b/projects/Shocktest/Shocktest.h @@ -63,6 +63,7 @@ namespace projects { const uint popID ) const; virtual void setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); From e447a7139188ff282d91ddee7936d8b6769eaa72 Mon Sep 17 00:00:00 2001 From: ykempf Date: Mon, 15 Apr 2019 11:51:47 +0300 Subject: [PATCH 318/602] setProjectBField for test_fp --- projects/test_fp/test_fp.cpp | 121 +++++++++++++++++++---------------- projects/test_fp/test_fp.h | 1 + 2 files changed, 66 insertions(+), 56 deletions(-) diff --git a/projects/test_fp/test_fp.cpp b/projects/test_fp/test_fp.cpp index 44036c2b9..ab5dab88e 100644 --- a/projects/test_fp/test_fp.cpp +++ b/projects/test_fp/test_fp.cpp @@ -106,71 +106,80 @@ namespace projects { } void test_fp::setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { setBackgroundFieldToZero(BgBGrid); + + auto localSize = perBGrid.getLocalSize(); + + creal dx = perBGrid.DX * 3.5; + creal dy = perBGrid.DY * 3.5; + creal dz = perBGrid.DZ * 3.5; + + Real areaFactor = 1.0; + + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + creal x = xyz[0] + 0.5 * perBGrid.DX; + creal y = xyz[1] + 0.5 * perBGrid.DY; + creal z = xyz[2] + 0.5 * perBGrid.DZ; + + switch (this->CASE) { + case BXCASE: + cell->at(fsgrids::bfield::PERBX) = 0.1 * this->B0 * areaFactor; + //areaFactor = (CellParams::DY * CellParams::DZ) / (dy * dz); + if (y >= -dy && y <= dy) + if (z >= -dz && z <= dz) + cell->at(fsgrids::bfield::PERBX) = this->B0 * areaFactor; + break; + case BYCASE: + cell->at(fsgrids::bfield::PERBY) = 0.1 * this->B0 * areaFactor; + //areaFactor = (CellParams::DX * CellParams::DZ) / (dx * dz); + if (x >= -dx && x <= dx) + if (z >= -dz && z <= dz) + cell->at(fsgrids::bfield::PERBY) = this->B0 * areaFactor; + break; + case BZCASE: + cell->at(fsgrids::bfield::PERBZ) = 0.1 * this->B0 * areaFactor; + //areaFactor = (CellParams::DX * CellParams::DY) / (dx * dy); + if (x >= -dx && x <= dx) + if (y >= -dy && y <= dy) + cell->at(fsgrids::bfield::PERBZ) = this->B0 * areaFactor; + break; + case BALLCASE: + cell->at(fsgrids::bfield::PERBX) = 0.1 * this->B0 * areaFactor; + cell->at(fsgrids::bfield::PERBY) = 0.1 * this->B0 * areaFactor; + cell->at(fsgrids::bfield::PERBZ) = 0.1 * this->B0 * areaFactor; + + //areaFactor = (CellParams::DX * CellParams::DY) / (dx * dy); + + if (y >= -dy && y <= dy) + if (z >= -dz && z <= dz) + cell->at(fsgrids::bfield::PERBX) = this->B0 * areaFactor; + if 
(x >= -dx && x <= dx) + if (z >= -dz && z <= dz) + cell->at(fsgrids::bfield::PERBY) = this->B0 * areaFactor; + if (x >= -dx && x <= dx) + if (y >= -dy && y <= dy) + cell->at(fsgrids::bfield::PERBZ) = this->B0 * areaFactor; + break; + } + } + } + } } void test_fp::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - cellParams[CellParams::EX ] = 0.0; - cellParams[CellParams::EY ] = 0.0; - cellParams[CellParams::EZ ] = 0.0; - cellParams[CellParams::PERBX ] = 0.0; - cellParams[CellParams::PERBY ] = 0.0; - cellParams[CellParams::PERBZ ] = 0.0; typedef Parameters P; - creal dx = P::dx_ini * 3.5; - creal dy = P::dy_ini * 3.5; - creal dz = P::dz_ini * 3.5; - creal x = cellParams[CellParams::XCRD] + 0.5 * cellParams[CellParams::DX]; - creal y = cellParams[CellParams::YCRD] + 0.5 * cellParams[CellParams::DY]; - creal z = cellParams[CellParams::ZCRD] + 0.5 * cellParams[CellParams::DZ]; - - Real areaFactor = 1.0; - - switch (this->CASE) { - case BXCASE: - cellParams[CellParams::PERBX] = 0.1 * this->B0 * areaFactor; - //areaFactor = (CellParams::DY * CellParams::DZ) / (dy * dz); - if (y >= -dy && y <= dy) - if (z >= -dz && z <= dz) - cellParams[CellParams::PERBX] = this->B0 * areaFactor; - break; - case BYCASE: - cellParams[CellParams::PERBY] = 0.1 * this->B0 * areaFactor; - //areaFactor = (CellParams::DX * CellParams::DZ) / (dx * dz); - if (x >= -dx && x <= dx) - if (z >= -dz && z <= dz) - cellParams[CellParams::PERBY] = this->B0 * areaFactor; - break; - case BZCASE: - cellParams[CellParams::PERBZ] = 0.1 * this->B0 * areaFactor; - //areaFactor = (CellParams::DX * CellParams::DY) / (dx * dy); - if (x >= -dx && x <= dx) - if (y >= -dy && y <= dy) - cellParams[CellParams::PERBZ] = this->B0 * areaFactor; - break; - case BALLCASE: - cellParams[CellParams::PERBX] = 0.1 * this->B0 * areaFactor; - cellParams[CellParams::PERBY] = 0.1 * this->B0 * areaFactor; - cellParams[CellParams::PERBZ] = 0.1 * this->B0 * areaFactor; - - //areaFactor = (CellParams::DX * CellParams::DY) / (dx * dy); - - if (y >= -dy && y <= dy) - if (z >= -dz && z <= dz) - cellParams[CellParams::PERBX] = this->B0 * areaFactor; - if (x >= -dx && x <= dx) - if (z >= -dz && z <= dz) - cellParams[CellParams::PERBY] = this->B0 * areaFactor; - if (x >= -dx && x <= dx) - if (y >= -dy && y <= dy) - cellParams[CellParams::PERBZ] = this->B0 * areaFactor; - break; - } + } vector> test_fp::getV0( diff --git a/projects/test_fp/test_fp.h b/projects/test_fp/test_fp.h index 44cfb8be9..fe165e619 100644 --- a/projects/test_fp/test_fp.h +++ b/projects/test_fp/test_fp.h @@ -38,6 +38,7 @@ namespace projects { static void addParameters(void); virtual void getParameters(void); virtual void setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); From 20cd370d5ddaf6d6ec153f91ec36b7eb0217d863 Mon Sep 17 00:00:00 2001 From: ykempf Date: Mon, 15 Apr 2019 12:04:57 +0300 Subject: [PATCH 319/602] Simplified interface of setRandomCellSeed --- projects/Distributions/Distributions.cpp | 3 +-- projects/MultiPeak/MultiPeak.cpp | 3 +-- projects/project.cpp | 14 +++++++------- projects/project.h | 4 +--- 4 files changed, 10 insertions(+), 14 deletions(-) diff --git a/projects/Distributions/Distributions.cpp b/projects/Distributions/Distributions.cpp index 6516115d2..b4d7d13e4 100644 --- a/projects/Distributions/Distributions.cpp +++ b/projects/Distributions/Distributions.cpp @@ -150,8 +150,7 @@ namespace projects { } void 
Distributions::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - setRandomCellSeed(cell,cellParams); + setRandomCellSeed(cell); for (uint i=0; i<2; i++) { this->rhoRnd[i] = this->rho[i] + this->rhoPertAbsAmp[i] * (0.5 - getRandomNumber()); } diff --git a/projects/MultiPeak/MultiPeak.cpp b/projects/MultiPeak/MultiPeak.cpp index bebaaad7c..27c937e82 100644 --- a/projects/MultiPeak/MultiPeak.cpp +++ b/projects/MultiPeak/MultiPeak.cpp @@ -213,8 +213,7 @@ namespace projects { } void MultiPeak::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - setRandomCellSeed(cell,cellParams); + setRandomCellSeed(cell); rhoRnd = 0.5 - getRandomNumber(); } diff --git a/projects/project.cpp b/projects/project.cpp index 05e11c297..b72e55107 100644 --- a/projects/project.cpp +++ b/projects/project.cpp @@ -499,13 +499,13 @@ namespace projects { \param cellParams The cell parameters list in each spatial cell */ - void Project::setRandomCellSeed(spatial_cell::SpatialCell* cell,const Real* const cellParams) const { - const creal x = cellParams[CellParams::XCRD]; - const creal y = cellParams[CellParams::YCRD]; - const creal z = cellParams[CellParams::ZCRD]; - const creal dx = cellParams[CellParams::DX]; - const creal dy = cellParams[CellParams::DY]; - const creal dz = cellParams[CellParams::DZ]; + void Project::setRandomCellSeed(spatial_cell::SpatialCell* cell) const { + const creal x = cell->parameters[CellParams::XCRD]; + const creal y = cell->parameters[CellParams::YCRD]; + const creal z = cell->parameters[CellParams::ZCRD]; + const creal dx = cell->parameters[CellParams::DX]; + const creal dy = cell->parameters[CellParams::DY]; + const creal dz = cell->parameters[CellParams::DZ]; const CellID cellID = (int) ((x - Parameters::xmin) / dx) + (int) ((y - Parameters::ymin) / dy) * Parameters::xcells_ini + diff --git a/projects/project.h b/projects/project.h index 7bd09a442..2a2e3cd32 100644 --- a/projects/project.h +++ b/projects/project.h @@ -163,10 +163,8 @@ namespace projects { Set random seed (thread-safe) that is always the same for this particular cellID. Can be used to make reproducible simulations that do not depend on number of processes or threads. 
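For readability, the simplified helper reads roughly as follows once the hunks above and below are applied. This is an illustrative reconstruction, not part of the diff; in particular, the z term of the cell index, which the hunk above truncates, is assumed to follow the usual row-major formula.

void Project::setRandomCellSeed(spatial_cell::SpatialCell* cell) const {
   // Coordinates now come from the cell itself, which is what lets callers
   // such as Distributions and MultiPeak drop the cellParams argument.
   const creal x = cell->parameters[CellParams::XCRD];
   const creal y = cell->parameters[CellParams::YCRD];
   const creal z = cell->parameters[CellParams::ZCRD];
   const creal dx = cell->parameters[CellParams::DX];
   const creal dy = cell->parameters[CellParams::DY];
   const creal dz = cell->parameters[CellParams::DZ];

   // Global cell index rebuilt from physical coordinates, so the seed is
   // identical for any number of processes or threads.
   const CellID cellID = (int) ((x - Parameters::xmin) / dx)
      + (int) ((y - Parameters::ymin) / dy) * Parameters::xcells_ini
      + (int) ((z - Parameters::zmin) / dz) * Parameters::ycells_ini * Parameters::xcells_ini; // z term assumed

   setRandomSeed(cellID);
}
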
- * - \param cellParams The cell parameters list in each spatial cell */ - void setRandomCellSeed(spatial_cell::SpatialCell* cell,const Real* const cellParams) const; + void setRandomCellSeed(spatial_cell::SpatialCell* cell) const; private: uint seed; From 4a90d056b0fdb40680fd139784c58a0e118862de Mon Sep 17 00:00:00 2001 From: ykempf Date: Mon, 15 Apr 2019 12:05:20 +0300 Subject: [PATCH 320/602] setProjectBField for testAmr including new setCellRandomSeed interface --- projects/testAmr/testAmr.cpp | 41 ++++++++++++++++++++++++------------ projects/testAmr/testAmr.h | 1 + 2 files changed, 29 insertions(+), 13 deletions(-) diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index 45c2783bb..4da00888e 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -214,23 +214,12 @@ namespace projects { } void testAmr::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - setRandomCellSeed(cell,cellParams); - - if (this->lambda != 0.0) { - cellParams[CellParams::PERBX] = this->dBx*cos(2.0 * M_PI * cellParams[CellParams::XCRD] / this->lambda); - cellParams[CellParams::PERBY] = this->dBy*sin(2.0 * M_PI * cellParams[CellParams::XCRD] / this->lambda); - cellParams[CellParams::PERBZ] = this->dBz*cos(2.0 * M_PI * cellParams[CellParams::XCRD] / this->lambda); - } - - cellParams[CellParams::PERBX] += this->magXPertAbsAmp * (0.5 - getRandomNumber()); - cellParams[CellParams::PERBY] += this->magYPertAbsAmp * (0.5 - getRandomNumber()); - cellParams[CellParams::PERBZ] += this->magZPertAbsAmp * (0.5 - getRandomNumber()); - + setRandomCellSeed(cell); rhoRnd = 0.5 - getRandomNumber(); } void testAmr::setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { @@ -240,6 +229,32 @@ namespace projects { this->Bz); setBackgroundField(bgField, BgBGrid); + + auto localSize = perBGrid.getLocalSize(); + + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + const int64_t cellid = perBGrid.GlobalIDForCoords(x, y, z); + + setRandomSeed(cellid); + + if (this->lambda != 0.0) { + cell->at(fsgrids::bfield::PERBX) = this->dBx*cos(2.0 * M_PI * xyz[0] / this->lambda); + cell->at(fsgrids::bfield::PERBY) = this->dBy*sin(2.0 * M_PI * xyz[0] / this->lambda); + cell->at(fsgrids::bfield::PERBZ) = this->dBz*cos(2.0 * M_PI * xyz[0] / this->lambda); + } + + cell->at(fsgrids::bfield::PERBX) += this->magXPertAbsAmp * (0.5 - getRandomNumber()); + cell->at(fsgrids::bfield::PERBY) += this->magYPertAbsAmp * (0.5 - getRandomNumber()); + cell->at(fsgrids::bfield::PERBZ) += this->magZPertAbsAmp * (0.5 - getRandomNumber()); + } + } + } } std::vector > testAmr::getV0( diff --git a/projects/testAmr/testAmr.h b/projects/testAmr/testAmr.h index 9a7318b05..19e9c6481 100644 --- a/projects/testAmr/testAmr.h +++ b/projects/testAmr/testAmr.h @@ -62,6 +62,7 @@ namespace projects { static void addParameters(void); virtual void getParameters(void); virtual void setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); From afd0ec9b51a8fa5e960b1050f3a69f9f2a680369 Mon Sep 17 00:00:00 2001 From: ykempf Date: Mon, 15 Apr 2019 13:30:56 +0300 Subject: [PATCH 321/602] setProjectBField in 
testHall --- projects/testHall/testHall.cpp | 98 ++++++++++------------------------ projects/testHall/testHall.h | 8 ++- 2 files changed, 34 insertions(+), 72 deletions(-) diff --git a/projects/testHall/testHall.cpp b/projects/testHall/testHall.cpp index e703c22a4..24bef05a9 100644 --- a/projects/testHall/testHall.cpp +++ b/projects/testHall/testHall.cpp @@ -92,73 +92,7 @@ namespace projects { exp(- mass * (pow(vx + 0.5 * dvx - this->VX0, 2.0) + pow(vy + 0.5 * dvy - this->VY0, 2.0) + pow(vz + 0.5 * dvz - this->VZ0, 2.0)) / (2.0 * kb * this->TEMPERATURE))); } -// void TestHall::setProjectBField(SpatialCell *cell){ -// Dipole bgField; -// bgField.initialize(8e15 *this->dipoleScalingFactor,this->dipoleTilt); //set dipole moment -// if(cell->sysBoundaryFlag == sysboundarytype::SET_MAXWELLIAN && this->noDipoleInSW) { -// setBackgroundFieldToZero(cell->parameters, cell->derivatives,cell->derivativesBVOL); -// } else { -// setBackgroundField(bgField,cell->parameters, cell->derivatives,cell->derivativesBVOL); -// } -// -// cell->parameters[CellParams::EX ] = 0.0; -// cell->parameters[CellParams::EY ] = 0.0; -// cell->parameters[CellParams::EZ ] = 0.0; -// cell->parameters[CellParams::PERBX ] = cell->parameters[CellParams::BGBX]; -// cell->parameters[CellParams::BGBX ] = 0.0; -// cell->parameters[CellParams::BGBXVOL] = 0.0; -// cell->parameters[CellParams::PERBY ] = cell->parameters[CellParams::BGBY]; -// cell->parameters[CellParams::BGBY ] = 0.0; -// cell->parameters[CellParams::BGBYVOL] = 0.0; -// cell->parameters[CellParams::PERBZ ] = cell->parameters[CellParams::BGBZ]; -// cell->parameters[CellParams::BGBZ ] = 0.0; -// cell->parameters[CellParams::BGBZVOL] = 0.0; -// -// cell->derivatives[fieldsolver::dBGBydx]=0.0; -// cell->derivatives[fieldsolver::dBGBzdx]=0.0; -// cell->derivatives[fieldsolver::dBGBxdy]=0.0; -// cell->derivatives[fieldsolver::dBGBxdz]=0.0; -// cell->derivativesBVOL[bvolderivatives::dBGBYVOLdx]=0.0; -// cell->derivativesBVOL[bvolderivatives::dBGBZVOLdx]=0.0; -// cell->derivativesBVOL[bvolderivatives::dBGBXVOLdy]=0.0; -// cell->derivativesBVOL[bvolderivatives::dBGBXVOLdz]=0.0; -// cell->derivatives[fieldsolver::dBGBxdy]=0.0; -// cell->derivatives[fieldsolver::dBGBzdy]=0.0; -// cell->derivatives[fieldsolver::dBGBydx]=0.0; -// cell->derivatives[fieldsolver::dBGBydz]=0.0; -// cell->derivativesBVOL[bvolderivatives::dBGBXVOLdy]=0.0; -// cell->derivativesBVOL[bvolderivatives::dBGBZVOLdy]=0.0; -// cell->derivativesBVOL[bvolderivatives::dBGBYVOLdx]=0.0; -// cell->derivativesBVOL[bvolderivatives::dBGBYVOLdz]=0.0; -// cell->derivatives[fieldsolver::dBGBxdy]=0.0; -// cell->derivatives[fieldsolver::dBGBxdz]=0.0; -// cell->derivatives[fieldsolver::dBGBydx]=0.0; -// cell->derivatives[fieldsolver::dBGBydz]=0.0; -// cell->derivativesBVOL[bvolderivatives::dBGBXVOLdy]=0.0; -// cell->derivativesBVOL[bvolderivatives::dBGBXVOLdz]=0.0; -// cell->derivativesBVOL[bvolderivatives::dBGBYVOLdx]=0.0; -// cell->derivativesBVOL[bvolderivatives::dBGBYVOLdz]=0.0; -// -// for(uint component=0; component<3; component++) { -// if(this->constBgB[component] != 0.0) { -// cell->parameters[CellParams::BGBX+component] += this->constBgB[component]; -// cell->parameters[CellParams::BGBXVOL+component] += this->constBgB[component]; -// } -// } -// } -// - void TestHall::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - creal x = cellParams[CellParams::XCRD]; - creal dx = cellParams[CellParams::DX]; - creal y = cellParams[CellParams::YCRD]; - creal dy = 
cellParams[CellParams::DY]; - creal z = cellParams[CellParams::ZCRD]; - creal dz = cellParams[CellParams::DZ]; - - creal Dx = 0.5*dx; - creal Dy = 0.5*dy; - creal Dz = 0.5*dz; + void TestHall::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { } // creal r = sqrt((x+Dx)*(x+Dx) + (y+Dy)*(y+Dy)); // creal theta = atan2(y+Dy, x+Dx); @@ -174,9 +108,9 @@ namespace projects { // cellParams[CellParams::PERBY] = this->BY0 * z; // cellParams[CellParams::PERBZ] = this->BZ0 * x; - cellParams[CellParams::PERBX] = this->BX0 * cos(2.0*M_PI * 1.0 * x / (P::xmax - P::xmin)) * cos(2.0*M_PI * 1.0 * y / (P::ymax - P::ymin)) * cos(2.0*M_PI * 1.0 * z / (P::zmax - P::zmin)); - cellParams[CellParams::PERBY] = this->BY0 * cos(2.0*M_PI * 1.0 * x / (P::xmax - P::xmin)) * cos(2.0*M_PI * 1.0 * y / (P::ymax - P::ymin)) * cos(2.0*M_PI * 1.0 * z / (P::zmax - P::zmin)); - cellParams[CellParams::PERBZ] = this->BZ0 * cos(2.0*M_PI * 1.0 * x / (P::xmax - P::xmin)) * cos(2.0*M_PI * 1.0 * y / (P::ymax - P::ymin)) * cos(2.0*M_PI * 1.0 * z / (P::zmax - P::zmin)); +// cellParams[CellParams::PERBX] = this->BX0 * cos(2.0*M_PI * 1.0 * x / (P::xmax - P::xmin)) * cos(2.0*M_PI * 1.0 * y / (P::ymax - P::ymin)) * cos(2.0*M_PI * 1.0 * z / (P::zmax - P::zmin)); +// cellParams[CellParams::PERBY] = this->BY0 * cos(2.0*M_PI * 1.0 * x / (P::xmax - P::xmin)) * cos(2.0*M_PI * 1.0 * y / (P::ymax - P::ymin)) * cos(2.0*M_PI * 1.0 * z / (P::zmax - P::zmin)); +// cellParams[CellParams::PERBZ] = this->BZ0 * cos(2.0*M_PI * 1.0 * x / (P::xmax - P::xmin)) * cos(2.0*M_PI * 1.0 * y / (P::ymax - P::ymin)) * cos(2.0*M_PI * 1.0 * z / (P::zmax - P::zmin)); // cellParams[CellParams::PERBX] = -1.0*(y+Dy) / ((x+Dx)*(x+Dx) + (y+Dy)*(y+Dy)); // cellParams[CellParams::PERBY] = (x+Dx) / ((x+Dx)*(x+Dx) + (y+Dy)*(y+Dy)); @@ -195,5 +129,29 @@ namespace projects { // cellParams[CellParams::PERBX ] = this->BX0 * (x+0.5*Dx)*(y+0.5*Dy)*(z+0.5*Dz); // cellParams[CellParams::PERBY ] = this->BY0 * (x+0.5*Dx)*(y+0.5*Dy)*(z+0.5*Dz)*(x+0.5*Dx)*(y+0.5*Dy)*(z+0.5*Dz); // cellParams[CellParams::PERBZ ] = this->BZ0 * (x+0.5*Dx)*(y+0.5*Dy)*(z+0.5*Dz)*(x+0.5*Dx)*(y+0.5*Dy)*(z+0.5*Dz)*(x+0.5*Dx)*(y+0.5*Dy)*(z+0.5*Dz); + + void TestHall::setProjectBField( + FsGrid< std::array, 2> & perBGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ) { + setBackgroundFieldToZero(BgBGrid); + + auto localSize = perBGrid.getLocalSize(); + + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + cell->at(fsgrids::bfield::PERBX) = this->BX0 * cos(2.0*M_PI * 1.0 * xyz[0] / (P::xmax - P::xmin)) * cos(2.0*M_PI * 1.0 * xyz[1] / (P::ymax - P::ymin)) * cos(2.0*M_PI * 1.0 * xyz[2] / (P::zmax - P::zmin)); + cell->at(fsgrids::bfield::PERBY) = this->BY0 * cos(2.0*M_PI * 1.0 * xyz[0] / (P::xmax - P::xmin)) * cos(2.0*M_PI * 1.0 * xyz[1] / (P::ymax - P::ymin)) * cos(2.0*M_PI * 1.0 * xyz[2] / (P::zmax - P::zmin)); + cell->at(fsgrids::bfield::PERBZ) = this->BZ0 * cos(2.0*M_PI * 1.0 * xyz[0] / (P::xmax - P::xmin)) * cos(2.0*M_PI * 1.0 * xyz[1] / (P::ymax - P::ymin)) * cos(2.0*M_PI * 1.0 * xyz[2] / (P::zmax - P::zmin)); + } + } + } } + } // namespace projects diff --git a/projects/testHall/testHall.h b/projects/testHall/testHall.h index 6de5eecf8..cf5a1c5b2 100644 --- a/projects/testHall/testHall.h +++ b/projects/testHall/testHall.h @@ -35,8 +35,12 @@ 
namespace projects { virtual bool initialize(void); static void addParameters(void); virtual void getParameters(void); -// virtual void setProjectBField(SpatialCell* cell); - virtual void calcCellParameters(spatial_cell::SpatialCell* cell,creal& t); + virtual void calcCellParameters(spatial_cell::SpatialCell* cell,creal& t); + virtual void setProjectBField( + FsGrid< std::array, 2> & perBGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid + ); virtual Real calcPhaseSpaceDensity( creal& x, creal& y, creal& z, creal& dx, creal& dy, creal& dz, From 6d3e3021d0ebf0482e6a12f50a5a9b6fb5a1dd21 Mon Sep 17 00:00:00 2001 From: ykempf Date: Mon, 15 Apr 2019 14:10:40 +0300 Subject: [PATCH 322/602] setProjectBField at top level. Compiles, testing pending. --- grid.cpp | 6 ++---- projects/project.cpp | 1 + projects/project.h | 20 +++++++++----------- 3 files changed, 12 insertions(+), 15 deletions(-) diff --git a/grid.cpp b/grid.cpp index 01aa76af5..3f3e6a27f 100644 --- a/grid.cpp +++ b/grid.cpp @@ -311,13 +311,11 @@ void initializeGrids( phiprof::stop("setupTechnicalFsGrid"); phiprof::start("setProjectBField"); - project.setProjectBField(BgBGrid, technicalGrid); + project.setProjectBField(perBGrid, BgBGrid, technicalGrid); phiprof::stop("setProjectBField"); phiprof::start("Finish fsgrid setup"); - // Transfer initial field configuration into the FsGrids - feedFieldDataIntoFsGrid(mpiGrid,cells,CellParams::PERBX,perBGrid); - + getFieldDataFromFsGrid(perBGrid, mpiGrid, cells, CellParams::PERBX); getBgFieldsAndDerivativesFromFsGrid(BgBGrid, mpiGrid, cells); BgBGrid.updateGhostCells(); diff --git a/projects/project.cpp b/projects/project.cpp index b72e55107..f13da0ecc 100644 --- a/projects/project.cpp +++ b/projects/project.cpp @@ -184,6 +184,7 @@ namespace projects { /*! Print a warning message to stderr and abort, one should not use the base class functions. */ void Project::setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { diff --git a/projects/project.h b/projects/project.h index 2a2e3cd32..9765a728a 100644 --- a/projects/project.h +++ b/projects/project.h @@ -53,12 +53,15 @@ namespace projects { bool initialized(); - /*! set background field on the background field fsgrid. - * Currently this function is only called during the initialization. - * @param BgBGrid Background field fsgrid - * @param technicalGrid Technical fsgrid + /** Set the background and perturbed magnetic fields for this project. + * \param perBGrid Grid on which values of the perturbed field can be set if needed. + * \param BgBGrid Grid on which values for the background field can be set if needed, e.g. using the background field functions. + * \param technicalGrid Technical fsgrid, available if some of its data is necessary. + * + * \sa setBackgroundField, setBackgroundFieldToZero */ virtual void setProjectBField( + FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ); @@ -100,19 +103,14 @@ namespace projects { */ void setVelocitySpace(const uint popID,spatial_cell::SpatialCell* cell) const; - /** Calculate parameters for the given spatial cell at the given time. 
- * Here you need to set values for the following array indices: - * CellParams::PERBX, CellParams::PERBY, and CellParams::PERBZ - * Set the following only if the field solver is not turned on and not initialising - * (if it is turned off, by default it will still compute the self-consistent values from RHO, RHO_V, B): - * CellParams::EX, CellParams::EY, CellParams::EZ + /** Calculate potentially needed parameters for the given spatial cell at the given time. * * Currently this function is only called during initialization. * * The following array indices contain the coordinates of the "lower left corner" of the cell: * CellParams::XCRD, CellParams::YCRD, and CellParams::ZCRD. * The cell size is given in the following array indices: CellParams::DX, CellParams::DY, and CellParams::DZ. - * @param cellParams Array containing cell parameters. + * @param cell Pointer to the spatial cell to be handled. * @param t The current value of time. This is passed as a convenience. If you need more detailed information * of the state of the simulation, you can read it from Parameters. */ From 77557bf3ea00307f00c4613a5b7ade4b252f90fb Mon Sep 17 00:00:00 2001 From: ykempf Date: Mon, 15 Apr 2019 16:15:52 +0300 Subject: [PATCH 323/602] Fix initialisation of periodicity. --- vlasiator.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 0836ab101..a116073ae 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -379,9 +379,9 @@ int main(int argn,char* args[]) { convert(P::ycells_ini) * pow(2,P::amrMaxSpatialRefLevel), convert(P::zcells_ini) * pow(2,P::amrMaxSpatialRefLevel)}; - std::array periodicity{mpiGrid.topology.is_periodic(0), - mpiGrid.topology.is_periodic(1), - mpiGrid.topology.is_periodic(2)}; + std::array periodicity{sysBoundaries.isBoundaryPeriodic(0), + sysBoundaries.isBoundaryPeriodic(1), + sysBoundaries.isBoundaryPeriodic(2)}; FsGridCouplingInformation gridCoupling; FsGrid< std::array, 2> perBGrid(fsGridDimensions, comm, periodicity,gridCoupling); From c4b5dd4d7abac93e3f842921c40388534e383839 Mon Sep 17 00:00:00 2001 From: ykempf Date: Tue, 16 Apr 2019 09:08:39 +0300 Subject: [PATCH 324/602] Fix assignation of DO_NOT_COMPUTE in gridGlue --- fieldsolver/gridGlue.cpp | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index a6488527f..1eb6f03ea 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -465,7 +465,7 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m technicalGrid.get(x,y,z)->sysBoundaryLayer = layer; if (layer > 1) { - technicalGrid.get(x,y,z)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE; + technicalGrid.get(x,y,z)->sysBoundaryFlag = sysboundarytype::DO_NOT_COMPUTE; } } } @@ -474,17 +474,18 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m } } - // for (int x = 0; x < localSize[0]; ++x) { - // for (int y = 0; y < localSize[1]; ++y) { - // for (int z = 0; z < localSize[2]; ++z) { - // std::cout << "boundary layer+flag at " << x << ", " << y << ", " << z << " = "; - // std::cout << technicalGrid.get(x,y,z)->sysBoundaryLayer; - // std::cout << " "; - // std::cout << technicalGrid.get(x,y,z)->sysBoundaryFlag; - // } - // } - // } - //abort(); +// for (int x = 0; x < localSize[0]; ++x) { +// for (int y = 0; y < localSize[1]; ++y) { +// for (int z = 0; z < localSize[2]; ++z) { +// std::cout << "boundary layer+flag at " << x << ", " << y << ", " << z << " = "; +// std::cout << technicalGrid.get(x,y,z)->sysBoundaryLayer; +// std::cout 
<< " "; +// std::cout << technicalGrid.get(x,y,z)->sysBoundaryFlag; +// std::cout << std::endl; +// } +// } +// } +// abort(); } void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid, From e8a6c24d41965794087836b740174c8d39650d76 Mon Sep 17 00:00:00 2001 From: ykempf Date: Tue, 16 Apr 2019 15:25:18 +0300 Subject: [PATCH 325/602] Removed misleading indentation to remove warnings, likely introduced by me a few commits ago anyway. --- projects/test_fp/test_fp.cpp | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/projects/test_fp/test_fp.cpp b/projects/test_fp/test_fp.cpp index ab5dab88e..80030e5a1 100644 --- a/projects/test_fp/test_fp.cpp +++ b/projects/test_fp/test_fp.cpp @@ -138,21 +138,21 @@ namespace projects { if (y >= -dy && y <= dy) if (z >= -dz && z <= dz) cell->at(fsgrids::bfield::PERBX) = this->B0 * areaFactor; - break; + break; case BYCASE: cell->at(fsgrids::bfield::PERBY) = 0.1 * this->B0 * areaFactor; //areaFactor = (CellParams::DX * CellParams::DZ) / (dx * dz); if (x >= -dx && x <= dx) if (z >= -dz && z <= dz) cell->at(fsgrids::bfield::PERBY) = this->B0 * areaFactor; - break; + break; case BZCASE: cell->at(fsgrids::bfield::PERBZ) = 0.1 * this->B0 * areaFactor; //areaFactor = (CellParams::DX * CellParams::DY) / (dx * dy); if (x >= -dx && x <= dx) if (y >= -dy && y <= dy) cell->at(fsgrids::bfield::PERBZ) = this->B0 * areaFactor; - break; + break; case BALLCASE: cell->at(fsgrids::bfield::PERBX) = 0.1 * this->B0 * areaFactor; cell->at(fsgrids::bfield::PERBY) = 0.1 * this->B0 * areaFactor; @@ -163,13 +163,13 @@ namespace projects { if (y >= -dy && y <= dy) if (z >= -dz && z <= dz) cell->at(fsgrids::bfield::PERBX) = this->B0 * areaFactor; - if (x >= -dx && x <= dx) - if (z >= -dz && z <= dz) - cell->at(fsgrids::bfield::PERBY) = this->B0 * areaFactor; - if (x >= -dx && x <= dx) - if (y >= -dy && y <= dy) - cell->at(fsgrids::bfield::PERBZ) = this->B0 * areaFactor; - break; + if (x >= -dx && x <= dx) + if (z >= -dz && z <= dz) + cell->at(fsgrids::bfield::PERBY) = this->B0 * areaFactor; + if (x >= -dx && x <= dx) + if (y >= -dy && y <= dy) + cell->at(fsgrids::bfield::PERBZ) = this->B0 * areaFactor; + break; } } } From 62318c0c611018910bf1aa70bbcc952f586e1b06 Mon Sep 17 00:00:00 2001 From: ykempf Date: Tue, 16 Apr 2019 15:26:20 +0300 Subject: [PATCH 326/602] Fixed boundary face identification for outflow, setbyuser and project_boundary. 
--- sysboundary/outflow.cpp | 12 ++++++------ sysboundary/project_boundary.cpp | 12 ++++++------ sysboundary/setbyuser.cpp | 12 ++++++------ 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/sysboundary/outflow.cpp b/sysboundary/outflow.cpp index f42dacfd4..0feb76ecb 100644 --- a/sysboundary/outflow.cpp +++ b/sysboundary/outflow.cpp @@ -302,13 +302,13 @@ namespace SBC { ) { Real fieldValue = -1.0; - creal dx =technicalGrid.DX; - creal dy =technicalGrid.DY; - creal dz =technicalGrid.DZ; + creal dx =Parameters::dx_ini; + creal dy =Parameters::dy_ini; + creal dz =Parameters::dz_ini; const std::array globalIndices = technicalGrid.getGlobalIndices(i,j,k); - creal x = (convert(globalIndices[0])+0.5)*dx + Parameters::xmin; - creal y = (convert(globalIndices[1])+0.5)*dy + Parameters::ymin; - creal z = (convert(globalIndices[2])+0.5)*dz + Parameters::zmin; + creal x = (convert(globalIndices[0])+0.5)*technicalGrid.DX + Parameters::xmin; + creal y = (convert(globalIndices[1])+0.5)*technicalGrid.DY + Parameters::ymin; + creal z = (convert(globalIndices[2])+0.5)*technicalGrid.DZ + Parameters::zmin; bool isThisCellOnAFace[6]; determineFace(&isThisCellOnAFace[0], x, y, z, dx, dy, dz, true); diff --git a/sysboundary/project_boundary.cpp b/sysboundary/project_boundary.cpp index 95bcfc374..bb9f9432d 100644 --- a/sysboundary/project_boundary.cpp +++ b/sysboundary/project_boundary.cpp @@ -155,13 +155,13 @@ namespace SBC { cint k, cuint component ) { - creal dx = EGrid.DX; - creal dy = EGrid.DY; - creal dz = EGrid.DZ; + creal dx = Parameters::dx_ini; + creal dy = Parameters::dy_ini; + creal dz = Parameters::dz_ini; const std::array globalIndices = EGrid.getGlobalIndices(i,j,k); - creal x = (convert(globalIndices[0])+0.5)*dx + Parameters::xmin; - creal y = (convert(globalIndices[1])+0.5)*dy + Parameters::ymin; - creal z = (convert(globalIndices[2])+0.5)*dz + Parameters::zmin; + creal x = (convert(globalIndices[0])+0.5)*EGrid.DX + Parameters::xmin; + creal y = (convert(globalIndices[1])+0.5)*EGrid.DY + Parameters::ymin; + creal z = (convert(globalIndices[2])+0.5)*EGrid.DZ + Parameters::zmin; bool isThisCellOnAFace[6]; determineFace(&isThisCellOnAFace[0], x, y, z, dx, dy, dz); diff --git a/sysboundary/setbyuser.cpp b/sysboundary/setbyuser.cpp index 32c1a805d..d1fc44613 100644 --- a/sysboundary/setbyuser.cpp +++ b/sysboundary/setbyuser.cpp @@ -128,13 +128,13 @@ namespace SBC { cuint& component ) { Real result = 0.0; - creal dx = perBGrid.DX; - creal dy = perBGrid.DY; - creal dz = perBGrid.DZ; + creal dx = Parameters::dx_ini; + creal dy = Parameters::dy_ini; + creal dz = Parameters::dz_ini; const std::array globalIndices = technicalGrid.getGlobalIndices(i,j,k); - creal x = (convert(globalIndices[0])+0.5)*dx + Parameters::xmin; - creal y = (convert(globalIndices[1])+0.5)*dy + Parameters::ymin; - creal z = (convert(globalIndices[2])+0.5)*dz + Parameters::zmin; + creal x = (convert(globalIndices[0])+0.5)*technicalGrid.DX + Parameters::xmin; + creal y = (convert(globalIndices[1])+0.5)*technicalGrid.DY + Parameters::ymin; + creal z = (convert(globalIndices[2])+0.5)*technicalGrid.DZ + Parameters::zmin; bool isThisCellOnAFace[6]; determineFace(&isThisCellOnAFace[0], x, y, z, dx, dy, dz, true); From 29c46cc62da29bf882f6a71bf59ff28ad622e2bf Mon Sep 17 00:00:00 2001 From: ykempf Date: Tue, 16 Apr 2019 15:27:16 +0300 Subject: [PATCH 327/602] Corrected getVolumeFieldsFromFsGrid and DO_NOT_COMPUTE cell tagging in fsgrid. 
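The core of the correction is a masked average: fsgrid cells flagged DO_NOT_COMPUTE no longer contribute to the dccrg cell they cover, and the sum is divided by the number of cells actually summed. A hedged stand-alone sketch of the pattern (Flag, FineCell and maskedAverage are illustrative names, not the transfer-buffer code itself):

    #include <vector>

    enum class Flag { COMPUTE, DO_NOT_COMPUTE };
    struct FineCell { Flag sysBoundaryFlag; double value; };

    // Average the fine (fsgrid) cells covering one coarse (dccrg) cell,
    // skipping DO_NOT_COMPUTE cells; if every covering cell is skipped,
    // return 0 rather than dividing by zero.
    double maskedAverage(const std::vector<FineCell>& covering) {
       double sum = 0.0;
       int nSummed = 0;
       for (const FineCell& c : covering) {
          if (c.sysBoundaryFlag == Flag::DO_NOT_COMPUTE) continue;
          sum += c.value;
          ++nSummed;
       }
       return (nSummed > 0) ? sum / nSummed : 0.0;
    }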
--- fieldsolver/gridGlue.cpp | 70 ++++++++++++++++++++++++++-------------- fieldsolver/gridGlue.hpp | 9 ++++-- vlasiator.cpp | 8 ++--- 3 files changed, 55 insertions(+), 32 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 1eb6f03ea..0b7283141 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -78,30 +78,38 @@ void feedMomentsIntoFsGrid(dccrg::Dccrg& } -void getVolumeFieldsFromFsGrid(FsGrid< std::array, 2>& volumeFieldsGrid, - dccrg::Dccrg& mpiGrid, - const std::vector& cells) { - - +void getVolumeFieldsFromFsGrid( + FsGrid< std::array, 2>& volumeFieldsGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, + dccrg::Dccrg& mpiGrid, + const std::vector& cells +) { // Setup transfer buffers int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); - std::vector< std::array > transferBuffer(nCells); - std::vector< std::array*> transferBufferPointer; - + std::vector< std::array > transferBufferVolFields(nCells); + std::vector< std::array*> transferBufferPointerVolFields; + std::vector< fsgrids::technical > transferBufferTechnical(nCells); + std::vector< fsgrids::technical*> transferBufferPointerTechnical; + // Setup transfer pointers volumeFieldsGrid.setupForTransferOut(nCells); int k = 0; for(auto dccrgId : cells) { const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); // Store a pointer to the first fsgrid cell that maps to each dccrg Id - transferBufferPointer.push_back(&transferBuffer[k]); + transferBufferPointerVolFields.push_back(&transferBufferVolFields[k]); + transferBufferPointerTechnical.push_back(&transferBufferTechnical[k]); for (auto fsgridId : fsgridIds) { - std::array* thisCellData = &transferBuffer[k++]; - volumeFieldsGrid.transferDataOut(fsgridId, thisCellData); + std::array* thisCellDataVolFields = &transferBufferVolFields[k]; + volumeFieldsGrid.transferDataOut(fsgridId, thisCellDataVolFields); + fsgrids::technical* thisCellDataTechnical = &transferBufferTechnical[k]; + technicalGrid.transferDataOut(fsgridId, thisCellDataTechnical); + k++; } } // Do the transfer volumeFieldsGrid.finishTransfersOut(); + technicalGrid.finishTransfersOut(); // Build a list of index pairs to cellparams and fsgrid std::vector> iCellParams; @@ -124,14 +132,17 @@ void getVolumeFieldsFromFsGrid(FsGrid< std::arrayget_cell_parameters(); - // Calculate the number of fsgrid cells we need to average into the current dccrg cell + // Calculate the number of fsgrid cells we loop through int nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); + // Count the number of fsgrid cells we need to average into the current dccrg cell + int nCellsToSum = 0; // TODO: Could optimize here by adding a separate branch for nCells == 1 with direct assignment of the value // Could also do the average in a temporary value and only access grid structure once. @@ -139,21 +150,30 @@ void getVolumeFieldsFromFsGrid(FsGrid< std::arrayderivativesBVOL[j.first] = 0.0; - + for(int iCell = 0; iCell < nCells; ++iCell) { // The fsgrid cells that cover the i'th dccrg cell are pointed at by - // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. 
We want to average - // over all of them to get the value for the dccrg cell - std::array* thisCellData = transferBufferPointer[i] + iCell; - - for (auto j : iCellParams) cellParams[j.first] += thisCellData->at(j.second); - for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] += thisCellData->at(j.second); + // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. + // We want to average over those who are not DO_NOT_COMPUTE to get the value for the dccrg cell + if ((transferBufferPointerTechnical[i] + iCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { + continue; + } else { + nCellsToSum++; + + std::array* thisCellData = transferBufferPointerVolFields[i] + iCell; + + for (auto j : iCellParams) cellParams[j.first] += thisCellData->at(j.second); + for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] += thisCellData->at(j.second); + } + } + + std::cout << dccrgId << " " << nCellsToSum << std::endl; + + if (nCellsToSum > 0) { + // Divide by the number of cells to get the average + for (auto j : iCellParams) cellParams[j.first] /= nCellsToSum; + for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] /= nCellsToSum; } - - // Divide by the number of cells to get the average - for (auto j : iCellParams) cellParams[j.first] /= nCells; - for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] /= nCells; - } } @@ -464,7 +484,7 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m technicalGrid.get(x,y,z)->sysBoundaryLayer = layer; - if (layer > 1) { + if (layer > 2 && technicalGrid.get(x,y,z)->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { technicalGrid.get(x,y,z)->sysBoundaryFlag = sysboundarytype::DO_NOT_COMPUTE; } } diff --git a/fieldsolver/gridGlue.hpp b/fieldsolver/gridGlue.hpp index 34fd31c3d..1a0e4b8ac 100644 --- a/fieldsolver/gridGlue.hpp +++ b/fieldsolver/gridGlue.hpp @@ -26,9 +26,12 @@ void feedMomentsIntoFsGrid(dccrg::Dccrg& * * This function assumes that proper grid coupling has been set up. */ -void getVolumeFieldsFromFsGrid(FsGrid< std::array, 2>& volumeFieldsGrid, - dccrg::Dccrg& mpiGrid, - const std::vector& cells); +void getVolumeFieldsFromFsGrid( + FsGrid< std::array, 2>& volumeFieldsGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, + dccrg::Dccrg& mpiGrid, + const std::vector& cells +); /*! Copy background B fields and store them into DCCRG * \param mpiGrid The DCCRG grid carrying fields. diff --git a/vlasiator.cpp b/vlasiator.cpp index a116073ae..3a1322074 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -520,7 +520,7 @@ int main(int argn,char* args[]) { phiprof::start("getVolumeFieldsFromFsGrid"); // These should be done by initializeFieldPropagator() if the propagation is turned off. - getVolumeFieldsFromFsGrid(volGrid, mpiGrid, cells); + getVolumeFieldsFromFsGrid(volGrid, technicalGrid, mpiGrid, cells); phiprof::stop("getVolumeFieldsFromFsGrid"); // Save restart data @@ -1034,10 +1034,10 @@ int main(int argn,char* args[]) { P::fieldSolverSubcycles ); - phiprof::start("fsgrid-coupling-out"); + phiprof::start("getVolumeFieldsFromFsGrid"); // Copy results back from fsgrid. 
- getVolumeFieldsFromFsGrid(volGrid, mpiGrid, cells); - phiprof::stop("fsgrid-coupling-out"); + getVolumeFieldsFromFsGrid(volGrid, technicalGrid, mpiGrid, cells); + phiprof::stop("getVolumeFieldsFromFsGrid"); phiprof::stop("Propagate Fields",cells.size(),"SpatialCells"); addTimedBarrier("barrier-after-field-solver"); } From 796eeb6c1db76ff7ff6253a867e72690a8518800 Mon Sep 17 00:00:00 2001 From: ykempf Date: Tue, 16 Apr 2019 15:35:36 +0300 Subject: [PATCH 328/602] Removed obsolete feedFieldDataIntoFsGrid --- fieldsolver/gridGlue.hpp | 36 ------------------------------------ 1 file changed, 36 deletions(-) diff --git a/fieldsolver/gridGlue.hpp b/fieldsolver/gridGlue.hpp index 1a0e4b8ac..210b2f84a 100644 --- a/fieldsolver/gridGlue.hpp +++ b/fieldsolver/gridGlue.hpp @@ -79,42 +79,6 @@ void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid, int getNumberOfCellsOnMaxRefLvl(dccrg::Dccrg& mpiGrid, const std::vector& cells); -/*! Transfer field data from DCCRG cellparams into the appropriate FsGrid structure - * \param mpiGrid The DCCRG grid carrying fieldparam data - * \param cells List of local cells - * \param index Index into the cellparams array from which to copy - * \param targetGrid Fieldsolver grid for these quantities - * - * The cellparams with indices from index to index+numFields are copied over, and - * have to be continuous in memory. - * - * This function assumes that proper grid coupling has been set up. - */ -template< unsigned int numFields > void feedFieldDataIntoFsGrid( - dccrg::Dccrg& mpiGrid, - const std::vector& cells, int cellParamsIndex, - FsGrid< std::array, 2>& targetGrid) { - - int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); - targetGrid.setupForTransferIn(nCells); - - int myRank; - MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - - for(CellID dccrgId : cells) { - const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); - // TODO: This assumes that the field data are lying continuous in memory. - // Check definition of CellParams in common.h if unsure. - std::array* cellDataPointer = reinterpret_cast*>( - &(mpiGrid[dccrgId]->get_cell_parameters()[cellParamsIndex])); - for (auto fsgridId : fsgridIds) { - targetGrid.transferDataIn(fsgridId, cellDataPointer); - } - } - - targetGrid.finishTransfersIn(); -} - /*! Transfer field data from an FsGrid back into the appropriate CellParams slot in DCCRG * \param sourceGrid Fieldsolver grid for these quantities From 136b28eab26c4c20d45147d5731177191c78e7d9 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 16 Apr 2019 15:50:34 +0300 Subject: [PATCH 329/602] Fixed max number of boundary layers with refinement in fsgrid --- fieldsolver/gridGlue.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 2ff2ba607..6b63d9357 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -433,7 +433,7 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m } // In dccrg initialization the max number of boundary layers is set to 3. 
- const int MAX_NUMBER_OF_BOUNDARY_LAYERS = 3 * (mpiGrid.get_maximum_refinement_level() + 1); + const int MAX_NUMBER_OF_BOUNDARY_LAYERS = 3 * pow(2,mpiGrid.get_maximum_refinement_level()); // loop through max number of layers for(uint layer = 1; layer <= MAX_NUMBER_OF_BOUNDARY_LAYERS; ++layer) { From 9e099dd80debbc900f34d871052aec81cfd73208 Mon Sep 17 00:00:00 2001 From: ykempf Date: Tue, 16 Apr 2019 16:34:02 +0300 Subject: [PATCH 330/602] uint64_t -> CellID --- sysboundary/antisymmetric.cpp | 2 +- sysboundary/ionosphere.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sysboundary/antisymmetric.cpp b/sysboundary/antisymmetric.cpp index a21f48d6f..6b72e94da 100644 --- a/sysboundary/antisymmetric.cpp +++ b/sysboundary/antisymmetric.cpp @@ -128,7 +128,7 @@ namespace SBC { const dccrg::Dccrg& mpiGrid, Project &project ) { - vector cells = mpiGrid.get_cells(); + vector cells = mpiGrid.get_cells(); #pragma omp parallel for for (uint i=0; i& mpiGrid, Project &project ) { - vector cells = mpiGrid.get_cells(); + vector cells = mpiGrid.get_cells(); #pragma omp parallel for for (uint i=0; i Date: Tue, 16 Apr 2019 16:34:44 +0300 Subject: [PATCH 331/602] Proper averaging for fsgrid->DCCRG functions and bugfixes. --- fieldsolver/gridGlue.cpp | 160 ++++++++++++++++++++++++--------------- fieldsolver/gridGlue.hpp | 77 ++++++++++++------- grid.cpp | 22 +++--- vlasiator.cpp | 24 +++--- 4 files changed, 172 insertions(+), 111 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 0b7283141..9943cf83d 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -29,15 +29,15 @@ void feedMomentsIntoFsGrid(dccrg::Dccrg& const std::vector& cells, FsGrid< std::array, 2>& momentsGrid, bool dt2 /*=false*/) { - int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); - momentsGrid.setupForTransferIn(nCells); + cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); + momentsGrid.setupForTransferIn(nCellsOnMaxRefLvl); std::vector< std::array > transferBuffer(cells.size()); // Fill from cellParams #pragma omp parallel for for(uint i = 0; i < cells.size(); ++i) { - CellID dccrgId = cells[i]; + const CellID dccrgId = cells[i]; auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); std::array* thisCellData = &transferBuffer[i]; @@ -66,7 +66,7 @@ void feedMomentsIntoFsGrid(dccrg::Dccrg& for (uint i = 0;i < cells.size(); ++i) { - CellID dccrgId = cells[i]; + const CellID dccrgId = cells[i]; const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); for (auto fsgridId : fsgridIds) { momentsGrid.transferDataIn(fsgridId, &transferBuffer[i]); @@ -85,14 +85,15 @@ void getVolumeFieldsFromFsGrid( const std::vector& cells ) { // Setup transfer buffers - int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); - std::vector< std::array > transferBufferVolFields(nCells); + cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); + std::vector< std::array > transferBufferVolFields(nCellsOnMaxRefLvl); std::vector< std::array*> transferBufferPointerVolFields; - std::vector< fsgrids::technical > transferBufferTechnical(nCells); + std::vector< fsgrids::technical > transferBufferTechnical(nCellsOnMaxRefLvl); std::vector< fsgrids::technical*> transferBufferPointerTechnical; // Setup transfer pointers - volumeFieldsGrid.setupForTransferOut(nCells); + volumeFieldsGrid.setupForTransferOut(nCellsOnMaxRefLvl); + technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); int k = 0; for(auto dccrgId : cells) { const auto fsgridIds = 
mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); @@ -136,11 +137,11 @@ void getVolumeFieldsFromFsGrid( #pragma omp parallel for for(uint i = 0; i < cells.size(); ++i) { - int dccrgId = cells[i]; + const CellID dccrgId = cells[i]; auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); // Calculate the number of fsgrid cells we loop through - int nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); + cint nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); // Count the number of fsgrid cells we need to average into the current dccrg cell int nCellsToSum = 0; @@ -167,8 +168,6 @@ void getVolumeFieldsFromFsGrid( } } - std::cout << dccrgId << " " << nCellsToSum << std::endl; - if (nCellsToSum > 0) { // Divide by the number of cells to get the average for (auto j : iCellParams) cellParams[j.first] /= nCellsToSum; @@ -180,28 +179,37 @@ void getVolumeFieldsFromFsGrid( void getBgFieldsAndDerivativesFromFsGrid( FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, dccrg::Dccrg& mpiGrid, const std::vector& cells ) { // Setup transfer buffers - int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); - std::vector< std::array > transferBuffer(nCells); - std::vector< std::array*> transferBufferPointer; + cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); + std::vector< std::array > transferBufferBGB(nCellsOnMaxRefLvl); + std::vector< std::array*> transferBufferPointerBGB; + std::vector< fsgrids::technical > transferBufferTechnical(nCellsOnMaxRefLvl); + std::vector< fsgrids::technical*> transferBufferPointerTechnical; // Setup transfer pointers - BgBGrid.setupForTransferOut(nCells); + BgBGrid.setupForTransferOut(nCellsOnMaxRefLvl); + technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); int k = 0; for(auto dccrgId : cells) { const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); // Store a pointer to the first fsgrid cell that maps to each dccrg Id - transferBufferPointer.push_back(&transferBuffer[k]); + transferBufferPointerBGB.push_back(&transferBufferBGB[k]); + transferBufferPointerTechnical.push_back(&transferBufferTechnical[k]); for (auto fsgridId : fsgridIds) { - std::array* thisCellData = &transferBuffer[k++]; + std::array* thisCellData = &transferBufferBGB[k]; BgBGrid.transferDataOut(fsgridId, thisCellData); + fsgrids::technical* thisCellDataTechnical = &transferBufferTechnical[k]; + technicalGrid.transferDataOut(fsgridId, thisCellDataTechnical); + k++; } } // Do the transfer BgBGrid.finishTransfersOut(); + technicalGrid.finishTransfersOut(); // Build lists of index pairs to dccrg and fsgrid std::vector> iCellParams; @@ -230,14 +238,17 @@ void getBgFieldsAndDerivativesFromFsGrid( iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBZVOLdy, fsgrids::bgbfield::dBGBZVOLdy)); // Distribute data from the transfer buffer back into the appropriate mpiGrid places + // Disregard DO_NOT_COMPUTE cells #pragma omp parallel for for(uint i = 0; i < cells.size(); ++i) { - int dccrgId = cells[i]; + const CellID dccrgId = cells[i]; auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); - // Calculate the number of fsgrid cells we need to average into the current dccrg cell - int nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); + // Calculate the number of fsgrid cells we loop through + cint nCells = 
pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); + // Count the number of fsgrid cells we need to average into the current dccrg cell + int nCellsToSum = 0; // TODO: Could optimize here by adding a separate branch for nCells == 1 with direct assignment of the value // Could also do the average in a temporary value and only access grid structure once. @@ -249,39 +260,53 @@ void getBgFieldsAndDerivativesFromFsGrid( for(int iCell = 0; iCell < nCells; ++iCell) { // The fsgrid cells that cover the i'th dccrg cell are pointed at by - // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. We want to average - // over all of them to get the value for the dccrg cell - std::array* thisCellData = transferBufferPointer[i] + iCell; - - for (auto j : iCellParams) cellParams[j.first] += thisCellData->at(j.second); - for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] += thisCellData->at(j.second); - for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] += thisCellData->at(j.second); + // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. + // We want to average over those who are not DO_NOT_COMPUTE to get the value for the dccrg cell + if ((transferBufferPointerTechnical[i] + iCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { + continue; + } else { + nCellsToSum++; + + std::array* thisCellData = transferBufferPointerBGB[i] + iCell; + + for (auto j : iCellParams) cellParams[j.first] += thisCellData->at(j.second); + for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] += thisCellData->at(j.second); + for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] += thisCellData->at(j.second); + } } - // Divide by the number of cells to get the average - for (auto j : iCellParams) cellParams[j.first] /= nCells; - for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] /= nCells; - for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] /= nCells; - + if (nCellsToSum > 0) { + // Divide by the number of cells to get the average + for (auto j : iCellParams) cellParams[j.first] /= nCellsToSum; + for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; + for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] /= nCellsToSum; + } } } -void getDerivativesFromFsGrid(FsGrid< std::array, 2>& dperbGrid, - FsGrid< std::array, 2>& dmomentsGrid, - dccrg::Dccrg& mpiGrid, - const std::vector& cells) { +void getDerivativesFromFsGrid( + FsGrid< std::array, 2>& dperbGrid, + FsGrid< std::array, 2>& dmomentsGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, + dccrg::Dccrg& mpiGrid, + const std::vector& cells +) { // Setup transfer buffers - int nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); + cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); std::vector< std::array > dperbTransferBuffer(nCellsOnMaxRefLvl); std::vector< std::array > dmomentsTransferBuffer(nCellsOnMaxRefLvl); std::vector< std::array*> dperbTransferBufferPointer; std::vector< std::array*> dmomentsTransferBufferPointer; + std::vector< fsgrids::technical > transferBufferTechnical(nCellsOnMaxRefLvl); + std::vector< fsgrids::technical*> transferBufferPointerTechnical; + dperbGrid.setupForTransferOut(nCellsOnMaxRefLvl); dmomentsGrid.setupForTransferOut(nCellsOnMaxRefLvl); + technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); int k = 0; for (auto dccrgId : cells) { @@ -291,6 +316,7 @@ void 
getDerivativesFromFsGrid(FsGrid< std::array, // Store a pointer to the first fsgrid cell that maps to each dccrg Id dperbTransferBufferPointer.push_back(&dperbTransferBuffer[k]); dmomentsTransferBufferPointer.push_back(&dmomentsTransferBuffer[k]); + transferBufferPointerTechnical.push_back(&transferBufferTechnical[k]); for (auto fsgridId : fsgridIds) { @@ -298,12 +324,16 @@ void getDerivativesFromFsGrid(FsGrid< std::array, dperbGrid.transferDataOut(fsgridId, dperbCellData); std::array* dmomentsCellData = &dmomentsTransferBuffer[k]; dmomentsGrid.transferDataOut(fsgridId, dmomentsCellData); + fsgrids::technical* thisCellDataTechnical = &transferBufferTechnical[k]; + technicalGrid.transferDataOut(fsgridId, thisCellDataTechnical); + k++; } } // Do the transfer dperbGrid.finishTransfersOut(); dmomentsGrid.finishTransfersOut(); + technicalGrid.finishTransfersOut(); std::vector> iDmoments; std::vector> iDperb; @@ -351,33 +381,41 @@ void getDerivativesFromFsGrid(FsGrid< std::array, iDperb.push_back(std::make_pair(fieldsolver::dPERBzdxy, fsgrids::dperb::dPERBzdxy)); // Distribute data from the transfer buffers back into the appropriate mpiGrid places + // Disregard DO_NOT_COMPUTE cells #pragma omp parallel for for(uint i = 0; i < cells.size(); ++i) { - int dccrgId = cells[i]; + const CellID dccrgId = cells[i]; - // Calculate the number of fsgrid cells we need to average into the current dccrg cell - auto refLvl = mpiGrid.mapping.get_refinement_level(dccrgId); - int nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); + // Calculate the number of fsgrid cells we loop through + cint nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); + // Count the number of fsgrid cells we need to average into the current dccrg cell + int nCellsToSum = 0; for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; for(int iCell = 0; iCell < nCells; ++iCell) { // The fsgrid cells that cover the i'th dccrg cell are pointed at by - // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. We want to average - // over all of them to get the value for the dccrg cell - - std::array* dperb = dperbTransferBufferPointer[i] + iCell; - std::array* dmoments = dmomentsTransferBufferPointer[i] + iCell; + // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. 
+ // We want to average over those who are not DO_NOT_COMPUTE to get the value for the dccrg cell + if ((transferBufferPointerTechnical[i] + iCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { + continue; + } else { + nCellsToSum++; + + std::array* dperb = dperbTransferBufferPointer[i] + iCell; + std::array* dmoments = dmomentsTransferBufferPointer[i] + iCell; + + for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] += dmoments->at(j.second); + for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] += dperb ->at(j.second); + } + } - for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] += dmoments->at(j.second); - for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] += dperb ->at(j.second); + if (nCellsToSum > 0) { + for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; + for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; } - - for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] /= nCells; - for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] /= nCells; - } } @@ -417,8 +455,8 @@ bool belongsToLayer(const int layer, const int x, const int y, const int z, void setupTechnicalFsGrid(dccrg::Dccrg& mpiGrid, const std::vector& cells, FsGrid< fsgrids::technical, 2>& technicalGrid) { - int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); - technicalGrid.setupForTransferIn(nCells); + cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); + technicalGrid.setupForTransferIn(nCellsOnMaxRefLvl); // Setup transfer buffers std::vector< fsgrids::technical > transferBuffer(cells.size()); @@ -512,11 +550,11 @@ void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid, dccrg::Dccrg& mpiGrid, const std::vector& cells) { - int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); - technicalGrid.setupForTransferOut(nCells); + cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); + technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); // Buffer to store contents of the grid - std::vector transferBuffer(nCells); + std::vector transferBuffer(nCellsOnMaxRefLvl); std::vector transferBufferPointer; int k = 0; @@ -536,11 +574,11 @@ void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid, #pragma omp parallel for for(int i=0; i< cells.size(); i++) { - int dccrgId = cells[i]; + const CellID dccrgId = cells[i]; auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); - // Calculate the number of fsgrid cells we need to average into the current dccrg cell - int nCells = pow(pow(2,mpiGrid.get_maximum_refinement_level() - mpiGrid.get_refinement_level(dccrgId)),3); + // Calculate the number of fsgrid cells we need to loop through + cint nCells = pow(pow(2,mpiGrid.get_maximum_refinement_level() - mpiGrid.get_refinement_level(dccrgId)),3); cellParams[CellParams::MAXFDT] = std::numeric_limits::max(); //cellParams[CellParams::FSGRID_RANK] = 0; diff --git a/fieldsolver/gridGlue.hpp b/fieldsolver/gridGlue.hpp index 210b2f84a..f40d04db6 100644 --- a/fieldsolver/gridGlue.hpp +++ b/fieldsolver/gridGlue.hpp @@ -42,6 +42,7 @@ void getVolumeFieldsFromFsGrid( */ void getBgFieldsAndDerivativesFromFsGrid( FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, dccrg::Dccrg& mpiGrid, const std::vector& cells ); @@ -50,10 +51,13 @@ void getBgFieldsAndDerivativesFromFsGrid( * * This should only be neccessary for debugging. 
*/ -void getDerivativesFromFsGrid(FsGrid< std::array, 2>& dperbGrid, - FsGrid< std::array, 2>& dmomentsGrid, - dccrg::Dccrg& mpiGrid, - const std::vector& cells); +void getDerivativesFromFsGrid( + FsGrid< std::array, 2>& dperbGrid, + FsGrid< std::array, 2>& dmomentsGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, + dccrg::Dccrg& mpiGrid, + const std::vector& cells +); /*! Transfer data into technical grid (boundary info etc.) * \param mpiGrid The DCCRG grid carrying rho, rhoV and P @@ -92,14 +96,20 @@ int getNumberOfCellsOnMaxRefLvl(dccrg::Dccrg void getFieldDataFromFsGrid( - FsGrid< std::array, 2>& sourceGrid, - dccrg::Dccrg& mpiGrid, - const std::vector& cells, int index) { + FsGrid< std::array, 2>& sourceGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, + dccrg::Dccrg& mpiGrid, + const std::vector& cells, + int index +) { - int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); - std::vector< std::array > transferBuffer(nCells); - std::vector< std::array*> transferBufferPointer; - sourceGrid.setupForTransferOut(nCells); + cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); + std::vector< std::array > transferBufferData(nCellsOnMaxRefLvl); + std::vector< std::array*> transferBufferPointerData; + std::vector< fsgrids::technical > transferBufferTechnical(nCellsOnMaxRefLvl); + std::vector< fsgrids::technical*> transferBufferPointerTechnical; + sourceGrid.setupForTransferOut(nCellsOnMaxRefLvl); + technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); int k = 0; for(CellID dccrgId : cells) { @@ -109,44 +119,57 @@ template< unsigned int numFields > void getFieldDataFromFsGrid( //std::array* cellDataPointer = reinterpret_cast*>( // &(mpiGrid[dccrgId]->get_cell_parameters()[index])); - transferBufferPointer.push_back(&transferBuffer[k]); + transferBufferPointerData.push_back(&transferBufferData[k]); + transferBufferPointerTechnical.push_back(&transferBufferTechnical[k]); const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); for (auto fsgridId : fsgridIds) { - std::array* cellDataPointer = &transferBuffer[k++]; + std::array* cellDataPointer = &transferBufferData[k]; sourceGrid.transferDataOut(fsgridId, cellDataPointer); + fsgrids::technical* thisCellDataTechnical = &transferBufferTechnical[k]; + technicalGrid.transferDataOut(fsgridId, thisCellDataTechnical); + k++; } } sourceGrid.finishTransfersOut(); + technicalGrid.finishTransfersOut(); // Average data in transferBuffer + // Disregard DO_NOT_COMPUTE cells #pragma omp parallel for for(uint i = 0; i < cells.size(); ++i) { - CellID dccrgId = cells[i]; - + const CellID dccrgId = cells[i]; + // Set cell data to 0 for (int iField = 0; iField < numFields; ++iField) { mpiGrid[dccrgId]->get_cell_parameters()[index+iField] = 0.0; } - // Calculate the number of fsgrid cells we need to average into the current dccrg cell - auto refLvl = mpiGrid.mapping.get_refinement_level(dccrgId); - int nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); - + // Calculate the number of fsgrid cells we loop through + cint nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); + // Count the number of fsgrid cells we need to average into the current dccrg cell + int nCellsToSum = 0; + for(int iCell = 0; iCell < nCells; ++iCell) { - - std::array* cellDataPointer = transferBufferPointer[i] + iCell; - + if ((transferBufferPointerTechnical[i] + iCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) 
{ + continue; + } else { + nCellsToSum++; + std::array* cellDataPointer = transferBufferPointerData[i] + iCell; + + for (int iField = 0; iField < numFields; ++iField) { + mpiGrid[dccrgId]->get_cell_parameters()[index+iField] += cellDataPointer->at(iField); + } + } + } + + if (nCellsToSum > 0) { for (int iField = 0; iField < numFields; ++iField) { - mpiGrid[dccrgId]->get_cell_parameters()[index+iField] += cellDataPointer->at(iField); + mpiGrid[dccrgId]->get_cell_parameters()[index+iField] /= nCellsToSum; } } - - for (int iField = 0; iField < numFields; ++iField) { - mpiGrid[dccrgId]->get_cell_parameters()[index+iField] /= nCells; - } } } diff --git a/grid.cpp b/grid.cpp index 3f3e6a27f..b138890f6 100644 --- a/grid.cpp +++ b/grid.cpp @@ -315,8 +315,8 @@ void initializeGrids( phiprof::stop("setProjectBField"); phiprof::start("Finish fsgrid setup"); - getFieldDataFromFsGrid(perBGrid, mpiGrid, cells, CellParams::PERBX); - getBgFieldsAndDerivativesFromFsGrid(BgBGrid, mpiGrid, cells); + getFieldDataFromFsGrid(perBGrid, technicalGrid, mpiGrid, cells, CellParams::PERBX); + getBgFieldsAndDerivativesFromFsGrid(BgBGrid, technicalGrid, mpiGrid, cells); BgBGrid.updateGhostCells(); // WARNING this means moments and dt2 moments are the same here. @@ -436,11 +436,11 @@ void balanceLoad(dccrg::Dccrg& mpiGrid, S mpiGrid.initialize_balance_load(true); phiprof::stop("dccrg.initialize_balance_load"); - const std::unordered_set& incoming_cells = mpiGrid.get_cells_added_by_balance_load(); - std::vector incoming_cells_list (incoming_cells.begin(),incoming_cells.end()); + const std::unordered_set& incoming_cells = mpiGrid.get_cells_added_by_balance_load(); + std::vector incoming_cells_list (incoming_cells.begin(),incoming_cells.end()); - const std::unordered_set& outgoing_cells = mpiGrid.get_cells_removed_by_balance_load(); - std::vector outgoing_cells_list (outgoing_cells.begin(),outgoing_cells.end()); + const std::unordered_set& outgoing_cells = mpiGrid.get_cells_removed_by_balance_load(); + std::vector outgoing_cells_list (outgoing_cells.begin(),outgoing_cells.end()); /*transfer cells in parts to preserve memory*/ phiprof::start("Data transfers"); @@ -448,7 +448,7 @@ void balanceLoad(dccrg::Dccrg& mpiGrid, S for (uint64_t transfer_part=0; transfer_partset_mpi_transfer_enabled(false); @@ -459,7 +459,7 @@ void balanceLoad(dccrg::Dccrg& mpiGrid, S //Set transfers on/off for the outgoing cells in this transfer set for (unsigned int i=0; iset_mpi_transfer_enabled(false); @@ -480,7 +480,7 @@ void balanceLoad(dccrg::Dccrg& mpiGrid, S int receives = 0; for (unsigned int i=0; i& mpiGrid, S // Free memory for cells that have been sent (the block data) for (unsigned int i=0;i void report_grid_memory_consumption(dccrg::Dccrg& mpiGrid) { /*now report memory consumption into logfile*/ const vector& cells = getLocalCells(); - const std::vector remote_cells = mpiGrid.get_remote_cells_on_process_boundary(); + const std::vector remote_cells = mpiGrid.get_remote_cells_on_process_boundary(); int rank,n_procs; MPI_Comm_size(MPI_COMM_WORLD, &n_procs); MPI_Comm_rank(MPI_COMM_WORLD, &rank); diff --git a/vlasiator.cpp b/vlasiator.cpp index 3a1322074..2f844ce78 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -527,11 +527,11 @@ int main(int argn,char* args[]) { if (P::writeInitialState) { phiprof::start("write-initial-state"); phiprof::start("fsgrid-coupling-out"); - getFieldDataFromFsGrid(perBGrid,mpiGrid,cells,CellParams::PERBX); - getFieldDataFromFsGrid(EGrid,mpiGrid,cells,CellParams::EX); - 
getFieldDataFromFsGrid(EHallGrid,mpiGrid,cells,CellParams::EXHALL_000_100); - getFieldDataFromFsGrid(EGradPeGrid,mpiGrid,cells,CellParams::EXGRADPE); - getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, mpiGrid, cells); + getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX); + getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); + getFieldDataFromFsGrid(EHallGrid,technicalGrid,mpiGrid,cells,CellParams::EXHALL_000_100); + getFieldDataFromFsGrid(EGradPeGrid,technicalGrid,mpiGrid,cells,CellParams::EXGRADPE); + getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, technicalGrid, mpiGrid, cells); phiprof::stop("fsgrid-coupling-out"); if (myRank == MASTER_RANK) @@ -707,12 +707,12 @@ int main(int argn,char* args[]) { it++) { if (*it == "FluxB") { phiprof::start("fsgrid-coupling-out"); - getFieldDataFromFsGrid(perBGrid,mpiGrid,cells,CellParams::PERBX); + getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX); phiprof::stop("fsgrid-coupling-out"); } if (*it == "FluxE") { phiprof::start("fsgrid-coupling-out"); - getFieldDataFromFsGrid(EGrid,mpiGrid,cells,CellParams::EX); + getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); phiprof::stop("fsgrid-coupling-out"); } } @@ -739,27 +739,27 @@ int main(int argn,char* args[]) { *it == "PerturbedB" ) { phiprof::start("fsgrid-coupling-out"); - getFieldDataFromFsGrid(perBGrid,mpiGrid,cells,CellParams::PERBX); + getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX); phiprof::stop("fsgrid-coupling-out"); } if (*it == "E") { phiprof::start("fsgrid-coupling-out"); - getFieldDataFromFsGrid(EGrid,mpiGrid,cells,CellParams::EX); + getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); phiprof::stop("fsgrid-coupling-out"); } if (*it == "HallE") { phiprof::start("fsgrid-coupling-out"); - getFieldDataFromFsGrid(EHallGrid,mpiGrid,cells,CellParams::EXHALL_000_100); + getFieldDataFromFsGrid(EHallGrid,technicalGrid,mpiGrid,cells,CellParams::EXHALL_000_100); phiprof::stop("fsgrid-coupling-out"); } if (*it == "GradPeE") { phiprof::start("fsgrid-coupling-out"); - getFieldDataFromFsGrid(EGradPeGrid,mpiGrid,cells,CellParams::EXGRADPE); + getFieldDataFromFsGrid(EGradPeGrid,technicalGrid,mpiGrid,cells,CellParams::EXGRADPE); phiprof::stop("fsgrid-coupling-out"); } if (*it == "derivs") { phiprof::start("fsgrid-coupling-out"); - getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, mpiGrid, cells); + getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, technicalGrid, mpiGrid, cells); phiprof::stop("fsgrid-coupling-out"); } } From b0eb522324f67bd4dd2ce6cd42835975604ff83f Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Wed, 17 Apr 2019 14:25:55 +0300 Subject: [PATCH 332/602] Added extra memory diagnostics --- grid.cpp | 24 +++++++++++++++++++++++- memoryallocation.cpp | 6 +++--- memoryallocation.h | 2 +- vlasiator.cpp | 10 ++++++++-- 4 files changed, 35 insertions(+), 7 deletions(-) diff --git a/grid.cpp b/grid.cpp index a742d8ab3..9077ca128 100644 --- a/grid.cpp +++ b/grid.cpp @@ -123,12 +123,16 @@ void initializeGrid( .set_geometry(geom_params); + report_process_memory_consumption("grid: after initialize"); + phiprof::start("Refine spatial cells"); if(P::amrMaxSpatialRefLevel > 0 && project.refineSpatialCells(mpiGrid)) { recalculateLocalCellsCache(); } phiprof::stop("Refine spatial cells"); + report_process_memory_consumption("grid: after refine"); + // Init velocity mesh on all cells initVelocityGridGeometry(mpiGrid); initializeStencils(mpiGrid); @@ -143,6 
+147,8 @@ void initializeGrid( } phiprof::stop("Initial load-balancing"); + report_process_memory_consumption("grid: after LB"); + if (myRank == MASTER_RANK) logFile << "(INIT): Set initial state." << endl << writeVerbose; phiprof::start("Set initial state"); @@ -166,16 +172,20 @@ void initializeGrid( phiprof::stop("Classify cells (sys boundary conditions)"); + report_process_memory_consumption("grid: after boundary"); + // Check refined cells do not touch boundary cells phiprof::start("Check boundary refinement"); - if(!sysBoundaries.checkRefinement(mpiGrid)) { + if(P::amrMaxSpatialRefLevel > 0 && !sysBoundaries.checkRefinement(mpiGrid)) { cerr << "(MAIN) ERROR: Boundary cells must have identical refinement level " << endl; exit(1); } phiprof::stop("Check boundary refinement"); + report_process_memory_consumption("grid: after check refinement"); + if (P::isRestart) { logFile << "Restart from "<< P::restartFileName << std::endl << writeVerbose; phiprof::start("Read restart"); @@ -201,6 +211,8 @@ void initializeGrid( phiprof::stop("Apply system boundary conditions state"); } + report_process_memory_consumption("grid: before initial state"); + if (!P::isRestart) { //Initial state based on project, background field in all cells //and other initial values in non-sysboundary cells @@ -214,6 +226,8 @@ void initializeGrid( // Allow the project to set up data structures for it's setCell calls project.setupBeforeSetCell(cells); + report_process_memory_consumption("grid: before setCell"); + #pragma omp parallel for schedule(dynamic) for (size_t i=0; i Parameters::bailout_max_memory, "Memory high water mark per node exceeds bailout threshold", __FILE__, __LINE__); diff --git a/memoryallocation.h b/memoryallocation.h index 985f48dc1..aa01bb1db 100644 --- a/memoryallocation.h +++ b/memoryallocation.h @@ -35,7 +35,7 @@ uint64_t get_node_free_memory(); /*! Measures memory consumption and writes it into logfile. Collective * operation on MPI_COMM_WORLD */ -void report_process_memory_consumption(); +void report_process_memory_consumption(const char* message); /*! Alligned malloc, could be done using aligned_alloc*/ inline void * aligned_malloc(size_t size,std::size_t align) { diff --git a/vlasiator.cpp b/vlasiator.cpp index 72a8e8980..6fb8b19fa 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -372,6 +372,8 @@ int main(int argn,char* args[]) { // Add AMR refinement criterias: amr_ref_criteria::addRefinementCriteria(); + report_process_memory_consumption("b4 grid"); + // Initialize grid. After initializeGrid local cells have dist // functions, and B fields set. Cells have also been classified for // the various sys boundary conditions. All remote cells have been @@ -385,6 +387,8 @@ int main(int argn,char* args[]) { phiprof::stop("Init grid"); + report_process_memory_consumption("after grid"); + // Initialize data reduction operators. This should be done elsewhere in order to initialize // user-defined operators: phiprof::start("Init DROs"); @@ -470,6 +474,8 @@ int main(int argn,char* args[]) { feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,false); feedMomentsIntoFsGrid(mpiGrid, cells, momentsDt2Grid,false); + report_process_memory_consumption("after fsgrid"); + phiprof::start("Init field propagator"); if ( initializeFieldPropagator( @@ -618,7 +624,7 @@ int main(int argn,char* args[]) { if (myRank == MASTER_RANK) logFile << "(MAIN): Starting main simulation loop." 
<< endl << writeVerbose; phiprof::start("report-memory-consumption"); - report_process_memory_consumption(); + report_process_memory_consumption("init"); phiprof::stop("report-memory-consumption"); unsigned int computedCells=0; @@ -713,7 +719,7 @@ int main(int argn,char* args[]) { beforeSimulationTime=P::t; beforeStep=P::tstep; //report_grid_memory_consumption(mpiGrid); - report_process_memory_consumption(); + report_process_memory_consumption("step"); } logFile << writeVerbose; phiprof::stop("logfile-io"); From e63cf013b277022a45f161158280750cc040e569 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Thu, 18 Apr 2019 09:35:47 +0300 Subject: [PATCH 333/602] Changed DISTRIBUTION_FP_PRECISION to SPF --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 7747028f8..db169a4f0 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ ARCH = ${VLASIATOR_ARCH} #set FP precision to SP (single) or DP (double) FP_PRECISION = DP #Set floating point precision for distribution function to SPF (single) or DPF (double) -DISTRIBUTION_FP_PRECISION = DPF +DISTRIBUTION_FP_PRECISION = SPF #override flags if we are building testpackage: ifneq (,$(findstring testpackage,$(MAKECMDGOALS))) From 257849b4a25a7ad8aa072dc8832323a8e84aecd3 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Thu, 18 Apr 2019 10:58:01 +0300 Subject: [PATCH 334/602] Revert "Added extra memory diagnostics" This reverts commit b0eb522324f67bd4dd2ce6cd42835975604ff83f. --- grid.cpp | 24 +----------------------- memoryallocation.cpp | 6 +++--- memoryallocation.h | 2 +- vlasiator.cpp | 10 ++-------- 4 files changed, 7 insertions(+), 35 deletions(-) diff --git a/grid.cpp b/grid.cpp index 9077ca128..a742d8ab3 100644 --- a/grid.cpp +++ b/grid.cpp @@ -123,16 +123,12 @@ void initializeGrid( .set_geometry(geom_params); - report_process_memory_consumption("grid: after initialize"); - phiprof::start("Refine spatial cells"); if(P::amrMaxSpatialRefLevel > 0 && project.refineSpatialCells(mpiGrid)) { recalculateLocalCellsCache(); } phiprof::stop("Refine spatial cells"); - report_process_memory_consumption("grid: after refine"); - // Init velocity mesh on all cells initVelocityGridGeometry(mpiGrid); initializeStencils(mpiGrid); @@ -147,8 +143,6 @@ void initializeGrid( } phiprof::stop("Initial load-balancing"); - report_process_memory_consumption("grid: after LB"); - if (myRank == MASTER_RANK) logFile << "(INIT): Set initial state." 
<< endl << writeVerbose; phiprof::start("Set initial state"); @@ -172,20 +166,16 @@ void initializeGrid( phiprof::stop("Classify cells (sys boundary conditions)"); - report_process_memory_consumption("grid: after boundary"); - // Check refined cells do not touch boundary cells phiprof::start("Check boundary refinement"); - if(P::amrMaxSpatialRefLevel > 0 && !sysBoundaries.checkRefinement(mpiGrid)) { + if(!sysBoundaries.checkRefinement(mpiGrid)) { cerr << "(MAIN) ERROR: Boundary cells must have identical refinement level " << endl; exit(1); } phiprof::stop("Check boundary refinement"); - report_process_memory_consumption("grid: after check refinement"); - if (P::isRestart) { logFile << "Restart from "<< P::restartFileName << std::endl << writeVerbose; phiprof::start("Read restart"); @@ -211,8 +201,6 @@ void initializeGrid( phiprof::stop("Apply system boundary conditions state"); } - report_process_memory_consumption("grid: before initial state"); - if (!P::isRestart) { //Initial state based on project, background field in all cells //and other initial values in non-sysboundary cells @@ -226,8 +214,6 @@ void initializeGrid( // Allow the project to set up data structures for it's setCell calls project.setupBeforeSetCell(cells); - report_process_memory_consumption("grid: before setCell"); - #pragma omp parallel for schedule(dynamic) for (size_t i=0; i Parameters::bailout_max_memory, "Memory high water mark per node exceeds bailout threshold", __FILE__, __LINE__); diff --git a/memoryallocation.h b/memoryallocation.h index aa01bb1db..985f48dc1 100644 --- a/memoryallocation.h +++ b/memoryallocation.h @@ -35,7 +35,7 @@ uint64_t get_node_free_memory(); /*! Measures memory consumption and writes it into logfile. Collective * operation on MPI_COMM_WORLD */ -void report_process_memory_consumption(const char* message); +void report_process_memory_consumption(); /*! Alligned malloc, could be done using aligned_alloc*/ inline void * aligned_malloc(size_t size,std::size_t align) { diff --git a/vlasiator.cpp b/vlasiator.cpp index 6fb8b19fa..72a8e8980 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -372,8 +372,6 @@ int main(int argn,char* args[]) { // Add AMR refinement criterias: amr_ref_criteria::addRefinementCriteria(); - report_process_memory_consumption("b4 grid"); - // Initialize grid. After initializeGrid local cells have dist // functions, and B fields set. Cells have also been classified for // the various sys boundary conditions. All remote cells have been @@ -387,8 +385,6 @@ int main(int argn,char* args[]) { phiprof::stop("Init grid"); - report_process_memory_consumption("after grid"); - // Initialize data reduction operators. This should be done elsewhere in order to initialize // user-defined operators: phiprof::start("Init DROs"); @@ -474,8 +470,6 @@ int main(int argn,char* args[]) { feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,false); feedMomentsIntoFsGrid(mpiGrid, cells, momentsDt2Grid,false); - report_process_memory_consumption("after fsgrid"); - phiprof::start("Init field propagator"); if ( initializeFieldPropagator( @@ -624,7 +618,7 @@ int main(int argn,char* args[]) { if (myRank == MASTER_RANK) logFile << "(MAIN): Starting main simulation loop." 
<< endl << writeVerbose; phiprof::start("report-memory-consumption"); - report_process_memory_consumption("init"); + report_process_memory_consumption(); phiprof::stop("report-memory-consumption"); unsigned int computedCells=0; @@ -719,7 +713,7 @@ int main(int argn,char* args[]) { beforeSimulationTime=P::t; beforeStep=P::tstep; //report_grid_memory_consumption(mpiGrid); - report_process_memory_consumption("step"); + report_process_memory_consumption(); } logFile << writeVerbose; phiprof::stop("logfile-io"); From a056b2d50e9e30750deb4eb9464d71f9ed8140b4 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Thu, 18 Apr 2019 11:18:53 +0300 Subject: [PATCH 335/602] Removed MPI_Comm_rank calls that were left over from debugging. --- vlasovsolver/cpu_trans_map.cpp | 5 ++--- vlasovsolver/cpu_trans_map_amr.cpp | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index c38b222db..7500ec6e1 100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -650,9 +650,8 @@ void update_remote_mapping_contribution( vector send_cells; vector receiveBuffers; - int myRank; - - MPI_Comm_rank(MPI_COMM_WORLD,&myRank); +// int myRank; +// MPI_Comm_rank(MPI_COMM_WORLD,&myRank); // MPI_Barrier(MPI_COMM_WORLD); // cout << "begin update_remote_mapping_contribution, dimension = " << dimension << ", direction = " << direction << endl; diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index ea5394dae..7426f5942 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1318,8 +1318,8 @@ void update_remote_mapping_contribution_amr( int neighborhood = 0; // For debugging - int myRank; - MPI_Comm_rank(MPI_COMM_WORLD,&myRank); +// int myRank; +// MPI_Comm_rank(MPI_COMM_WORLD,&myRank); //normalize and set neighborhoods if(direction > 0) { From 6954ad916a71b7260c70f020606592091bac115f Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 18 Apr 2019 16:47:02 +0300 Subject: [PATCH 336/602] Fix compilation problem in 1st order field solver compilation. --- fieldsolver/derivatives.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fieldsolver/derivatives.cpp b/fieldsolver/derivatives.cpp index 58165d533..5aa6f45c3 100644 --- a/fieldsolver/derivatives.cpp +++ b/fieldsolver/derivatives.cpp @@ -88,13 +88,13 @@ void calculateDerivatives( #ifdef DEBUG_SOLVERS if (leftMoments->at(fsgrids::moments::RHOM) <= 0) { std::cerr << __FILE__ << ":" << __LINE__ - << (leftMoments->at(fsgrids::moments::RHOM) < 0 ? " Negative" : " Zero") << " density in spatial cell " << leftNbrID + << (leftMoments->at(fsgrids::moments::RHOM) < 0 ? " Negative" : " Zero") << " density in spatial cell " //<< leftNbrID << std::endl; abort(); } - if (rightMoments->at(fsgrids::moments::RHOM) <= 0) { + if (rghtMoments->at(fsgrids::moments::RHOM) <= 0) { std::cerr << __FILE__ << ":" << __LINE__ - << (rightMoments->at(fsgrids::moments::RHOM) < 0 ? " Negative" : " Zero") << " density in spatial cell " << rightNbrID + << (rghtMoments->at(fsgrids::moments::RHOM) < 0 ? " Negative" : " Zero") << " density in spatial cell " //<< rightNbrID << std::endl; abort(); } From fda417740e47136b648aae5fa6bc2109f08f59f9 Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 18 Apr 2019 16:47:45 +0300 Subject: [PATCH 337/602] Fix incorrect initialisation of template cell for setMaxwellian boundary. 
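The template cell was being filled with a Maxwellian sampled at the cell-centre velocity itself instead of relative to the configured bulk velocity (VX0, VY0, VZ0), so the inflowing plasma got zero drift. A hedged sketch of the corrected evaluation; driftingMaxwellian, its signature and the hard-coded proton mass are assumptions for illustration, not the project's own distribution routine:

    #include <cmath>

    // Illustrative only: Maxwellian with number density rho, temperature T
    // and bulk velocity (Vx, Vy, Vz), sampled at the velocity-cell centre
    // (vx, vy, vz). The exponent must use the drift-frame velocity v - V,
    // which is what this patch restores.
    double driftingMaxwellian(double rho, double T,
                              double vx, double vy, double vz,
                              double Vx, double Vy, double Vz) {
       const double kB = 1.380649e-23;  // Boltzmann constant [J/K]
       const double m  = 1.672622e-27;  // proton mass [kg], assumed species
       const double pi = 3.141592653589793;
       const double v2 = (vx - Vx) * (vx - Vx)
                       + (vy - Vy) * (vy - Vy)
                       + (vz - Vz) * (vz - Vz);
       return rho * std::pow(m / (2.0 * pi * kB * T), 1.5)
                  * std::exp(-m * v2 / (2.0 * kB * T));
    }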
--- sysboundary/setmaxwellian.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sysboundary/setmaxwellian.cpp b/sysboundary/setmaxwellian.cpp index 4b4c59740..2b59b88bb 100644 --- a/sysboundary/setmaxwellian.cpp +++ b/sysboundary/setmaxwellian.cpp @@ -156,7 +156,7 @@ namespace SBC { popID, rho, T, - counter*cell.get_velocity_grid_block_size(popID,refLevel)[0], 0.0, 0.0 + VX0 + counter*cell.get_velocity_grid_block_size(popID,refLevel)[0], VY0, VZ0 ) || counter > vblocks_ini[0] @@ -287,9 +287,9 @@ namespace SBC { popID, rho, T, - vxCell + 0.5*dvxCell, - vyCell + 0.5*dvyCell, - vzCell + 0.5*dvzCell + vxCell + 0.5*dvxCell - Vx, + vyCell + 0.5*dvyCell - Vy, + vzCell + 0.5*dvzCell - Vz ); } From 9a56aa5a1015eac907af1624eed0a81b86951747 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 23 Apr 2019 15:46:13 +0300 Subject: [PATCH 338/602] Added amr Flowthrough test to testpackage --- .../tests/Flowthrough_amr/Flowthrough_amr.cfg | 104 ++++++++++++++++++ testpackage/tests/Flowthrough_amr/sw1.dat | 1 + 2 files changed, 105 insertions(+) create mode 100644 testpackage/tests/Flowthrough_amr/Flowthrough_amr.cfg create mode 100644 testpackage/tests/Flowthrough_amr/sw1.dat diff --git a/testpackage/tests/Flowthrough_amr/Flowthrough_amr.cfg b/testpackage/tests/Flowthrough_amr/Flowthrough_amr.cfg new file mode 100644 index 000000000..c471090e1 --- /dev/null +++ b/testpackage/tests/Flowthrough_amr/Flowthrough_amr.cfg @@ -0,0 +1,104 @@ +ParticlePopulations = proton + +project = Flowthrough +propagate_field = 1 +propagate_vlasov_acceleration = 1 +propagate_vlasov_translation = 1 +dynamic_timestep = 1 + +[proton_properties] +mass = 1 +mass_units = PROTON +charge = 1 + +[AMR] +max_spatial_level = 2 +box_half_width_x = 2 +box_half_width_z = 2 +box_half_width_y = 2 + +[gridbuilder] +x_length = 16 +y_length = 8 +z_length = 8 +x_min = -8e7 +x_max = 8e7 +y_min = -4e7 +y_max = 4e7 +z_min = -4e7 +z_max = 4e7 +t_max = 162.0 +dt = 2.0 + +[proton_vspace] +vx_min = -2e6 +vx_max = +2e6 +vy_min = -2e6 +vy_max = +2e6 +vz_min = -2e6 +vz_max = +2e6 +vx_length = 15 +vy_length = 15 +vz_length = 15 + +[io] +write_initial_state = 1 + +system_write_t_interval = 160.0 +system_write_file_name = bulk +system_write_distribution_stride = 0 +system_write_distribution_xline_stride = 0 +system_write_distribution_yline_stride = 0 +system_write_distribution_zline_stride = 0 + +[variables] +output = populations_Rho +output = E +output = B +output = BoundaryType +output = MPIrank +output = populations_Blocks +diagnostic = populations_Blocks + +[boundaries] +periodic_x = no +periodic_y = yes +periodic_z = yes +boundary = Outflow +boundary = Maxwellian + +[outflow] +precedence = 3 + +[proton_outflow] +face = x+ +#face = y- +#face = y+ +#face = z- +#face = z+ + +[maxwellian] +precedence = 4 +face = x- + +[proton_maxwellian] +dynamic = 0 +file_x- = sw1.dat + +[proton_sparse] +minValue = 1.0e-15 + +[Flowthrough] +Bx = 1.0e-9 +By = 1.0e-9 +Bz = 1.0e-9 + +[proton_Flowthrough] +T = 1.0e5 +rho = 1.0e6 + +nSpaceSamples = 2 +nVelocitySamples = 2 + +[loadBalance] +algorithm = RANDOM diff --git a/testpackage/tests/Flowthrough_amr/sw1.dat b/testpackage/tests/Flowthrough_amr/sw1.dat new file mode 100644 index 000000000..0a266607f --- /dev/null +++ b/testpackage/tests/Flowthrough_amr/sw1.dat @@ -0,0 +1 @@ +0.0 2.0e6 1.0e6 1.0e5 0.0 0.0 1.0e-9 1.0e-9 1.0e-9 From c230d1b36abc903e95e7dade9967034c8dfffba4 Mon Sep 17 00:00:00 2001 From: ykempf Date: Tue, 16 Apr 2019 09:08:39 +0300 Subject: [PATCH 339/602] Fix assignation of 
DO_NOT_COMPUTE in gridGlue --- fieldsolver/gridGlue.cpp | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 6b63d9357..80f4053dd 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -453,7 +453,7 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m technicalGrid.get(x,y,z)->sysBoundaryLayer = layer; if (layer > 1) { - technicalGrid.get(x,y,z)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE; + technicalGrid.get(x,y,z)->sysBoundaryFlag = sysboundarytype::DO_NOT_COMPUTE; } } } @@ -462,17 +462,18 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m } } - // for (int x = 0; x < localSize[0]; ++x) { - // for (int y = 0; y < localSize[1]; ++y) { - // for (int z = 0; z < localSize[2]; ++z) { - // std::cout << "boundary layer+flag at " << x << ", " << y << ", " << z << " = "; - // std::cout << technicalGrid.get(x,y,z)->sysBoundaryLayer; - // std::cout << " "; - // std::cout << technicalGrid.get(x,y,z)->sysBoundaryFlag; - // } - // } - // } - //abort(); +// for (int x = 0; x < localSize[0]; ++x) { +// for (int y = 0; y < localSize[1]; ++y) { +// for (int z = 0; z < localSize[2]; ++z) { +// std::cout << "boundary layer+flag at " << x << ", " << y << ", " << z << " = "; +// std::cout << technicalGrid.get(x,y,z)->sysBoundaryLayer; +// std::cout << " "; +// std::cout << technicalGrid.get(x,y,z)->sysBoundaryFlag; +// std::cout << std::endl; +// } +// } +// } +// abort(); } void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid, From cc1156aa510245d431ecc3d460c59699f79c429d Mon Sep 17 00:00:00 2001 From: ykempf Date: Mon, 15 Apr 2019 16:15:52 +0300 Subject: [PATCH 340/602] Fix initialisation of periodicity. --- vlasiator.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 72a8e8980..74745d247 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -398,9 +398,9 @@ int main(int argn,char* args[]) { convert(P::ycells_ini) * pow(2,P::amrMaxSpatialRefLevel), convert(P::zcells_ini) * pow(2,P::amrMaxSpatialRefLevel)}; - std::array periodicity{mpiGrid.topology.is_periodic(0), - mpiGrid.topology.is_periodic(1), - mpiGrid.topology.is_periodic(2)}; + std::array periodicity{sysBoundaries.isBoundaryPeriodic(0), + sysBoundaries.isBoundaryPeriodic(1), + sysBoundaries.isBoundaryPeriodic(2)}; FsGridCouplingInformation gridCoupling; FsGrid< std::array, 2> perBGrid(fsGridDimensions, comm, periodicity,gridCoupling); From bb3d22f39b5f9b5a470393147c188bfc9c929ffa Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 24 Apr 2019 11:42:37 +0300 Subject: [PATCH 341/602] Re-establish SPF in Makefile. --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 72b1fe278..1625c7f16 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ ARCH = ${VLASIATOR_ARCH} #set FP precision to SP (single) or DP (double) FP_PRECISION = DP #Set floating point precision for distribution function to SPF (single) or DPF (double) -DISTRIBUTION_FP_PRECISION = DPF +DISTRIBUTION_FP_PRECISION = SPF #override flags if we are building testpackage: ifneq (,$(findstring testpackage,$(MAKECMDGOALS))) From f158417d016ebbfec618218b9e1e46e4c2d30a03 Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 24 Apr 2019 11:43:31 +0300 Subject: [PATCH 342/602] Added some fsgrid neighbour updates that might well be necessary. 
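[Editor's note on PATCH 342] The diff below inserts updateGhostCells() calls after each stage that writes local fsgrid data (project B-field, moments, volume fields). The underlying rule: any grid whose local cells were just written must refresh its ghost (halo) layer before a neighbour stencil reads it. A hedged, self-contained sketch of that write-then-sync pattern; Grid, writeLocal and updateGhostCells here are illustrative stand-ins, not the fsgrid API:

   #include <vector>

   struct Grid {
      std::vector<double> local;   // cells owned by this rank
      std::vector<double> ghosts;  // copies of neighbouring ranks' cells
      void writeLocal(double v) { local.assign(8, v); }
      // Stand-in: a real halo exchange would copy the *neighbours'* local
      // cells here via MPI; this single-rank toy just mirrors its own.
      void updateGhostCells() { ghosts = local; }
   };

   int main() {
      Grid perB;
      perB.writeLocal(1.0);    // e.g. setProjectBField just filled the local cells
      perB.updateGhostCells(); // halo must be refreshed before any stencil read
      return 0;
   }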
--- grid.cpp | 5 ++++- vlasiator.cpp | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/grid.cpp b/grid.cpp index b138890f6..55ada0192 100644 --- a/grid.cpp +++ b/grid.cpp @@ -312,16 +312,19 @@ void initializeGrids( phiprof::start("setProjectBField"); project.setProjectBField(perBGrid, BgBGrid, technicalGrid); + perBGrid.updateGhostCells(); + BgBGrid.updateGhostCells(); phiprof::stop("setProjectBField"); phiprof::start("Finish fsgrid setup"); getFieldDataFromFsGrid(perBGrid, technicalGrid, mpiGrid, cells, CellParams::PERBX); getBgFieldsAndDerivativesFromFsGrid(BgBGrid, technicalGrid, mpiGrid, cells); - BgBGrid.updateGhostCells(); // WARNING this means moments and dt2 moments are the same here. feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,false); feedMomentsIntoFsGrid(mpiGrid, cells, momentsDt2Grid,false); + momentsGrid.updateGhostCells(); + momentsDt2Grid.updateGhostCells(); phiprof::stop("Finish fsgrid setup"); phiprof::stop("Set initial state"); diff --git a/vlasiator.cpp b/vlasiator.cpp index 2f844ce78..61321cbec 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -520,6 +520,8 @@ int main(int argn,char* args[]) { phiprof::start("getVolumeFieldsFromFsGrid"); // These should be done by initializeFieldPropagator() if the propagation is turned off. + volGrid.updateGhostCells(); + technicalGrid.updateGhostCells(); getVolumeFieldsFromFsGrid(volGrid, technicalGrid, mpiGrid, cells); phiprof::stop("getVolumeFieldsFromFsGrid"); @@ -942,7 +944,6 @@ int main(int argn,char* args[]) { phiprof::start("Propagate"); //Propagate the state of simulation forward in time by dt: - if (P::propagateVlasovTranslation || P::propagateVlasovAcceleration ) { phiprof::start("Update system boundaries (Vlasov pre-translation)"); sysBoundaries.applySysBoundaryVlasovConditions(mpiGrid, P::t+0.5*P::dt); @@ -1036,6 +1037,8 @@ int main(int argn,char* args[]) { phiprof::start("getVolumeFieldsFromFsGrid"); // Copy results back from fsgrid. 
+ volGrid.updateGhostCells(); + technicalGrid.updateGhostCells(); getVolumeFieldsFromFsGrid(volGrid, technicalGrid, mpiGrid, cells); phiprof::stop("getVolumeFieldsFromFsGrid"); phiprof::stop("Propagate Fields",cells.size(),"SpatialCells"); From 36930d011dcd2136663f91cb331deecfddaed13b Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 24 Apr 2019 12:00:54 +0300 Subject: [PATCH 343/602] Added translation test with amr --- .../tests/transtest_amr/transtest_amr.cfg | 103 ++++++++++++++++++ 1 file changed, 103 insertions(+) create mode 100644 testpackage/tests/transtest_amr/transtest_amr.cfg diff --git a/testpackage/tests/transtest_amr/transtest_amr.cfg b/testpackage/tests/transtest_amr/transtest_amr.cfg new file mode 100644 index 000000000..9866cbfdf --- /dev/null +++ b/testpackage/tests/transtest_amr/transtest_amr.cfg @@ -0,0 +1,103 @@ +dynamic_timestep = 1 +project = testAmr +ParticlePopulations = proton +propagate_field = 0 +propagate_vlasov_acceleration = 0 +propagate_vlasov_translation = 1 + +[proton_properties] +mass = 1 +mass_units = PROTON +charge = 1 + +[io] +diagnostic_write_interval = 180.0 +write_initial_state = 1 + +system_write_t_interval = 0.01 +system_write_file_name = fullf +system_write_distribution_stride = 1 +system_write_distribution_xline_stride = 0 +system_write_distribution_yline_stride = 0 +system_write_distribution_zline_stride = 0 + +[AMR] +max_spatial_level = 2 +box_half_width_x = 1 +box_half_width_y = 1 +box_half_width_z = 1 +box_center_x = 1.0e6 +box_center_y = 1.0e6 +box_center_z = 1.0e6 + +[gridbuilder] +x_length = 8 +y_length = 8 +z_length = 8 +x_min = -1.0e6 +x_max = 1.0e6 +y_min = -1.0e6 +y_max = 1.0e6 +z_min = -1.0e6 +z_max = 1.0e6 +t_max = 181.0 + +[proton_vspace] +vx_min = -2.0e6 +vx_max = +2.0e6 +vy_min = -2.0e6 +vy_max = +2.0e6 +vz_min = -2.0e6 +vz_max = +2.0e6 +vx_length = 1 +vy_length = 1 +vz_length = 1 +max_refinement_level = 1 +[proton_sparse] +minValue = 1.0e-16 + +[boundaries] +periodic_x = yes +periodic_y = yes +periodic_z = yes + +[variables] +output = populations_Rho +output = B +output = Pressure +output = populations_V +output = E +output = MPIrank +output = populations_Blocks +#output = VelocitySubSteps + +diagnostic = populations_Blocks +#diagnostic = Pressure +#diagnostic = populations_Rho +#diagnostic = populations_RhoLossAdjust +#diagnostic = populations_RhoLossVelBoundary + +[testAmr] +#magnitude of 1.82206867e-10 gives a period of 360s, useful for testing... 
+Bx = 1.2e-10 +By = 0.8e-10 +Bz = 1.1135233442526334e-10 +magXPertAbsAmp = 0 +magYPertAbsAmp = 0 +magZPertAbsAmp = 0 +densityModel = uniform +nVelocitySamples = 3 + +[proton_testAmr] +n = 1 +Vx = 5e5 +Vy = 5e5 +Vz = 0.0 +Tx = 500000.0 +Ty = 500000.0 +Tz = 500000.0 +rho = 1.0e6 +rhoPertAbsAmp = 1.0e5 + +[loadBalance] +algorithm = RCB \ No newline at end of file From e67341364ab0dbefa8ba4e58f184414263bea4e0 Mon Sep 17 00:00:00 2001 From: Sebastian von Alfthan Date: Wed, 24 Apr 2019 12:02:34 +0300 Subject: [PATCH 344/602] Added first new optimized coupling --- fieldsolver/gridGlue.cpp | 246 ++++++++++++++++++++++++++++++++------- 1 file changed, 205 insertions(+), 41 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 80f4053dd..50d33e761 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -24,57 +24,221 @@ int getNumberOfCellsOnMaxRefLvl(dccrg::Dccrg FSGRID + + onDccrgMapRemoteProcess maps fsgrid processes (key) => set of dccrg cellIDs owned by current rank that map to the fsgrid cells owned by fsgrid process (val) + + onFsgridMapRemoteProcess maps dccrg processes (key) => set of dccrg cellIDs owned by dccrg-process that map to current rank fsgrid cells + onFsgridMapCells maps remote dccrg CellIDs to local fsgrid cells +*/ + +template void computeCoupling(dccrg::Dccrg& mpiGrid, + const std::vector& cells, + FsGrid< T, stencil>& momentsGrid, + std::map >& onDccrgMapRemoteProcess, + std::map >& onFsgridMapRemoteProcess, + std::map >& onFsgridMapCells + ) { + + //sorted list of dccrg cells. cells is typicall already sorted, but just to make sure.... + std::vector dccrgCells = cells; + std::sort(dccrgCells.begin(), dccrgCells.end()); + + //make sure the datastructures are clean + onDccrgMapRemoteProcess.clear(); + onFsgridMapRemoteProcess.clear(); + onFsgridMapCells.clear(); + + + //size of fsgrid local part + const std::array gridDims(momentsGrid.getLocalSize()); + + + //Compute what we will receive, and where it should be stored + for (int k=0; k globalIndices = momentsGrid.getGlobalIndices(i,j,k); + const dccrg::Types<3>::indices_t indices = {{(uint64_t)globalIndices[0], + (uint64_t)globalIndices[1], + (uint64_t)globalIndices[2]}}; //cast to avoid warnings + CellID dccrgCell = mpiGrid.get_existing_cell(indices, 0, mpiGrid.mapping.get_maximum_refinement_level()); + int process = mpiGrid.get_process(dccrgCell); + int64_t fsgridLid = momentsGrid.LocalIDForCoords(i,j,k); + int64_t fsgridGid = momentsGrid.GlobalIDForCoords(i,j,k); + onFsgridMapRemoteProcess[process].insert(dccrgCell); //cells are ordered (sorted) in set + onFsgridMapCells[dccrgCell].push_back(fsgridLid); + } + } + } + + // Compute where to send data and what to send + for(int i=0; i< dccrgCells.size(); i++) { + //compute to which processes this cell maps + std::vector fsCells = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgCells[i]); + + //loop over fsgrid cells which this dccrg cell maps to + for (auto const &fsCellID : fsCells) { + int process = momentsGrid.getTaskForGlobalID(fsCellID).first; //process on fsgrid + onDccrgMapRemoteProcess[process].insert(dccrgCells[i]); //add to map + } + + } + + //debug + // int rank, nProcs; + // int dRank=1; + // MPI_Comm_rank(MPI_COMM_WORLD, &rank); + // MPI_Comm_size(MPI_COMM_WORLD, &nProcs); + + // if(rank==dRank){ + // for ( auto const &msg: onDccrgMapRemoteProcess) { + // printf("SND %d => %d :\n", rank, msg.first); + // for ( auto const &id: msg.second) { + // printf(" %ld ", id); + // } + // printf("\n"); + // } + // } + // 
MPI_Barrier(MPI_COMM_WORLD); + // for(int r = 0; r < nProcs; r++){ + // if(rank == r){ + // for ( auto const &msg: onFsgridMapRemoteProcess) { + // if (msg.first == dRank) { + // printf("RCV %d => %d :\n", msg.first, rank); + // for ( auto const &id: msg.second) { + // printf(" %ld ", id); + // } + // printf("\n"); + // } + // } + // } + // MPI_Barrier(MPI_COMM_WORLD); + // } + +} void feedMomentsIntoFsGrid(dccrg::Dccrg& mpiGrid, const std::vector& cells, FsGrid< std::array, 2>& momentsGrid, bool dt2 /*=false*/) { - int nCells = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); - momentsGrid.setupForTransferIn(nCells); - std::vector< std::array > transferBuffer(cells.size()); - - // Fill from cellParams -#pragma omp parallel for - for(uint i = 0; i < cells.size(); ++i) { - CellID dccrgId = cells[i]; - auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); - - std::array* thisCellData = &transferBuffer[i]; - - if(dt2) { - thisCellData->at(fsgrids::moments::RHOM) = cellParams[CellParams::RHOM_DT2]; - thisCellData->at(fsgrids::moments::RHOQ) = cellParams[CellParams::RHOQ_DT2]; - thisCellData->at(fsgrids::moments::VX) = cellParams[CellParams::VX_DT2]; - thisCellData->at(fsgrids::moments::VY) = cellParams[CellParams::VY_DT2]; - thisCellData->at(fsgrids::moments::VZ) = cellParams[CellParams::VZ_DT2]; - thisCellData->at(fsgrids::moments::P_11) = cellParams[CellParams::P_11_DT2]; - thisCellData->at(fsgrids::moments::P_22) = cellParams[CellParams::P_22_DT2]; - thisCellData->at(fsgrids::moments::P_33) = cellParams[CellParams::P_33_DT2]; + int ii; + //sorted list of dccrg cells. cells is typicall already sorted, but just to make sure.... + std::vector dccrgCells = cells; + std::sort(dccrgCells.begin(), dccrgCells.end()); + + //Datastructure for coupling + std::map > onDccrgMapRemoteProcess; + std::map > onFsgridMapRemoteProcess; + std::map > onFsgridMapCells; + + // map receive process => receive buffers + std::map > receivedData; + + // send buffers to each process + std::map > sendData; + + //list of requests + std::vector sendRequests; + std::vector receiveRequests; + + + //DEBUG IN + // for(int i = 0;i < cells.size();i++){ + // auto cellParams = mpiGrid[cells[i]]->get_cell_parameters(); + // if(!dt2) + // printf("IN %ld: %g %g , %g %g %g \n", cells[i],cellParams[CellParams::RHOM],cellParams[CellParams::RHOQ],cellParams[CellParams::VX],cellParams[CellParams::VY], cellParams[CellParams::VZ]); + // else + // printf("IN %ld: %g %g , %g %g %g \n", cells[i],cellParams[CellParams::RHOM_DT2],cellParams[CellParams::RHOQ_DT2],cellParams[CellParams::VX_DT2],cellParams[CellParams::VY_DT2], cellParams[CellParams::VZ_DT2]); + // } + + //computeCoupling + computeCoupling(mpiGrid, cells, momentsGrid, onDccrgMapRemoteProcess, onFsgridMapRemoteProcess, onFsgridMapCells); + + // Post receives + receiveRequests.resize(onFsgridMapRemoteProcess.size()); + ii=0; + for(auto const &receives: onFsgridMapRemoteProcess){ + int process = receives.first; + int count = receives.second.size(); + receivedData[process].resize(count * fsgrids::moments::N_MOMENTS); + MPI_Irecv(receivedData[process].data(), count * fsgrids::moments::N_MOMENTS * sizeof(Real), + MPI_BYTE, process, 1, MPI_COMM_WORLD,&(receiveRequests[ii++])); + } + + // Launch sends + ii=0; + sendRequests.resize(onDccrgMapRemoteProcess.size()); + for (auto const &snd : onDccrgMapRemoteProcess){ + int targetProc = snd.first; + auto& sendBuffer=sendData[targetProc]; + for(CellID sendCell: snd.second){ + //Collect data to send for this dccrg cell + auto cellParams = 
mpiGrid[sendCell]->get_cell_parameters(); + if(!dt2) { + sendBuffer.push_back(cellParams[CellParams::RHOM]); + sendBuffer.push_back(cellParams[CellParams::RHOQ]); + sendBuffer.push_back(cellParams[CellParams::VX]); + sendBuffer.push_back(cellParams[CellParams::VY]); + sendBuffer.push_back(cellParams[CellParams::VZ]); + sendBuffer.push_back(cellParams[CellParams::P_11]); + sendBuffer.push_back(cellParams[CellParams::P_22]); + sendBuffer.push_back(cellParams[CellParams::P_33]); } else { - thisCellData->at(fsgrids::moments::RHOM) = cellParams[CellParams::RHOM]; - thisCellData->at(fsgrids::moments::RHOQ) = cellParams[CellParams::RHOQ]; - thisCellData->at(fsgrids::moments::VX) = cellParams[CellParams::VX]; - thisCellData->at(fsgrids::moments::VY) = cellParams[CellParams::VY]; - thisCellData->at(fsgrids::moments::VZ) = cellParams[CellParams::VZ]; - thisCellData->at(fsgrids::moments::P_11) = cellParams[CellParams::P_11]; - thisCellData->at(fsgrids::moments::P_22) = cellParams[CellParams::P_22]; - thisCellData->at(fsgrids::moments::P_33) = cellParams[CellParams::P_33]; + sendBuffer.push_back(cellParams[CellParams::RHOM_DT2]); + sendBuffer.push_back(cellParams[CellParams::RHOQ_DT2]); + sendBuffer.push_back(cellParams[CellParams::VX_DT2]); + sendBuffer.push_back(cellParams[CellParams::VY_DT2]); + sendBuffer.push_back(cellParams[CellParams::VZ_DT2]); + sendBuffer.push_back(cellParams[CellParams::P_11_DT2]); + sendBuffer.push_back(cellParams[CellParams::P_22_DT2]); + sendBuffer.push_back(cellParams[CellParams::P_33_DT2]); } - } - - - - for (uint i = 0;i < cells.size(); ++i) { - CellID dccrgId = cells[i]; - const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); - for (auto fsgridId : fsgridIds) { - momentsGrid.transferDataIn(fsgridId, &transferBuffer[i]); + } + int count = sendBuffer.size(); //note, compared to receive this includes all elements to be sent + MPI_Isend(sendBuffer.data(), sendBuffer.size() * sizeof(Real), + MPI_BYTE, targetProc, 1, MPI_COMM_WORLD,&(sendRequests[ii])); + ii++; + } + + + MPI_Waitall(receiveRequests.size(), receiveRequests.data(), MPI_STATUSES_IGNORE); + for(auto const &receives: onFsgridMapRemoteProcess){ + int process = receives.first; //data received from this process + Real* receiveBuffer = receivedData[process].data(); // data received from process + for(auto const &cell: receives.second){ //loop over cellids (dccrg) for receive + // this part heavily relies on both sender and receiver having cellids sorted! + for(auto lid: onFsgridMapCells[cell]){ + std::array * fsgridData = momentsGrid.get(lid); + for(int l = 0; l < fsgrids::moments::N_MOMENTS; l++) { + fsgridData->at(l) = receiveBuffer[l]; + } } - } + + receiveBuffer+=fsgrids::moments::N_MOMENTS; + } + } + + MPI_Waitall(sendRequests.size(), sendRequests.data(), MPI_STATUSES_IGNORE); + + //DEBUG OUT + //size of fsgrid local part + // const std::array gridDims(momentsGrid.getLocalSize()); + //Compute what we will receive, and where it should be stored + // for (int k=0; k * fsgridData = momentsGrid.get(fsgridLid); + // printf("OUT %ld (+1): %g %g , %g %g %g \n", fsgridGid + 1, + // fsgridData->at(0), fsgridData->at(1), fsgridData->at(2), fsgridData->at(3),fsgridData->at(4)); + // } + // } + // } + // MPI_Barrier(MPI_COMM_WORLD); - // Finish the actual transfer - momentsGrid.finishTransfersIn(); } From ad9dd6c1a90795016d90d4da7e6ccef4a19db469 Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 24 Apr 2019 14:26:30 +0300 Subject: [PATCH 345/602] Re-deactivated the timed barriers. 
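[Editor's note on PATCH 345] The diff below restores the #ifdef NDEBUG early return in addTimedBarrier(), so optimised builds skip the synchronisation entirely while debug builds still time it. A standalone sketch of the same gate, with the phiprof timers of the real code replaced by a plain MPI_Wtime pair:

   #include <mpi.h>
   #include <cstdio>

   // Debug-only timed barrier: compiled out of release (NDEBUG) builds.
   void timedBarrier(const char* name) {
   #ifdef NDEBUG
      (void)name; // release build: no barrier, no timing overhead
   #else
      const double t0 = MPI_Wtime();
      MPI_Barrier(MPI_COMM_WORLD);
      std::printf("%s: barrier took %.6f s\n", name, MPI_Wtime() - t0);
   #endif
   }

   int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      timedBarrier("load-balance"); // label is arbitrary
      MPI_Finalize();
      return 0;
   }

The barrier makes per-rank phase timings comparable, but it also serialises progress, which is why it is only worth paying for in debug builds.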
--- vlasiator.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 61321cbec..a959a8b5b 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -82,10 +82,10 @@ bool globalflags::balanceLoad = 0; ObjectWrapper objectWrapper; void addTimedBarrier(string name){ -//#ifdef NDEBUG -// //let's not do a barrier -// return; -//#endif +#ifdef NDEBUG +//let's not do a barrier + return; +#endif int bt=phiprof::initializeTimer(name,"Barriers","MPI"); phiprof::start(bt); MPI_Barrier(MPI_COMM_WORLD); From 6b9403b97f4893308e115d693676e02bf707392b Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 24 Apr 2019 14:36:24 +0300 Subject: [PATCH 346/602] Fixed fsgrid version in Makefile. --- MAKE/Makefile.sisu_gcc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/MAKE/Makefile.sisu_gcc b/MAKE/Makefile.sisu_gcc index 1fe1d172e..7aedb3bc9 100644 --- a/MAKE/Makefile.sisu_gcc +++ b/MAKE/Makefile.sisu_gcc @@ -84,7 +84,6 @@ INC_PROFILE = -I$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_V INC_EIGEN = -I$(LIBRARY_PREFIX)/eigen/ INC_DCCRG = -I$(LIBRARY_PREFIX)/dccrg_new_neighbours/ INC_VECTORCLASS = -I$(LIBRARY_PREFIX)/vectorclass -#INC_FSGRID = -I$(LIBRARY_PREFIX)/fsgrid -INC_FSGRID = -I/homeappl/home/kempf/fsgrid/ +INC_FSGRID = -I$(LIBRARY_PREFIX)/fsgrid INC_DCCRG = -I/homeappl/home/koskelat/lib/dccrg/ From aa1e09c7018687cf72c460830a22fffcdc938c5e Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 24 Apr 2019 14:50:58 +0300 Subject: [PATCH 347/602] Removed extra INC_DCCRG. --- MAKE/Makefile.sisu_gcc | 2 -- 1 file changed, 2 deletions(-) diff --git a/MAKE/Makefile.sisu_gcc b/MAKE/Makefile.sisu_gcc index 7aedb3bc9..bbfc2c8c5 100644 --- a/MAKE/Makefile.sisu_gcc +++ b/MAKE/Makefile.sisu_gcc @@ -85,5 +85,3 @@ INC_EIGEN = -I$(LIBRARY_PREFIX)/eigen/ INC_DCCRG = -I$(LIBRARY_PREFIX)/dccrg_new_neighbours/ INC_VECTORCLASS = -I$(LIBRARY_PREFIX)/vectorclass INC_FSGRID = -I$(LIBRARY_PREFIX)/fsgrid -INC_DCCRG = -I/homeappl/home/koskelat/lib/dccrg/ - From f8223bbee9fe175b1d412e328ae3d076759a5635 Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 25 Apr 2019 09:25:56 +0300 Subject: [PATCH 348/602] Removed threading in setBackgroundField as unsafe. --- backgroundfield/backgroundfield.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backgroundfield/backgroundfield.cpp b/backgroundfield/backgroundfield.cpp index 3e1e2d5b4..1adf3f71c 100644 --- a/backgroundfield/backgroundfield.cpp +++ b/backgroundfield/backgroundfield.cpp @@ -59,7 +59,7 @@ void setBackgroundField( auto localSize = BgBGrid.getLocalSize(); - #pragma omp parallel for collapse(3) + // Do not thread this blindly, the bgFunction.set* calls below are not thread-safe at the moment. 
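// [Editor's sketch, comments only so the surrounding diff stays intact.]
// The collapse(3) pragma was removed above because all threads share one
// fieldfunction object whose set* calls mutate internal state. If threading
// is wanted back, one hedged option (assuming the functor is cheap to copy;
// this is not the project's current code) is a thread-private copy:
//
//    #pragma omp parallel
//    {
//       auto localFunction = bgFunction; // per-thread copy, no shared state
//       #pragma omp for collapse(3)
//       for (int x = 0; x < localSize[0]; ++x)
//          for (int y = 0; y < localSize[1]; ++y)
//             for (int z = 0; z < localSize[2]; ++z) {
//                // ... evaluate localFunction exactly as bgFunction is below ...
//             }
//    }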
for (int x = 0; x < localSize[0]; ++x) { for (int y = 0; y < localSize[1]; ++y) { for (int z = 0; z < localSize[2]; ++z) { From b8602a3562633d8d27041b59480d811daaf9c9d1 Mon Sep 17 00:00:00 2001 From: Sebastian von Alfthan Date: Thu, 25 Apr 2019 09:43:29 +0300 Subject: [PATCH 349/602] Work in progress, volume field communications --- fieldsolver/gridGlue.cpp | 148 +++++++++++++++++++++++++++++++-------- 1 file changed, 120 insertions(+), 28 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 5b34e5fd7..671ff02cc 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -141,17 +141,7 @@ void feedMomentsIntoFsGrid(dccrg::Dccrg& //list of requests std::vector sendRequests; std::vector receiveRequests; - - - //DEBUG IN - // for(int i = 0;i < cells.size();i++){ - // auto cellParams = mpiGrid[cells[i]]->get_cell_parameters(); - // if(!dt2) - // printf("IN %ld: %g %g , %g %g %g \n", cells[i],cellParams[CellParams::RHOM],cellParams[CellParams::RHOQ],cellParams[CellParams::VX],cellParams[CellParams::VY], cellParams[CellParams::VZ]); - // else - // printf("IN %ld: %g %g , %g %g %g \n", cells[i],cellParams[CellParams::RHOM_DT2],cellParams[CellParams::RHOQ_DT2],cellParams[CellParams::VX_DT2],cellParams[CellParams::VY_DT2], cellParams[CellParams::VZ_DT2]); - // } - + //computeCoupling computeCoupling(mpiGrid, cells, momentsGrid, onDccrgMapRemoteProcess, onFsgridMapRemoteProcess, onFsgridMapCells); @@ -221,27 +211,129 @@ void feedMomentsIntoFsGrid(dccrg::Dccrg& MPI_Waitall(sendRequests.size(), sendRequests.data(), MPI_STATUSES_IGNORE); - //DEBUG OUT - //size of fsgrid local part - // const std::array gridDims(momentsGrid.getLocalSize()); - //Compute what we will receive, and where it should be stored - // for (int k=0; k * fsgridData = momentsGrid.get(fsgridLid); - // printf("OUT %ld (+1): %g %g , %g %g %g \n", fsgridGid + 1, - // fsgridData->at(0), fsgridData->at(1), fsgridData->at(2), fsgridData->at(3),fsgridData->at(4)); - // } - // } - // } - // MPI_Barrier(MPI_COMM_WORLD); +} + +void getVolumeFieldsFromFsGrid(FsGrid< std::array, 2>& volumeFieldsGrid, + dccrg::Dccrg& mpiGrid, + const std::vector& cells) { + + const int fieldsToCommunicate = 12; + struct Average { + Real sums[fieldsToCommunicate]; + int cells; + } + + + int ii; + //sorted list of dccrg cells. cells is typicall already sorted, but just to make sure.... 
+ std::vector dccrgCells = cells; + std::sort(dccrgCells.begin(), dccrgCells.end()); + + //Datastructure for coupling + std::map > onDccrgMapRemoteProcess; + std::map > onFsgridMapRemoteProcess; + std::map > onFsgridMapCells; + + // map receive process => receive buffers + std::map > receivedData; + + // send buffers to each process + std::map > sendData; + + //list of requests + std::vector sendRequests; + std::vector receiveRequests; + + + //computeCoupling + computeCoupling(mpiGrid, cells, momentsGrid, onDccrgMapRemoteProcess, onFsgridMapRemoteProcess, onFsgridMapCells); + + //post receives + ii=0; + receiveRequests.resize(onDccrgMapRemoteProcess.size()); + for (auto const &rcv : onDccrgMapRemoteProcess){ + int remoteRank = rcv.first; + int count = rcv.second.size(); + auto& receiveBuffer=receiveData[remoteRank]; + + receiveBuffer.resize(count); + MPI_Irecv(receiveBuffer.data(), count * sizeof(Average), + MPI_BYTE, remoteRank, 1, MPI_COMM_WORLD,&(receiveRequests[ii++])); + } + + + /* + onDccrgMapRemoteProcess maps fsgrid processes (key) => set of dccrg cellIDs owned by current rank that map to the fsgrid cells owned by fsgrid process (val) + onFsgridMapRemoteProcess maps dccrg processes (key) => set of dccrg cellIDs owned by dccrg-process that map to current rank fsgrid cells + onFsgridMapCells maps remote dccrg CellIDs to local fsgrid cells + */ + + //compute average and weight for each field that we want to send to dccrg grid + for(auto const &snd: onFsgridMapRemoteProcess){ + int remoteRank = snd.first; + int count = snd.second.size(); + auto& sendBuffer = sendData[remoteRank]; + sendBuffer.resize(count); + int ii=0; + + for(auto const dccrgCell: snd.second){ + //loop over dccrg cells to which we shall send data for this remoteRank + auto const &fsgridCells = onFsgridMapCells[dccrgCell]; + for (auto const fsgridCell: fsgridCells){ + //loop over fsgrid cells for which we compute the average that is sent to dccrgCell on rank remoteRank + sendBuffer[ii].sum[0] + = //work in progress, start to add stuff here; + sendBuffer[ii].cells++; + + + /* + iCellParams.push_back(std::make_pair(CellParams::PERBXVOL, fsgrids::volfields::PERBXVOL)); + iCellParams.push_back(std::make_pair(CellParams::PERBYVOL, fsgrids::volfields::PERBYVOL)); + iCellParams.push_back(std::make_pair(CellParams::PERBZVOL, fsgrids::volfields::PERBZVOL)); + iCellParams.push_back(std::make_pair(CellParams::EXVOL, fsgrids::volfields::EXVOL)); + iCellParams.push_back(std::make_pair(CellParams::EYVOL, fsgrids::volfields::EYVOL)); + iCellParams.push_back(std::make_pair(CellParams::EZVOL, fsgrids::volfields::EZVOL)); + + // Build lists of index pairs to dccrg and fsgrid + std::vector> iDerivativesBVOL; + iDerivativesBVOL.reserve(6); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBXVOLdy, fsgrids::volfields::dPERBXVOLdy)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBXVOLdz, fsgrids::volfields::dPERBXVOLdz)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBYVOLdx, fsgrids::volfields::dPERBYVOLdx)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBYVOLdz, fsgrids::volfields::dPERBYVOLdz)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBZVOLdx, fsgrids::volfields::dPERBZVOLdx)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBZVOLdy, fsgrids::volfields::dPERBZVOLdy)); + */ + } + ii+=fieldsToCommunicate; + + } + receivedData[process].resize(count * fsgrids::moments::N_MOMENTS); + MPI_Irecv(receivedData[process].data(), count * 
fsgrids::moments::N_MOMENTS * sizeof(Real), + MPI_BYTE, process, 1, MPI_COMM_WORLD,&(receiveRequests[ii++])); + } + + + //post sends + sendRequests.resize(onFsgridMapRemoteProcess.size()); + ii=0; + + for(auto const &sends: onFsgridMapRemoteProcess){ + int process = sends.first; + int count = sends.second.size(); + senddData[process].resize(count * fsgrids::moments::N_MOMENTS); + MPI_Irecv(senddData[process].data(), count * fsgrids::moments::N_MOMENTS * sizeof(Real), + MPI_BYTE, process, 1, MPI_COMM_WORLD,&(sendRequests[ii++])); + } + + + //handle receives, compute the weighted average of these } -void getVolumeFieldsFromFsGrid( + + +void getVolumeFieldsFromFsGridOld( FsGrid< std::array, 2>& volumeFieldsGrid, FsGrid< fsgrids::technical, 2>& technicalGrid, dccrg::Dccrg& mpiGrid, From 824f4c5646dc3037a8058525b058e0212d3a06fa Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 25 Apr 2019 11:43:08 +0300 Subject: [PATCH 350/602] Added sends to code in progress, no receive handling yet. --- fieldsolver/gridGlue.cpp | 80 +++++++++++++++++++++------------------- 1 file changed, 42 insertions(+), 38 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 671ff02cc..1ca8bee9b 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -213,9 +213,12 @@ void feedMomentsIntoFsGrid(dccrg::Dccrg& } -void getVolumeFieldsFromFsGrid(FsGrid< std::array, 2>& volumeFieldsGrid, - dccrg::Dccrg& mpiGrid, - const std::vector& cells) { +void getVolumeFieldsFromFsGrid( + FsGrid< std::array, 2>& volumeFieldsGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, + dccrg::Dccrg& mpiGrid, + const std::vector& cells +) { const int fieldsToCommunicate = 12; struct Average { @@ -254,7 +257,7 @@ void getVolumeFieldsFromFsGrid(FsGrid< std::array> iDerivativesBVOL; - iDerivativesBVOL.reserve(6); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBXVOLdy, fsgrids::volfields::dPERBXVOLdy)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBXVOLdz, fsgrids::volfields::dPERBXVOLdz)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBYVOLdx, fsgrids::volfields::dPERBYVOLdx)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBYVOLdz, fsgrids::volfields::dPERBYVOLdz)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBZVOLdx, fsgrids::volfields::dPERBZVOLdx)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBZVOLdy, fsgrids::volfields::dPERBZVOLdy)); - */ + //loop over fsgrid cells for which we compute the average that is sent to dccrgCell on rank remoteRank + if(technicalGrid.get(fsgridCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { + continue; + } + std::array * cell = volumeFieldsGrid.get(fsgridCell); + + sendBuffer[ii].sums[0 ] += cell->at(fsgrids::volfields::PERBXVOL); + sendBuffer[ii].sums[1 ] += cell->at(fsgrids::volfields::PERBYVOL); + sendBuffer[ii].sums[2 ] += cell->at(fsgrids::volfields::PERBZVOL); + sendBuffer[ii].sums[3 ] += cell->at(fsgrids::volfields::EXVOL); + sendBuffer[ii].sums[4 ] += cell->at(fsgrids::volfields::EYVOL); + sendBuffer[ii].sums[5 ] += cell->at(fsgrids::volfields::EZVOL); + sendBuffer[ii].sums[6 ] += cell->at(fsgrids::volfields::dPERBXVOLdy); + sendBuffer[ii].sums[7 ] += cell->at(fsgrids::volfields::dPERBXVOLdz); + sendBuffer[ii].sums[8 ] += cell->at(fsgrids::volfields::dPERBXVOLdx); + sendBuffer[ii].sums[9 ] += cell->at(fsgrids::volfields::dPERBXVOLdz); + sendBuffer[ii].sums[10] += cell->at(fsgrids::volfields::dPERBXVOLdx); + sendBuffer[ii].sums[11] += 
cell->at(fsgrids::volfields::dPERBXVOLdy); + sendBuffer[ii].cells++; } - ii+=fieldsToCommunicate; + ii++; } - receivedData[process].resize(count * fsgrids::moments::N_MOMENTS); - MPI_Irecv(receivedData[process].data(), count * fsgrids::moments::N_MOMENTS * sizeof(Real), - MPI_BYTE, process, 1, MPI_COMM_WORLD,&(receiveRequests[ii++])); + } - //post sends - sendRequests.resize(onFsgridMapRemoteProcess.size()); + sendRequests.resize(onFsgridMapRemoteProcess.size()); ii=0; for(auto const &sends: onFsgridMapRemoteProcess){ int process = sends.first; int count = sends.second.size(); - senddData[process].resize(count * fsgrids::moments::N_MOMENTS); - MPI_Irecv(senddData[process].data(), count * fsgrids::moments::N_MOMENTS * sizeof(Real), + senddData[process].resize(count * sizeof(Average)); + MPI_Irecv(senddData[process].data(), count * sizeof(Average), MPI_BYTE, process, 1, MPI_COMM_WORLD,&(sendRequests[ii++])); } - - + + MPI_Waitall(receiveRequests.size(), receiveRequests.data(), MPI_STATUSES_IGNORE); + //handle receives, compute the weighted average of these - + + MPI_Waitall(sendRequests.size(), sendRequests.data(), MPI_STATUSES_IGNORE); + } From 1f295adda74f84593922f678c9c356f9fa9a7a1a Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Thu, 25 Apr 2019 11:48:49 +0300 Subject: [PATCH 351/602] Updated testpackage amr tests --- projects/testAmr/testAmr.cpp | 10 +++++++++- testpackage/small_test_definitions.sh | 15 +++++++++++++++ .../tests/Flowthrough_amr/Flowthrough_amr.cfg | 18 +++++++++--------- .../tests/transtest_amr/transtest_amr.cfg | 4 ++-- 4 files changed, 35 insertions(+), 12 deletions(-) diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index 4da00888e..93a6e6830 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -292,13 +292,16 @@ namespace projects { CellID myCell = mpiGrid.get_existing_cell(xyz); if (mpiGrid.refine_completely_at(xyz)) { +#ifndef NDEBUG std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; +#endif } } } } std::vector refinedCells = mpiGrid.stop_refining(true); if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << endl; +#ifndef NDEBUG if(refinedCells.size() > 0) { std::cout << "Refined cells produced by rank " << myRank << " are: "; for (auto cellid : refinedCells) { @@ -306,6 +309,7 @@ namespace projects { } std::cout << endl; } +#endif mpiGrid.balance_load(); @@ -322,7 +326,9 @@ namespace projects { CellID myCell = mpiGrid.get_existing_cell(xyz); if (mpiGrid.refine_completely_at(xyz)) { +#ifndef NDEBUG std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; +#endif } } } @@ -330,13 +336,15 @@ namespace projects { std::vector refinedCells = mpiGrid.stop_refining(true); if(myRank == MASTER_RANK) std::cout << "Finished second level of refinement" << endl; +#ifndef NDEBUG if(refinedCells.size() > 0) { std::cout << "Refined cells produced by rank " << myRank << " are: "; for (auto cellid : refinedCells) { std::cout << cellid << " "; } std::cout << endl; - } + } +#endif mpiGrid.balance_load(); } diff --git a/testpackage/small_test_definitions.sh b/testpackage/small_test_definitions.sh index b042b4924..8e5fb1882 100644 --- a/testpackage/small_test_definitions.sh +++ b/testpackage/small_test_definitions.sh @@ -124,3 +124,18 @@ comparison_vlsv[15]="fullf.0000001.vlsv" comparison_phiprof[15]="phiprof_0.txt" variable_names[15]="proton/rho proton/V proton/V proton/V B B B E E E protons" variable_components[15]="0 0 1 2 0 1 2 0 1 2" + +##AMR tests +# 
translation test
+test_name[16]="transtest_amr"
+comparison_vlsv[16]="fullf.0000001.vlsv"
+comparison_phiprof[16]="phiprof_0.txt"
+variable_names[16]="proton/rho proton/V proton/V proton/V protons"
+variable_components[16]="0 0 1 2"
+
+# Flowthrough test
+test_name[17]="Flowthrough_amr"
+comparison_vlsv[17]="bulk.0000001.vlsv"
+comparison_phiprof[17]="phiprof_0.txt"
+variable_names[17]="proton/rho proton/V proton/V proton/V B B B E E E"
+variable_components[17]="0 0 1 2 0 1 2 0 1 2"
diff --git a/testpackage/tests/Flowthrough_amr/Flowthrough_amr.cfg b/testpackage/tests/Flowthrough_amr/Flowthrough_amr.cfg
index c471090e1..ede3b7913 100644
--- a/testpackage/tests/Flowthrough_amr/Flowthrough_amr.cfg
+++ b/testpackage/tests/Flowthrough_amr/Flowthrough_amr.cfg
@@ -13,21 +13,21 @@ charge = 1
 [AMR]
 max_spatial_level = 2
-box_half_width_x = 2
-box_half_width_z = 2
-box_half_width_y = 2
+box_half_width_x = 1
+box_half_width_z = 1
+box_half_width_y = 1
 [gridbuilder]
-x_length = 16
+x_length = 14
 y_length = 8
 z_length = 8
-x_min = -8e7
-x_max = 8e7
+x_min = -7e7
+x_max = 7e7
 y_min = -4e7
 y_max = 4e7
 z_min = -4e7
 z_max = 4e7
-t_max = 162.0
+t_max = 182.0
 dt = 2.0
 [proton_vspace]
@@ -44,7 +44,7 @@ vz_length = 15
 [io]
 write_initial_state = 1
-system_write_t_interval = 160.0
+system_write_t_interval = 180.0
 system_write_file_name = bulk
 system_write_distribution_stride = 0
 system_write_distribution_xline_stride = 0
@@ -101,4 +101,4 @@ nSpaceSamples = 2
 nVelocitySamples = 2
 [loadBalance]
-algorithm = RANDOM
+algorithm = RCB
diff --git a/testpackage/tests/transtest_amr/transtest_amr.cfg b/testpackage/tests/transtest_amr/transtest_amr.cfg
index 9866cbfdf..6e53193c0 100644
--- a/testpackage/tests/transtest_amr/transtest_amr.cfg
+++ b/testpackage/tests/transtest_amr/transtest_amr.cfg
@@ -11,10 +11,10 @@ mass_units = PROTON
 charge = 1
 [io]
-diagnostic_write_interval = 180.0
+diagnostic_write_interval = 1
 write_initial_state = 1
-system_write_t_interval = 0.01
+system_write_t_interval = 180.0
 system_write_file_name = fullf
 system_write_distribution_stride = 1
 system_write_distribution_xline_stride = 0
From e6b7b6559bce2ae46a091a04cb2b682289b56e3f Mon Sep 17 00:00:00 2001
From: Markus Battarbee
Date: Thu, 25 Apr 2019 16:31:51 +0300
Subject: [PATCH 352/602] started work on vector potential dipole field, still need to implement derivatives
---
 Makefile | 5 +-
 backgroundfield/vectordipole.cpp | 184 +++++++++++++++++++++
 backgroundfield/vectordipole.hpp | 49 ++++++
 projects/Magnetosphere/Magnetosphere.cpp | 28 +++-
 projects/Magnetosphere/Magnetosphere.h | 5 +
 5 files changed, 269 insertions(+), 2 deletions(-)
 create mode 100644 backgroundfield/vectordipole.cpp
 create mode 100644 backgroundfield/vectordipole.hpp
diff --git a/Makefile b/Makefile
index 1625c7f16..1e1975331 100644
--- a/Makefile
+++ b/Makefile
@@ -185,7 +185,7 @@ DEPS_VLSVMOVER_AMR = ${DEPS_CELL} vlasovsolver_amr/vlasovmover.cpp vlasovsolver_
 #all objects for vlasiator
-OBJS = version.o memoryallocation.o backgroundfield.o quadr.o dipole.o linedipole.o constantfield.o integratefunction.o \
+OBJS = version.o memoryallocation.o backgroundfield.o quadr.o dipole.o linedipole.o vectordipole.o constantfield.o integratefunction.o \
 datareducer.o datareductionoperator.o dro_populations.o amr_refinement_criteria.o\
 donotcompute.o ionosphere.o outflow.o setbyuser.o setmaxwellian.o antisymmetric.o\
 sysboundary.o sysboundarycondition.o project_boundary.o particle_species.o\
@@ -251,6 +251,9 @@ dipole.o: backgroundfield/dipole.cpp backgroundfield/dipole.hpp 
backgroundfield/ linedipole.o: backgroundfield/linedipole.cpp backgroundfield/linedipole.hpp backgroundfield/fieldfunction.hpp backgroundfield/functions.hpp ${CMP} ${CXXFLAGS} ${FLAGS} -c backgroundfield/linedipole.cpp +vectordipole.o: backgroundfield/vectordipole.cpp backgroundfield/vectordipole.hpp backgroundfield/fieldfunction.hpp backgroundfield/functions.hpp + ${CMP} ${CXXFLAGS} ${FLAGS} -c backgroundfield/vectordipole.cpp + constantfield.o: backgroundfield/constantfield.cpp backgroundfield/constantfield.hpp backgroundfield/fieldfunction.hpp backgroundfield/functions.hpp ${CMP} ${CXXFLAGS} ${FLAGS} -c backgroundfield/constantfield.cpp diff --git a/backgroundfield/vectordipole.cpp b/backgroundfield/vectordipole.cpp new file mode 100644 index 000000000..4c5dc7688 --- /dev/null +++ b/backgroundfield/vectordipole.cpp @@ -0,0 +1,184 @@ +/* + * This file is part of Vlasiator. + * Copyright 2010-2016 Finnish Meteorological Institute + * + * For details of usage, see the COPYING file and read the "Rules of the Road" + * at http://www.physics.helsinki.fi/vlasiator/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ +/* +Background magnetic field class of Vlasiator. +*/ + +#include +#include +#include "vectordipole.hpp" +#include "../common.h" + +// tilt_angle_phi is from the z-axis in radians +// tilt_angle_theta is from the Sun-Earth-line in radians +void VectorDipole::initialize(const double moment,const double center_x, const double center_y, const double center_z, const double tilt_angle_phi=0, const double tilt_angle_theta=0, const double radius_f, const double radius_z){ + this->initialized = true; + + q[0]=-sin(tilt_angle_phi)*cos(tilt_angle_theta)*moment; + q[1]=-sin(tilt_angle_phi)*sin(tilt_angle_theta)*moment; + q[2]=-cos(tilt_angle_phi)*moment; + + center[0]=center_x; + center[1]=center_y; + center[2]=center_z; + + radius[0]=radius_f; + radius[1]=radius_z; +} + + + +double VectorDipole::call( double x, double y, double z) const +{ + const double minimumR=1e-3*physicalconstants::R_E; //The dipole field is defined to be outside of Earth, and units are in meters + if(this->initialized==false) + return 0.0; + double r[3]; + + r[0]= x-center[0]; + r[1]= y-center[1]; + r[2]= z-center[2]; + + double r2 = r[0]*r[0]+r[1]*r[1]+r[2]*r[2]; + + if(r2=radius[1]*radius[1]) + return 0.0; //set zero field and derivatives outside zero radius + + /* This function is called from within other calls, one component at a time. + The component in question is defined using the _fComponent index. 
*/ + + const double r1 = sqrt(r2); + const double r5 = (r2*r2*r1); + const double rdotq=q[0]*r[0] + q[1]*r[1] +q[2]*r[2]; + const double B=( 3*r[_fComponent]*rdotq-q[_fComponent]*r2)/r5; + + if(_derivative == 0) && (r1 <= radius[0]) + // Full dipole field within full radius + return B; + + if(_derivative == 1) && (r1 <= radius[0]){ + //first derivatives of full field + unsigned int sameComponent; + if(_dComponent==_fComponent) + sameComponent=1; + else + sameComponent=0; + + // TODO: verify that this doesn't assume dipole aligned with z + return -5*B*r[_dComponent]/r2+ + (3*q[_dComponent]*r[_fComponent] - + 2*q[_fComponent]*r[_dComponent] + + 3*rdotq*sameComponent)/r5; + } + + // Calculate vector potential within transition range + double qcrossr[3]; + qcrossr[0] = q[1]*r[2]-q[2]*r[1]; + qcrossr[1] = q[2]*r[0]-q[0]*r[2]; + qcrossr[2] = q[0]*r[1]-q[1]*r[0]; + const double A = qcrossr / (r2*r1); + // Coordinate within smootherstep function + const double Sx = -(r1-radius[1])/(radius[1]-radius[0]); + const double Sx2 = Sx*Sx; + // Smootherstep and its radial derivative + const double S2 = 6.*Sx2*Sx2*Sx - 15.*Sx2*Sx2 + 10.*Sx2*Sx; + const double dS2dr = -(30.*Sx2*Sx2 - 60.*Sx2*Sx + 30.*Sx2)/(radius[1]-radius[0]); + + // Radial unit vector at that location in cartesian components + double er[3]; + er[0]=r[0]/r1; + er[1]=r[1]/r1; + er[2]=r[2]/r1; + + // Cartesian derivatives of S2 + double dS2cart; + dS2cart[0] = er[0]*dS2dr; + dS2cart[1] = er[1]*dS2dr; + dS2cart[2] = er[2]*dS2dr; + + if(_derivative == 0) && (r1 > radius[0]) { + /* Within transition range (between radius[0] and radius[1]) we + multiply the magnetic field with the S2 smootherstep function + and an additional corrective term to remove divergence. This + is based on using the dipole field vector potential and scaling + it using the smootherstep function S2. + + Notation: + m = dipole moment (vector) + r = position vector + R = position distance + + The regular dipole field vector potential + A(r) = (mu0/4 pi R^3) * (q cross r) + + The smootherstep function + ( 0, x<=0 + S2(Sx) = ( 6x^5 -15x^4 +10x^3, 0<=x<=1 + ( 1, x>=1 + + Radial distance scaling for S2 + Sx = -(R-radius[1])/(radius[1]-radius[0]) + + The scaled vector potential is A'(r) = A(r)*S2(Sx) + + The scaled magnetic field is + del cross A'(r) + = S2(Sx) del cross A(r) + del S2(Sx) cross A(r) + = S2(sx) B(r) + del S2(Sx) cross A(r) + + */ + double correctionterm[3]; + correctionterm[0] = dS2cart[1]*A[2] - dS2cart[2]*A[1]; + correctionterm[1] = dS2cart[2]*A[0] - dS2cart[0]*A[2]; + correctionterm[2] = dS2cart[0]*A[1] - dS2cart[1]*A[0]; + + return B*S2 + correctionterm[_fComponent]; + } + + else if(_derivative == 1) && (r1 > radius[0]) { + // first derivatives of field calculated from diminishing vector potential + + // TODO: calculate derivatives and implement + unsigned int sameComponent; + if(_dComponent==_fComponent) + sameComponent=1; + else + sameComponent=0; + + return -5*B*r[_dComponent]/r2+ + (3*q[_dComponent]*r[_fComponent] - + 2*q[_fComponent]*r[_dComponent] + + 3*rdotq*sameComponent)/r5; + } + + return 0; // dummy, but prevents gcc from yelling +} + + + + + + diff --git a/backgroundfield/vectordipole.hpp b/backgroundfield/vectordipole.hpp new file mode 100644 index 000000000..bc775ba1a --- /dev/null +++ b/backgroundfield/vectordipole.hpp @@ -0,0 +1,49 @@ +/* + * This file is part of Vlasiator. 
+ * Copyright 2010-2016 Finnish Meteorological Institute + * + * For details of usage, see the COPYING file and read the "Rules of the Road" + * at http://www.physics.helsinki.fi/vlasiator/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ +/* +Background magnetic field class of Vlasiator. +*/ + +#ifndef VECTORDIPOLE_HPP +#define VECTORDIPOLE_HPP +#include "fieldfunction.hpp" + + + +class VectorDipole: public FieldFunction { +private: + bool initialized; + double q[3]; // Dipole moment; set to (0,0,moment) for z-aligned + double center[3]; // Coordinates where the dipole sits; set to (0,0,0) + double radius[2]; // Radial extents of full and zero dipole +public: + + VectorDipole(){ + this->initialized = false; + } + void initialize(const double moment,const double center_x, const double center_y, const double center_z, const double tilt_angle_phi, const double tilt_angle_theta, const double radius_f, const double radius_z); + virtual double call(double x, double y, double z) const; + virtual ~Dipole() {} +}; + +#endif + diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index c8c8c5b07..f752b5471 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -58,6 +58,11 @@ namespace projects { RP::add("Magnetosphere.refine_L1radius","Radius of L1-refined sphere", 1.59275e8); // 25 RE RP::add("Magnetosphere.refine_L1tailthick","Thickness of L1-refined tail region", 6.371e7); // 10 RE + RP::add("Magnetosphere.dipoleTiltPhi","Magnitude of dipole tilt in radians", 0.0); + RP::add("Magnetosphere.dipoleTiltTheta","Direction of dipole tilt from Sun-Earth-line in radians", 0.0); + RP::add("Magnetosphere.dipoleRadiusFull","Radius up to which dipole is at full strength, in metres", 1.59275e8); // 25 RE + RP::add("Magnetosphere.dipoleRadiusFull","Radius after which dipole is at zero strength, in metres", 1.9113e8); // 30 RE + // Per-population parameters for(uint i=0; i< getObjectWrapper().particleSpecies.size(); i++) { const std::string& pop = getObjectWrapper().particleSpecies[i].name; @@ -149,6 +154,22 @@ namespace projects { exit(1); } + if(!Readparameters::get("Magnetosphere.dipoleTiltPhi", this->dipoleTiltPhi)) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; + exit(1); + } + if(!Readparameters::get("Magnetosphere.dipoleTiltTheta", this->dipoleTiltTheta)) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; + exit(1); + } + if(!Readparameters::get("Magnetosphere.dipoleRadiusFull", this->dipoleRadiusFull)) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" 
<< endl; + exit(1); + } + if(!Readparameters::get("Magnetosphere.dipoleRadiusZero", this->dipoleRadiusZero)) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; + exit(1); + } // Per-population parameters for(uint i=0; i< getObjectWrapper().particleSpecies.size(); i++) { @@ -259,6 +280,7 @@ namespace projects { ) { Dipole bgFieldDipole; LineDipole bgFieldLineDipole; + VectorDipole bgVectorDipole; // The hardcoded constants of dipole and line dipole moments are obtained // from Daldorff et al (2014), see @@ -286,8 +308,12 @@ namespace projects { //Append mirror dipole bgFieldDipole.initialize(8e15 *this->dipoleScalingFactor, this->dipoleMirrorLocationX, 0.0, 0.0, 0.0 );//mirror setBackgroundField(bgFieldDipole, BgBGrid, true); + break; + case 4: // Vector potential dipole, vanishes after a given radius + bgVectorDipole.initialize(126.2e6 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, this->dipoleTiltPhi, this->dipoleTiltTheta, this->dipoleRadiusFull, this->dipoleRadiusZero ); + setBackgroundField(bgVectorDipole, BgBGrid); break; - + default: setBackgroundFieldToZero(BgBGrid); diff --git a/projects/Magnetosphere/Magnetosphere.h b/projects/Magnetosphere/Magnetosphere.h index 34ffe208a..4fe4ca993 100644 --- a/projects/Magnetosphere/Magnetosphere.h +++ b/projects/Magnetosphere/Magnetosphere.h @@ -90,6 +90,11 @@ namespace projects { Real refine_L1radius; Real refine_L1tailthick; + Real dipoleTiltPhi; + Real dipoleTiltTheta; + Real dipoleRadiusFull; + Real dipoleRadiusNone; + std::vector speciesParams; }; // class Magnetosphere } // namespace projects From 156917cfccfe0f78882478531fc0c78f8023ffbc Mon Sep 17 00:00:00 2001 From: Sebastian von Alfthan Date: Thu, 25 Apr 2019 22:01:28 +0300 Subject: [PATCH 353/602] wip - added posting sends and aggregating receives --- fieldsolver/gridGlue.cpp | 66 ++++++++++++++++++++++++++-------------- 1 file changed, 43 insertions(+), 23 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 1ca8bee9b..8c13bd003 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -193,6 +193,7 @@ void feedMomentsIntoFsGrid(dccrg::Dccrg& MPI_Waitall(receiveRequests.size(), receiveRequests.data(), MPI_STATUSES_IGNORE); + for(auto const &receives: onFsgridMapRemoteProcess){ int process = receives.first; //data received from this process Real* receiveBuffer = receivedData[process].data(); // data received from process @@ -224,7 +225,19 @@ void getVolumeFieldsFromFsGrid( struct Average { Real sums[fieldsToCommunicate]; int cells; - } + Average() { + cells = 0; + for(int i = 0; i < fieldsToCommunicate; i++){ + sums[i] = 0; + } + } + Average operator+=(const Average& rhs) { + this->cells += rhs.cells; + for(int i = 0; i < fieldsToCommunicate; i++){ + this->sums[i] += rhs.sums[i]; + } + } + }; int ii; @@ -249,7 +262,7 @@ void getVolumeFieldsFromFsGrid( //computeCoupling - computeCoupling(mpiGrid, cells, momentsGrid, onDccrgMapRemoteProcess, onFsgridMapRemoteProcess, onFsgridMapCells); + computeCoupling(mpiGrid, cells, volumeFieldsGrid, onDccrgMapRemoteProcess, onFsgridMapRemoteProcess, onFsgridMapCells); //post receives ii=0; @@ -258,7 +271,7 @@ void getVolumeFieldsFromFsGrid( int remoteRank = rcv.first; int count = rcv.second.size(); auto& receiveBuffer=receivedData[remoteRank]; - + receiveBuffer.resize(count); MPI_Irecv(receiveBuffer.data(), count * sizeof(Average), MPI_BYTE, remoteRank, 1, MPI_COMM_WORLD,&(receiveRequests[ii++])); @@ -277,23 +290,17 @@ void 
getVolumeFieldsFromFsGrid( int count = snd.second.size(); auto& sendBuffer = sendData[remoteRank]; sendBuffer.resize(count); - int ii=0; + ii=0; for(auto const dccrgCell: snd.second){ //loop over dccrg cells to which we shall send data for this remoteRank auto const &fsgridCells = onFsgridMapCells[dccrgCell]; - // Initialise send buffer values to 0 - for(int idx=0; idxsysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { continue; } - std::array * cell = volumeFieldsGrid.get(fsgridCell); - + std::array * cell = volumeFieldsGrid.get(fsgridCell); sendBuffer[ii].sums[0 ] += cell->at(fsgrids::volfields::PERBXVOL); sendBuffer[ii].sums[1 ] += cell->at(fsgrids::volfields::PERBYVOL); sendBuffer[ii].sums[2 ] += cell->at(fsgrids::volfields::PERBZVOL); @@ -302,33 +309,46 @@ void getVolumeFieldsFromFsGrid( sendBuffer[ii].sums[5 ] += cell->at(fsgrids::volfields::EZVOL); sendBuffer[ii].sums[6 ] += cell->at(fsgrids::volfields::dPERBXVOLdy); sendBuffer[ii].sums[7 ] += cell->at(fsgrids::volfields::dPERBXVOLdz); - sendBuffer[ii].sums[8 ] += cell->at(fsgrids::volfields::dPERBXVOLdx); - sendBuffer[ii].sums[9 ] += cell->at(fsgrids::volfields::dPERBXVOLdz); - sendBuffer[ii].sums[10] += cell->at(fsgrids::volfields::dPERBXVOLdx); - sendBuffer[ii].sums[11] += cell->at(fsgrids::volfields::dPERBXVOLdy); + sendBuffer[ii].sums[8 ] += cell->at(fsgrids::volfields::dPERBYVOLdx); + sendBuffer[ii].sums[9 ] += cell->at(fsgrids::volfields::dPERBYVOLdz); + sendBuffer[ii].sums[10] += cell->at(fsgrids::volfields::dPERBZVOLdx); + sendBuffer[ii].sums[11] += cell->at(fsgrids::volfields::dPERBZVOLdy); sendBuffer[ii].cells++; } ii++; - } - } //post sends sendRequests.resize(onFsgridMapRemoteProcess.size()); ii=0; - for(auto const &sends: onFsgridMapRemoteProcess){ - int process = sends.first; + int remoteRank = sends.first; int count = sends.second.size(); - senddData[process].resize(count * sizeof(Average)); - MPI_Irecv(senddData[process].data(), count * sizeof(Average), - MPI_BYTE, process, 1, MPI_COMM_WORLD,&(sendRequests[ii++])); + MPI_Isend(sendData[remoteRank].data(), count * sizeof(Average), + MPI_BYTE, remoteRank, 1, MPI_COMM_WORLD,&(sendRequests[ii++])); } MPI_Waitall(receiveRequests.size(), receiveRequests.data(), MPI_STATUSES_IGNORE); + + + //Aggregate receives, compute the weighted average of these + ii=0; + std::map aggregatedResult; + for (auto const &rcv : onDccrgMapRemoteProcess){ + int remoteRank = rcv.first; + std::vector& receiveBuffer=receivedData[remoteRank]; + ii=0; + for (CellID dccrgCell: rcv.second ) { + //aggregate result. 
Average strct has operator += and a constructor + aggregatedResult[dccrgCell] += receiveBuffer[ii++]; + } + } + - //handle receives, compute the weighted average of these + //TODO store in dccrg + + MPI_Waitall(sendRequests.size(), sendRequests.data(), MPI_STATUSES_IGNORE); From 028214a65321521b7719922901dddea94cebb3bc Mon Sep 17 00:00:00 2001 From: Sebastian von Alfthan Date: Thu, 25 Apr 2019 22:24:18 +0300 Subject: [PATCH 354/602] First version compiles [x] runs [x] correct [ ] --- fieldsolver/gridGlue.cpp | 180 ++++++++------------------------------- 1 file changed, 37 insertions(+), 143 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 8c13bd003..514cc461d 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -82,40 +82,7 @@ template void computeCoupling(dccrg::Dccrg %d :\n", rank, msg.first); - // for ( auto const &id: msg.second) { - // printf(" %ld ", id); - // } - // printf("\n"); - // } - // } - // MPI_Barrier(MPI_COMM_WORLD); - // for(int r = 0; r < nProcs; r++){ - // if(rank == r){ - // for ( auto const &msg: onFsgridMapRemoteProcess) { - // if (msg.first == dRank) { - // printf("RCV %d => %d :\n", msg.first, rank); - // for ( auto const &id: msg.second) { - // printf(" %ld ", id); - // } - // printf("\n"); - // } - // } - // } - // MPI_Barrier(MPI_COMM_WORLD); - // } - } void feedMomentsIntoFsGrid(dccrg::Dccrg& mpiGrid, @@ -256,6 +223,9 @@ void getVolumeFieldsFromFsGrid( // send buffers to each process std::map > sendData; + // map where we finally aggregate result for each local dccrg cell + std::map aggregatedResult; + //list of requests std::vector sendRequests; std::vector receiveRequests; @@ -276,13 +246,6 @@ void getVolumeFieldsFromFsGrid( MPI_Irecv(receiveBuffer.data(), count * sizeof(Average), MPI_BYTE, remoteRank, 1, MPI_COMM_WORLD,&(receiveRequests[ii++])); } - - - /* - onDccrgMapRemoteProcess maps fsgrid processes (key) => set of dccrg cellIDs owned by current rank that map to the fsgrid cells owned by fsgrid process (val) - onFsgridMapRemoteProcess maps dccrg processes (key) => set of dccrg cellIDs owned by dccrg-process that map to current rank fsgrid cells - onFsgridMapCells maps remote dccrg CellIDs to local fsgrid cells - */ //compute average and weight for each field that we want to send to dccrg grid for(auto const &snd: onFsgridMapRemoteProcess){ @@ -334,7 +297,6 @@ void getVolumeFieldsFromFsGrid( //Aggregate receives, compute the weighted average of these ii=0; - std::map aggregatedResult; for (auto const &rcv : onDccrgMapRemoteProcess){ int remoteRank = rcv.first; std::vector& receiveBuffer=receivedData[remoteRank]; @@ -346,116 +308,48 @@ void getVolumeFieldsFromFsGrid( } - //TODO store in dccrg - - + //Store data in dccrg + for (auto const &cellAggregate : aggregatedResult) { + auto cellParams = mpiGrid[cellAggregate.first]->get_cell_parameters(); + if ( cellAggregate.second.cells > 0) { + cellParams[CellParams::BGBX] = cellAggregate.second.sums[0] / cellAggregate.second.cells; + cellParams[CellParams::BGBY] = cellAggregate.second.sums[1] / cellAggregate.second.cells; + cellParams[CellParams::BGBZ] = cellAggregate.second.sums[2] / cellAggregate.second.cells; + cellParams[CellParams::BGBXVOL] = cellAggregate.second.sums[3] / cellAggregate.second.cells; + cellParams[CellParams::BGBYVOL] = cellAggregate.second.sums[4] / cellAggregate.second.cells; + cellParams[CellParams::BGBZVOL] = cellAggregate.second.sums[5] / cellAggregate.second.cells; + 
mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBxdy] = cellAggregate.second.sums[6] / cellAggregate.second.cells; + mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBxdz] = cellAggregate.second.sums[7] / cellAggregate.second.cells; + mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBydx] = cellAggregate.second.sums[8] / cellAggregate.second.cells; + mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBydz] = cellAggregate.second.sums[9] / cellAggregate.second.cells; + mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBzdx] = cellAggregate.second.sums[10] / cellAggregate.second.cells; + mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBzdy] = cellAggregate.second.sums[11] / cellAggregate.second.cells; + } + else{ + // This could happen if all fsgrid cells are DO_NOT_COMPUTE + cellParams[CellParams::BGBX] = 0; + cellParams[CellParams::BGBY] = 0; + cellParams[CellParams::BGBZ] = 0; + cellParams[CellParams::BGBXVOL] = 0; + cellParams[CellParams::BGBYVOL] = 0; + cellParams[CellParams::BGBZVOL] = 0; + mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBxdy] = 0; + mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBxdz] = 0; + mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBydx] = 0; + mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBydz] = 0; + mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBzdx] = 0; + mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBzdy] = 0; + } + } MPI_Waitall(sendRequests.size(), sendRequests.data(), MPI_STATUSES_IGNORE); } + -void getVolumeFieldsFromFsGridOld( - FsGrid< std::array, 2>& volumeFieldsGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid, - dccrg::Dccrg& mpiGrid, - const std::vector& cells -) { - // Setup transfer buffers - cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); - std::vector< std::array > transferBufferVolFields(nCellsOnMaxRefLvl); - std::vector< std::array*> transferBufferPointerVolFields; - std::vector< fsgrids::technical > transferBufferTechnical(nCellsOnMaxRefLvl); - std::vector< fsgrids::technical*> transferBufferPointerTechnical; - - // Setup transfer pointers - volumeFieldsGrid.setupForTransferOut(nCellsOnMaxRefLvl); - technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); - int k = 0; - for(auto dccrgId : cells) { - const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); - // Store a pointer to the first fsgrid cell that maps to each dccrg Id - transferBufferPointerVolFields.push_back(&transferBufferVolFields[k]); - transferBufferPointerTechnical.push_back(&transferBufferTechnical[k]); - for (auto fsgridId : fsgridIds) { - std::array* thisCellDataVolFields = &transferBufferVolFields[k]; - volumeFieldsGrid.transferDataOut(fsgridId, thisCellDataVolFields); - fsgrids::technical* thisCellDataTechnical = &transferBufferTechnical[k]; - technicalGrid.transferDataOut(fsgridId, thisCellDataTechnical); - k++; - } - } - // Do the transfer - volumeFieldsGrid.finishTransfersOut(); - technicalGrid.finishTransfersOut(); - - // Build a list of index pairs to cellparams and fsgrid - std::vector> iCellParams; - iCellParams.reserve(6); - iCellParams.push_back(std::make_pair(CellParams::PERBXVOL, fsgrids::volfields::PERBXVOL)); - iCellParams.push_back(std::make_pair(CellParams::PERBYVOL, fsgrids::volfields::PERBYVOL)); - iCellParams.push_back(std::make_pair(CellParams::PERBZVOL, fsgrids::volfields::PERBZVOL)); - iCellParams.push_back(std::make_pair(CellParams::EXVOL,
fsgrids::volfields::EXVOL)); - iCellParams.push_back(std::make_pair(CellParams::EYVOL, fsgrids::volfields::EYVOL)); - iCellParams.push_back(std::make_pair(CellParams::EZVOL, fsgrids::volfields::EZVOL)); - - // Build lists of index pairs to dccrg and fsgrid - std::vector> iDerivativesBVOL; - iDerivativesBVOL.reserve(6); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBXVOLdy, fsgrids::volfields::dPERBXVOLdy)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBXVOLdz, fsgrids::volfields::dPERBXVOLdz)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBYVOLdx, fsgrids::volfields::dPERBYVOLdx)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBYVOLdz, fsgrids::volfields::dPERBYVOLdz)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBZVOLdx, fsgrids::volfields::dPERBZVOLdx)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dPERBZVOLdy, fsgrids::volfields::dPERBZVOLdy)); - - // Distribute data from the transfer buffer back into the appropriate mpiGrid places - // Disregard DO_NOT_COMPUTE cells - #pragma omp parallel for - for(uint i = 0; i < cells.size(); ++i) { - - const CellID dccrgId = cells[i]; - auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); - - // Calculate the number of fsgrid cells we loop through - cint nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); - // Count the number of fsgrid cells we need to average into the current dccrg cell - int nCellsToSum = 0; - - // TODO: Could optimize here by adding a separate branch for nCells == 1 with direct assignment of the value - // Could also do the average in a temporary value and only access grid structure once. - - // Initialize values to 0 - for (auto j : iCellParams) cellParams[j.first] = 0.0; - for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] = 0.0; - - for(int iCell = 0; iCell < nCells; ++iCell) { - // The fsgrid cells that cover the i'th dccrg cell are pointed at by - // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. - // We want to average over those who are not DO_NOT_COMPUTE to get the value for the dccrg cell - if ((transferBufferPointerTechnical[i] + iCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { - continue; - } else { - nCellsToSum++; - - std::array* thisCellData = transferBufferPointerVolFields[i] + iCell; - - for (auto j : iCellParams) cellParams[j.first] += thisCellData->at(j.second); - for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] += thisCellData->at(j.second); - } - } - - if (nCellsToSum > 0) { - // Divide by the number of cells to get the average - for (auto j : iCellParams) cellParams[j.first] /= nCellsToSum; - for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] /= nCellsToSum; - } - } -} - - void getBgFieldsAndDerivativesFromFsGrid( FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid, From 859ca1c7c4601aca89811aad50b998b796a3a889 Mon Sep 17 00:00:00 2001 From: Sebastian von Alfthan Date: Sat, 27 Apr 2019 14:11:00 +0300 Subject: [PATCH 355/602] Fixed fields that are transferred after fieldsolver. Now they include all fields used in vlasov solver. 
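Patches 353-355 lean on an accumulator type whose definition is only alluded to above ("Average struct has operator += and a constructor"). The following is a minimal self-contained sketch consistent with how the send/receive buffers and aggregatedResult use it; treating Real as double and fixing the field count at 18 (the value after this patch) are assumptions, not code from the repository:

    #include <algorithm>

    using Real = double;                    // assumption: Real is double here
    constexpr int fieldsToCommunicate = 18; // assumption: value after this patch

    struct Average {
       Real sums[fieldsToCommunicate]; // running sums of each communicated field
       int cells;                      // number of contributing fsgrid cells
       Average() : cells(0) {
          std::fill(sums, sums + fieldsToCommunicate, 0.0);
       }
       Average& operator+=(const Average& other) {
          for (int i = 0; i < fieldsToCommunicate; i++) {
             sums[i] += other.sums[i];
          }
          cells += other.cells;
          return *this;
       }
    };

The weighted average written back into DCCRG is then simply sums[i] / cells, guarded by cells > 0 so that a dccrg cell covered only by DO_NOT_COMPUTE fsgrid cells falls back to zeroed fields.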
--- fieldsolver/gridGlue.cpp | 110 +++++++++++++++++++++++---------------- fieldsolver/gridGlue.hpp | 15 +++--- vlasiator.cpp | 12 ++--- 3 files changed, 79 insertions(+), 58 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 514cc461d..db277cc51 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -181,14 +181,17 @@ void feedMomentsIntoFsGrid(dccrg::Dccrg& } -void getVolumeFieldsFromFsGrid( +void getFieldsFromFsGrid( FsGrid< std::array, 2>& volumeFieldsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& EGradPeGrid, FsGrid< fsgrids::technical, 2>& technicalGrid, dccrg::Dccrg& mpiGrid, const std::vector& cells ) { - - const int fieldsToCommunicate = 12; + // TODO: solver only needs bgb + PERB, we could combine them + + const int fieldsToCommunicate = 18; struct Average { Real sums[fieldsToCommunicate]; int cells; @@ -263,19 +266,29 @@ void getVolumeFieldsFromFsGrid( if(technicalGrid.get(fsgridCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { continue; } - std::array * cell = volumeFieldsGrid.get(fsgridCell); - sendBuffer[ii].sums[0 ] += cell->at(fsgrids::volfields::PERBXVOL); - sendBuffer[ii].sums[1 ] += cell->at(fsgrids::volfields::PERBYVOL); - sendBuffer[ii].sums[2 ] += cell->at(fsgrids::volfields::PERBZVOL); - sendBuffer[ii].sums[3 ] += cell->at(fsgrids::volfields::EXVOL); - sendBuffer[ii].sums[4 ] += cell->at(fsgrids::volfields::EYVOL); - sendBuffer[ii].sums[5 ] += cell->at(fsgrids::volfields::EZVOL); - sendBuffer[ii].sums[6 ] += cell->at(fsgrids::volfields::dPERBXVOLdy); - sendBuffer[ii].sums[7 ] += cell->at(fsgrids::volfields::dPERBXVOLdz); - sendBuffer[ii].sums[8 ] += cell->at(fsgrids::volfields::dPERBYVOLdx); - sendBuffer[ii].sums[9 ] += cell->at(fsgrids::volfields::dPERBYVOLdz); - sendBuffer[ii].sums[10] += cell->at(fsgrids::volfields::dPERBZVOLdx); - sendBuffer[ii].sums[11] += cell->at(fsgrids::volfields::dPERBZVOLdy); + std::array * volcell = volumeFieldsGrid.get(fsgridCell); + std::array * bgcell = BgBGrid.get(fsgridCell); + std::array * egradpecell = EGradPeGrid.get(fsgridCell); + + sendBuffer[ii].sums[0 ] += volcell->at(fsgrids::volfields::PERBXVOL); + sendBuffer[ii].sums[1 ] += volcell->at(fsgrids::volfields::PERBYVOL); + sendBuffer[ii].sums[2 ] += volcell->at(fsgrids::volfields::PERBZVOL); + sendBuffer[ii].sums[3 ] += volcell->at(fsgrids::volfields::EXVOL); + sendBuffer[ii].sums[4 ] += volcell->at(fsgrids::volfields::EYVOL); + sendBuffer[ii].sums[5 ] += volcell->at(fsgrids::volfields::EZVOL); + sendBuffer[ii].sums[6 ] += volcell->at(fsgrids::volfields::dPERBXVOLdy); + sendBuffer[ii].sums[7 ] += volcell->at(fsgrids::volfields::dPERBXVOLdz); + sendBuffer[ii].sums[8 ] += volcell->at(fsgrids::volfields::dPERBYVOLdx); + sendBuffer[ii].sums[9 ] += volcell->at(fsgrids::volfields::dPERBYVOLdz); + sendBuffer[ii].sums[10] += volcell->at(fsgrids::volfields::dPERBZVOLdx); + sendBuffer[ii].sums[11] += volcell->at(fsgrids::volfields::dPERBZVOLdy); + sendBuffer[ii].sums[12] += bgcell->at(fsgrids::bgbfield::BGBXVOL); + sendBuffer[ii].sums[13] += bgcell->at(fsgrids::bgbfield::BGBYVOL); + sendBuffer[ii].sums[14] += bgcell->at(fsgrids::bgbfield::BGBZVOL); + sendBuffer[ii].sums[15] += egradpecell->at(fsgrids::egradpe::EXGRADPE); + sendBuffer[ii].sums[16] += egradpecell->at(fsgrids::egradpe::EYGRADPE); + sendBuffer[ii].sums[17] += egradpecell->at(fsgrids::egradpe::EZGRADPE); + sendBuffer[ii].cells++; } ii++; @@ -307,48 +320,55 @@ void getVolumeFieldsFromFsGrid( } } - //Store data in dccrg for (auto const 
&cellAggregate : aggregatedResult) { auto cellParams = mpiGrid[cellAggregate.first]->get_cell_parameters(); if ( cellAggregate.second.cells > 0) { - cellParams[CellParams::BGBX] = cellAggregate.second.sums[0] / cellAggregate.second.cells; - cellParams[CellParams::BGBY] = cellAggregate.second.sums[1] / cellAggregate.second.cells; - cellParams[CellParams::BGBZ] = cellAggregate.second.sums[2] / cellAggregate.second.cells; - cellParams[CellParams::BGBXVOL] = cellAggregate.second.sums[3] / cellAggregate.second.cells; - cellParams[CellParams::BGBYVOL] = cellAggregate.second.sums[4] / cellAggregate.second.cells; - cellParams[CellParams::BGBZVOL] = cellAggregate.second.sums[5] / cellAggregate.second.cells; - mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBxdy] = cellAggregate.second.sums[6] / cellAggregate.second.cells; - mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBxdz] = cellAggregate.second.sums[7] / cellAggregate.second.cells; - mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBydx] = cellAggregate.second.sums[8] / cellAggregate.second.cells; - mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBydz] = cellAggregate.second.sums[9] / cellAggregate.second.cells; - mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBzdx] = cellAggregate.second.sums[10] / cellAggregate.second.cells; - mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBzdy] = cellAggregate.second.sums[11] / cellAggregate.second.cells; + cellParams[CellParams::PERBXVOL] = cellAggregate.second.sums[0] / cellAggregate.second.cells; + cellParams[CellParams::PERBYVOL] = cellAggregate.second.sums[1] / cellAggregate.second.cells; + cellParams[CellParams::PERBZVOL] = cellAggregate.second.sums[2] / cellAggregate.second.cells; + cellParams[CellParams::EXVOL] = cellAggregate.second.sums[3] / cellAggregate.second.cells; + cellParams[CellParams::EYVOL] = cellAggregate.second.sums[4] / cellAggregate.second.cells; + cellParams[CellParams::EZVOL] = cellAggregate.second.sums[5] / cellAggregate.second.cells; + mpiGrid[cellAggregate.first]->derivativesBVOL[bvolderivatives::dPERBXVOLdy] = cellAggregate.second.sums[6] / cellAggregate.second.cells; + mpiGrid[cellAggregate.first]->derivativesBVOL[bvolderivatives::dPERBXVOLdz] = cellAggregate.second.sums[7] / cellAggregate.second.cells; + mpiGrid[cellAggregate.first]->derivativesBVOL[bvolderivatives::dPERBYVOLdx] = cellAggregate.second.sums[8] / cellAggregate.second.cells; + mpiGrid[cellAggregate.first]->derivativesBVOL[bvolderivatives::dPERBYVOLdz] = cellAggregate.second.sums[9] / cellAggregate.second.cells; + mpiGrid[cellAggregate.first]->derivativesBVOL[bvolderivatives::dPERBZVOLdx] = cellAggregate.second.sums[10] / cellAggregate.second.cells; + mpiGrid[cellAggregate.first]->derivativesBVOL[bvolderivatives::dPERBZVOLdy] = cellAggregate.second.sums[11] / cellAggregate.second.cells; + cellParams[CellParams::BGBXVOL] = cellAggregate.second.sums[12] / cellAggregate.second.cells; + cellParams[CellParams::BGBYVOL] = cellAggregate.second.sums[13] / cellAggregate.second.cells; + cellParams[CellParams::BGBZVOL] = cellAggregate.second.sums[14] / cellAggregate.second.cells; + cellParams[CellParams::EXGRADPE] = cellAggregate.second.sums[15] / cellAggregate.second.cells; + cellParams[CellParams::EYGRADPE] = cellAggregate.second.sums[16] / cellAggregate.second.cells; + cellParams[CellParams::EZGRADPE] = cellAggregate.second.sums[17] / cellAggregate.second.cells; } else{ // This could happen if all fsgrid cells are DO_NOT_COMPUTE -
cellParams[CellParams::BGBX] = 0; - cellParams[CellParams::BGBY] = 0; - cellParams[CellParams::BGBZ] = 0; - cellParams[CellParams::BGBXVOL] = 0; - cellParams[CellParams::BGBYVOL] = 0; - cellParams[CellParams::BGBZVOL] = 0; - mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBxdy] = 0; - mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBxdz] = 0; - mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBydx] = 0; - mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBydz] = 0; - mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBzdx] = 0; - mpiGrid[cellAggregate.first]->derivativesBVOL[fieldsolver::dBGBzdy] = 0; + cellParams[CellParams::PERBXVOL] = 0; + cellParams[CellParams::PERBYVOL] = 0; + cellParams[CellParams::PERBZVOL] = 0; + cellParams[CellParams::EXVOL] = 0; + cellParams[CellParams::EYVOL] = 0; + cellParams[CellParams::EZVOL] = 0; + mpiGrid[cellAggregate.first]->derivativesBVOL[bvolderivatives::dPERBXVOLdy] = 0; + mpiGrid[cellAggregate.first]->derivativesBVOL[bvolderivatives::dPERBXVOLdz] = 0; + mpiGrid[cellAggregate.first]->derivativesBVOL[bvolderivatives::dPERBYVOLdx] = 0; + mpiGrid[cellAggregate.first]->derivativesBVOL[bvolderivatives::dPERBYVOLdz] = 0; + mpiGrid[cellAggregate.first]->derivativesBVOL[bvolderivatives::dPERBZVOLdx] = 0; + mpiGrid[cellAggregate.first]->derivativesBVOL[bvolderivatives::dPERBZVOLdy] = 0; + cellParams[CellParams::BGBXVOL] = 0; + cellParams[CellParams::BGBYVOL] = 0; + cellParams[CellParams::BGBZVOL] = 0; + cellParams[CellParams::EXGRADPE] = 0; + cellParams[CellParams::EYGRADPE] = 0; + cellParams[CellParams::EZGRADPE] = 0; } } MPI_Waitall(sendRequests.size(), sendRequests.data(), MPI_STATUSES_IGNORE); } - - - - void getBgFieldsAndDerivativesFromFsGrid( FsGrid< std::array, 2>& BgBGrid, diff --git a/fieldsolver/gridGlue.hpp b/fieldsolver/gridGlue.hpp index f40d04db6..12dc885bb 100644 --- a/fieldsolver/gridGlue.hpp +++ b/fieldsolver/gridGlue.hpp @@ -19,19 +19,20 @@ void feedMomentsIntoFsGrid(dccrg::Dccrg& FsGrid< std::array, 2>& momentsGrid, bool dt2=false); -/*! Copy field solver result (Volume-averaged fields) and store them back into DCCRG +/*! Copy field solver result (VOLB, VOLE, VOLPERB derivatives, gradpe) and store them back into DCCRG * \param mpiGrid The DCCRG grid carrying fields. * \param cells List of local cells * \param volumeFieldsGrid Fieldsolver grid for these quantities * * This function assumes that proper grid coupling has been set up. */ -void getVolumeFieldsFromFsGrid( - FsGrid< std::array, 2>& volumeFieldsGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid, - dccrg::Dccrg& mpiGrid, - const std::vector& cells -); +void getFieldsFromFsGrid(FsGrid< std::array, 2>& volumeFieldsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, + dccrg::Dccrg& mpiGrid, + const std::vector& cells + ); /*! Copy background B fields and store them into DCCRG * \param mpiGrid The DCCRG grid carrying fields. diff --git a/vlasiator.cpp b/vlasiator.cpp index b53f48c94..eef6ad4ec 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -519,12 +519,12 @@ int main(int argn,char* args[]) { phiprof::stop("compute-dt"); } - phiprof::start("getVolumeFieldsFromFsGrid"); + phiprof::start("getFieldsFromFsGrid"); // These should be done by initializeFieldPropagator() if the propagation is turned off. 
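// getFieldsFromFsGrid() now returns everything the Vlasov solver reads back from fsgrid in a single pass: the volume fields and their BVOL derivatives, the background field BGB*VOL, and the electron pressure gradient terms E*GRADPE (18 Reals per cell, see fieldsToCommunicate above), replacing separate per-grid transfers.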
volGrid.updateGhostCells(); technicalGrid.updateGhostCells(); - getVolumeFieldsFromFsGrid(volGrid, technicalGrid, mpiGrid, cells); - phiprof::stop("getVolumeFieldsFromFsGrid"); + getFieldsFromFsGrid(volGrid, BgBGrid, EGradPeGrid, technicalGrid, mpiGrid, cells); + phiprof::stop("getFieldsFromFsGrid"); // Save restart data if (P::writeInitialState) { @@ -1033,12 +1033,12 @@ int main(int argn,char* args[]) { P::fieldSolverSubcycles ); - phiprof::start("getVolumeFieldsFromFsGrid"); + phiprof::start("getFieldsFromFsGrid"); // Copy results back from fsgrid. volGrid.updateGhostCells(); technicalGrid.updateGhostCells(); - getVolumeFieldsFromFsGrid(volGrid, technicalGrid, mpiGrid, cells); - phiprof::stop("getVolumeFieldsFromFsGrid"); + getFieldsFromFsGrid(volGrid, BgBGrid, EGradPeGrid, technicalGrid, mpiGrid, cells); + phiprof::stop("getFieldsFromFsGrid"); phiprof::stop("Propagate Fields",cells.size(),"SpatialCells"); addTimedBarrier("barrier-after-field-solver"); } From 882c87839670efc225a66d2bfbf595eb161c95f2 Mon Sep 17 00:00:00 2001 From: Sebastian von Alfthan Date: Sat, 27 Apr 2019 23:32:57 +0300 Subject: [PATCH 356/602] Compute fieldsolver dt from techgrid, removing the need to communicate it --- fieldsolver/gridGlue.cpp | 68 ---------------------------------------- fieldsolver/gridGlue.hpp | 10 ------ vlasiator.cpp | 28 ++++++++++++----- 3 files changed, 21 insertions(+), 85 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index db277cc51..0bfaf096f 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -724,74 +724,6 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m } } } - -// for (int x = 0; x < localSize[0]; ++x) { -// for (int y = 0; y < localSize[1]; ++y) { -// for (int z = 0; z < localSize[2]; ++z) { -// std::cout << "boundary layer+flag at " << x << ", " << y << ", " << z << " = "; -// std::cout << technicalGrid.get(x,y,z)->sysBoundaryLayer; -// std::cout << " "; -// std::cout << technicalGrid.get(x,y,z)->sysBoundaryFlag; -// std::cout << std::endl; -// } -// } -// } -// abort(); -} - -void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid, - dccrg::Dccrg& mpiGrid, - const std::vector& cells) { - - cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); - technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); - - // Buffer to store contents of the grid - std::vector transferBuffer(nCellsOnMaxRefLvl); - std::vector transferBufferPointer; - - int k = 0; - for(int i=0; i< cells.size(); i++) { - - transferBufferPointer.push_back(&transferBuffer[k]); - const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, cells[i]); - for (auto fsgridId : fsgridIds) { - fsgrids::technical* thisCellData = &transferBuffer[k++]; - technicalGrid.transferDataOut(fsgridId, thisCellData); - } - } - - technicalGrid.finishTransfersOut(); - - // After the transfer is completed, stuff the received maxFDt into the cells.
- #pragma omp parallel for - for(int i=0; i< cells.size(); i++) { - - const CellID dccrgId = cells[i]; - auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); - - // Calculate the number of fsgrid cells we need to loop through - cint nCells = pow(pow(2,mpiGrid.get_maximum_refinement_level() - mpiGrid.get_refinement_level(dccrgId)),3); - - cellParams[CellParams::MAXFDT] = std::numeric_limits::max(); - //cellParams[CellParams::FSGRID_RANK] = 0; - //cellParams[CellParams::FSGRID_BOUNDARYTYPE] = 0; - - for (int iCell = 0; iCell < nCells; ++iCell) { - - fsgrids::technical* thisCellData = transferBufferPointer[i] + iCell; - - if (thisCellData->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || thisCellData->sysBoundaryLayer == 1) { - - cellParams[CellParams::MAXFDT] = std::min(cellParams[CellParams::MAXFDT],thisCellData->maxFsDt); - - } - - //TODO: Implement something for FSGRID_RANK and FSGRID_BOUNDARYTYPE - //cellParams[CellParams::FSGRID_RANK] = thisCellData->fsGridRank; - //cellParams[CellParams::FSGRID_BOUNDARYTYPE] = thisCellData->sysBoundaryFlag; - } - } } /* diff --git a/fieldsolver/gridGlue.hpp b/fieldsolver/gridGlue.hpp index 12dc885bb..ab01d7fff 100644 --- a/fieldsolver/gridGlue.hpp +++ b/fieldsolver/gridGlue.hpp @@ -70,16 +70,6 @@ void getDerivativesFromFsGrid( void setupTechnicalFsGrid(dccrg::Dccrg& mpiGrid, const std::vector& cells, FsGrid< fsgrids::technical, 2>& technicalGrid); -/*! Transfer max timestep data from technical grid back into DCCRG. - * \param technicalGrid the target Fieldsolver grid for this information - * \param mpiGrid The DCCRG grid carrying rho, rhoV and P - * \param cells List of local cells - * - * This function assumes that proper grid coupling has been set up. - */ -void getFsGridMaxDt(FsGrid< fsgrids::technical, 2>& technicalGrid, - dccrg::Dccrg& mpiGrid, - const std::vector& cells); int getNumberOfCellsOnMaxRefLvl(dccrg::Dccrg& mpiGrid, const std::vector& cells); diff --git a/vlasiator.cpp b/vlasiator.cpp index eef6ad4ec..759570385 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -92,7 +92,8 @@ void addTimedBarrier(string name){ phiprof::stop(bt); } -bool computeNewTimeStep(dccrg::Dccrg& mpiGrid,Real &newDt, bool &isChanged) { +bool computeNewTimeStep(dccrg::Dccrg& mpiGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, Real &newDt, bool &isChanged) { phiprof::start("compute-timestep"); //compute maximum time-step, this cannot be done at the first @@ -150,7 +151,6 @@ bool computeNewTimeStep(dccrg::Dccrg& mpi (cell->sysBoundaryLayer == 1 && cell->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY )) { //spatial fluxes computed also for boundary cells dtMaxLocal[0]=min(dtMaxLocal[0], cell->parameters[CellParams::MAXRDT]); - dtMaxLocal[2]=min(dtMaxLocal[2], cell->parameters[CellParams::MAXFDT]); } if (cell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY && cell->parameters[CellParams::MAXVDT] != 0) { @@ -158,6 +158,23 @@ bool computeNewTimeStep(dccrg::Dccrg& mpi dtMaxLocal[1]=min(dtMaxLocal[1], cell->parameters[CellParams::MAXVDT]); } } + + //compute max dt for fieldsolver + const std::array gridDims(technicalGrid.getLocalSize()); + for (int k=0; ksysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || + (cell->sysBoundaryLayer == 1 && cell->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY )) { + dtMaxLocal[2]=min(dtMaxLocal[2], cell->maxFsDt); + } + } + } + } + + + MPI_Allreduce(&(dtMaxLocal[0]), &(dtMaxGlobal[0]), 3, MPI_Type(), MPI_MIN, MPI_COMM_WORLD); //If any of the solvers are disabled there should be no limits in timespace from 
it @@ -568,9 +585,7 @@ int main(int argn,char* args[]) { if (P::isRestart == false) { //compute new dt phiprof::start("compute-dt"); - getFsGridMaxDt(technicalGrid, mpiGrid, cells); - - computeNewTimeStep(mpiGrid,newDt,dtIsChanged); + computeNewTimeStep(mpiGrid, technicalGrid, newDt, dtIsChanged); if (P::dynamicTimestep == true && dtIsChanged == true) { // Only actually update the timestep if dynamicTimestep is on P::dt=newDt; @@ -904,8 +919,7 @@ int main(int argn,char* args[]) { //simulation loop // FIXME what if dt changes at a restart?? if(P::dynamicTimestep && P::tstep > P::tstep_min) { - getFsGridMaxDt(technicalGrid, mpiGrid, cells); - computeNewTimeStep(mpiGrid,newDt,dtIsChanged); + computeNewTimeStep(mpiGrid, technicalGrid, newDt, dtIsChanged); addTimedBarrier("barrier-check-dt"); if(dtIsChanged) { phiprof::start("update-dt"); From 6770b3727c1ced05cb281977374a3fe943a92939 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 30 Apr 2019 10:18:45 +0300 Subject: [PATCH 357/602] Updated mapDccrgIdToFsGridGlobalID to the version from Sebastian's branch --- fieldsolver/gridGlue.cpp | 28 ++++++---------------------- 1 file changed, 6 insertions(+), 22 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 0bfaf096f..1f9631ae4 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -732,41 +732,25 @@ Map from dccrg cell id to fsgrid global cell ids when they aren't identical (ie. std::vector mapDccrgIdToFsGridGlobalID(dccrg::Dccrg& mpiGrid, CellID dccrgID) { - const auto maxRefLvl = mpiGrid.get_maximum_refinement_level(); const auto refLvl = mpiGrid.get_refinement_level(dccrgID); const auto cellLength = pow(2,maxRefLvl-refLvl); const auto topLeftIndices = mpiGrid.mapping.get_indices(dccrgID); - std::array indices; - std::vector> allIndices; - std::array fsgridDims; + fsgridDims[0] = P::xcells_ini * pow(2,mpiGrid.get_maximum_refinement_level()); fsgridDims[1] = P::ycells_ini * pow(2,mpiGrid.get_maximum_refinement_level()); fsgridDims[2] = P::zcells_ini * pow(2,mpiGrid.get_maximum_refinement_level()); - + + std::vector fsgridIDs(cellLength * cellLength * cellLength); for (uint k = 0; k < cellLength; ++k) { for (uint j = 0; j < cellLength; ++j) { for (uint i = 0; i < cellLength; ++i) { - indices[0] = topLeftIndices[0] + i; - indices[1] = topLeftIndices[1] + j; - indices[2] = topLeftIndices[2] + k; - allIndices.push_back(indices); - } + const std::array indices = {{topLeftIndices[0] + i,topLeftIndices[1] + j,topLeftIndices[2] + k}}; + fsgridIDs[k*cellLength*cellLength + j*cellLength + i] = indices[0] + indices[1] * fsgridDims[0] + indices[2] * fsgridDims[1] * fsgridDims[0]; + } } } - - std::vector fsgridIDs; - - - for (auto cellCoord: allIndices) { - - fsgridIDs.push_back(cellCoord[0] - + cellCoord[1] * fsgridDims[0] - + cellCoord[2] * fsgridDims[1] * fsgridDims[0]); - - } - return fsgridIDs; } From 19ed84abb1a059a67b005e9aed062545967f8a34 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Tue, 30 Apr 2019 11:26:43 +0300 Subject: [PATCH 358/602] Added derivative calculation within vector potential dipole transition region --- backgroundfield/vectordipole.cpp | 133 +++++++++++++++++++++++-------- backgroundfield/vectordipole.hpp | 1 + 2 files changed, 100 insertions(+), 34 deletions(-) diff --git a/backgroundfield/vectordipole.cpp b/backgroundfield/vectordipole.cpp index 4c5dc7688..f002b937b 100644 --- a/backgroundfield/vectordipole.cpp +++ b/backgroundfield/vectordipole.cpp @@ -1,6 +1,7 @@ /* * This file is part of Vlasiator. 
* Copyright 2010-2016 Finnish Meteorological Institute + * Copyright 2017-2019 University of Helsinki * * For details of usage, see the COPYING file and read the "Rules of the Road" * at http://www.physics.helsinki.fi/vlasiator/ @@ -65,10 +66,11 @@ double VectorDipole::call( double x, double y, double z) const return 0.0; //set zero field inside dipole if(r2>=radius[1]*radius[1]) - return 0.0; //set zero field and derivatives outside zero radius + return 0.0; //set zero field and derivatives outside "zero radius" /* This function is called from within other calls, one component at a time. - The component in question is defined using the _fComponent index. */ + The component in question is defined using the _fComponent index. If a derivative + is requested, the direction of the derivative is defined using _dComponent. */ const double r1 = sqrt(r2); const double r5 = (r2*r2*r1); @@ -87,19 +89,23 @@ double VectorDipole::call( double x, double y, double z) const else sameComponent=0; - // TODO: verify that this doesn't assume dipole aligned with z + /* Confirmed Battarbee 26.04.2019: This is the correct + 3D dipole derivative. */ return -5*B*r[_dComponent]/r2+ (3*q[_dComponent]*r[_fComponent] - 2*q[_fComponent]*r[_dComponent] + 3*rdotq*sameComponent)/r5; } + /* Within transition range (between "full radius" and "zero radius"), use + a vector potential scaled with the smootherstep function. Calculated + and coded by Markus Battarbee, 30.04.2019 */ + // Calculate vector potential within transition range - double qcrossr[3]; - qcrossr[0] = q[1]*r[2]-q[2]*r[1]; - qcrossr[1] = q[2]*r[0]-q[0]*r[2]; - qcrossr[2] = q[0]*r[1]-q[1]*r[0]; - const double A = qcrossr / (r2*r1); + double A[3]; + A[0] = (q[1]*r[2]-q[2]*r[1]) / (r2*r1); + A[1] = (q[2]*r[0]-q[0]*r[2]) / (r2*r1); + A[2] = (q[0]*r[1]-q[1]*r[0]) / (r2*r1); // Coordinate within smootherstep function const double Sx = -(r1-radius[1])/(radius[1]-radius[0]); const double Sx2 = Sx*Sx; @@ -107,28 +113,22 @@ double VectorDipole::call( double x, double y, double z) const const double S2 = 6.*Sx2*Sx2*Sx - 15.*Sx2*Sx2 + 10.*Sx2*Sx; const double dS2dr = -(30.*Sx2*Sx2 - 60.*Sx2*Sx + 30.*Sx2)/(radius[1]-radius[0]); - // Radial unit vector at that location in cartesian components - double er[3]; - er[0]=r[0]/r1; - er[1]=r[1]/r1; - er[2]=r[2]/r1; - // Cartesian derivatives of S2 - double dS2cart; - dS2cart[0] = er[0]*dS2dr; - dS2cart[1] = er[1]*dS2dr; - dS2cart[2] = er[2]*dS2dr; + double dS2cart[3]; + dS2cart[0] = (r[0]/r1)*dS2dr; + dS2cart[1] = (r[1]/r1)*dS2dr; + dS2cart[2] = (r[2]/r1)*dS2dr; if(_derivative == 0) && (r1 > radius[0]) { /* Within transition range (between radius[0] and radius[1]) we multiply the magnetic field with the S2 smootherstep function - and an additional corrective term to remove divergence. This + and add an additional corrective term to remove divergence. This is based on using the dipole field vector potential and scaling it using the smootherstep function S2. 
Notation: - m = dipole moment (vector) - r = position vector + q = dipole moment (vector) + r = position vector R = position distance The regular dipole field vector potential @@ -145,33 +145,98 @@ double VectorDipole::call( double x, double y, double z) const The scaled vector potential is A'(r) = A(r)*S2(Sx) The scaled magnetic field is - del cross A'(r) - = S2(Sx) del cross A(r) + del S2(Sx) cross A(r) - = S2(sx) B(r) + del S2(Sx) cross A(r) + B'(r) = del cross A'(r) + =(NRL)= S2(Sx) del cross A(r) + del S2(Sx) cross A(r) + = S2(Sx) B(r) + del S2(Sx) cross A(r) */ - double correctionterm[3]; - correctionterm[0] = dS2cart[1]*A[2] - dS2cart[2]*A[1]; - correctionterm[1] = dS2cart[2]*A[0] - dS2cart[0]*A[2]; - correctionterm[2] = dS2cart[0]*A[1] - dS2cart[1]*A[0]; - - return B*S2 + correctionterm[_fComponent]; + double delS2crossA[3]; + delS2crossA[0] = dS2cart[1]*A[2] - dS2cart[2]*A[1]; + delS2crossA[1] = dS2cart[2]*A[0] - dS2cart[0]*A[2]; + delS2crossA[2] = dS2cart[0]*A[1] - dS2cart[1]*A[0]; + + return S2*B + delS2crossA[_fComponent]; } else if(_derivative == 1) && (r1 > radius[0]) { - // first derivatives of field calculated from diminishing vector potential + /* first derivatives of field calculated from diminishing vector potential + + del B'(r) = S2(Sx) del B(r) + B(r) del S2(Sx) + del (del S2(Sx) cross A(r)) + + component-wise: + + del Bx = S2(Sx) del Bx + del S2(Sx) Bx + del(del S2(Sx) cross A)@i=x + del By = S2(Sx) del By + del S2(Sx) By + del(del S2(Sx) cross A)@i=y + del Bz = S2(Sx) del Bz + del S2(Sx) Bz + del(del S2(Sx) cross A)@i=z + + where + + del(del S2(Sx) cross A)@i=x = del (dS2/dy Az - dS/dz Ay) + = del(dS/dy) Az + dS/dy del Az - del(DS/dz) Ay - dS/dz del Ay + + del(del S2(Sx) cross A)@i=y = del (dS2/dz Ax - dS/dx Az) + = del(dS/dz) Ax + dS/dz del Ax - del(DS/dx) Az - dS/dx del Az + + del(del S2(Sx) cross A)@i=z = del (dS2/dx Ay - dS/dy Ax) + = del(dS/dx) Ay + dS/dx del Ay - del(DS/dy) Ax - dS/dy del Ax + **********/ - // TODO: calculate derivatives and implement unsigned int sameComponent; if(_dComponent==_fComponent) sameComponent=1; else sameComponent=0; - - return -5*B*r[_dComponent]/r2+ + + // Regular derivative of B + const double delB = -5*B*r[_dComponent]/r2+ (3*q[_dComponent]*r[_fComponent] - 2*q[_fComponent]*r[_dComponent] + 3*rdotq*sameComponent)/r5; + + // Calculate del Ax, del Ay, del Az + double delAx[3]; + double delAy[3]; + double delAz[3]; + delAx[0] = (-3./(r2*r2*r1))*(q[1]*r[2]-q[2]*r[1])*r[0]; + delAx[1] = (-3./(r2*r2*r1))*(q[1]*r[2]-q[2]*r[1])*r[1] -q[2]/(r2*r1); + delAx[2] = (-3./(r2*r2*r1))*(q[1]*r[2]-q[2]*r[1])*r[2] +q[1]/(r2*r1); + delAy[0] = (-3./(r2*r2*r1))*(q[2]*r[0]-q[0]*r[2])*r[0] +q[2]/(r2*r1); + delAy[1] = (-3./(r2*r2*r1))*(q[2]*r[0]-q[0]*r[2])*r[1]; + delAy[2] = (-3./(r2*r2*r1))*(q[2]*r[0]-q[0]*r[2])*r[2] -q[0]/(r2*r1); + delAz[0] = (-3./(r2*r2*r1))*(q[0]*r[1]-q[1]*r[0])*r[0] -q[1]/(r2*r1); + delAz[1] = (-3./(r2*r2*r1))*(q[0]*r[1]-q[1]*r[0])*r[1] +q[0]/(r2*r1); + delAz[2] = (-3./(r2*r2*r1))*(q[0]*r[1]-q[1]*r[0])*r[2]; + + // Calculate del (dS2/dx), del (dS2/dy), del (dS2/dz) + double deldS2dx[3]; + double deldS2dy[3]; + double deldS2dz[3]; + deldS2dx[0] = (-r[0]/(r2*r2*r1))*dS2dr*r[0] + dS2dr/r1; + deldS2dx[1] = (-r[0]/(r2*r2*r1))*dS2dr*r[1]; + deldS2dx[2] = (-r[0]/(r2*r2*r1))*dS2dr*r[2]; + deldS2dy[0] = (-r[1]/(r2*r2*r1))*dS2dr*r[0]; + deldS2dy[1] = (-r[1]/(r2*r2*r1))*dS2dr*r[1] + dS2dr/r1; + deldS2dy[2] = (-r[1]/(r2*r2*r1))*dS2dr*r[2]; + deldS2dz[0] = (-r[2]/(r2*r2*r1))*dS2dr*r[0]; + deldS2dz[1] = (-r[2]/(r2*r2*r1))*dS2dr*r[1]; + 
deldS2dz[2] = (-r[2]/(r2*r2*r1))*dS2dr*r[2] + dS2dr/r1; + + // Calculate del(del S2(Sx) cross A)@i=x, del(del S2(Sx) cross A)@i=y, del(del S2(Sx) cross A)@i=z + double ddS2crossA[3][3]; + // derivatives of X-directional field + ddS2crossA[0][0] = deldS2dy[0]*A[2] + dS2cart[1]*delAz[0] - deldS2dz[0]*A[1] - dS2cart[2]*delAy[0]; + ddS2crossA[0][1] = deldS2dy[1]*A[2] + dS2cart[1]*delAz[1] - deldS2dz[1]*A[1] - dS2cart[2]*delAy[1]; + ddS2crossA[0][2] = deldS2dy[2]*A[2] + dS2cart[1]*delAz[2] - deldS2dz[2]*A[1] - dS2cart[2]*delAy[2]; + // derivatives of Y-directional field + ddS2crossA[1][0] = deldS2dz[0]*A[0] + dS2cart[2]*delAx[0] - deldS2dx[0]*A[2] - dS2cart[0]*delAz[0]; + ddS2crossA[1][1] = deldS2dz[1]*A[0] + dS2cart[2]*delAx[1] - deldS2dx[1]*A[2] - dS2cart[0]*delAz[1]; + ddS2crossA[1][2] = deldS2dz[2]*A[0] + dS2cart[2]*delAx[2] - deldS2dx[2]*A[2] - dS2cart[0]*delAz[2]; + // derivatives of Z-directional field + ddS2crossA[2][0] = deldS2dx[0]*A[1] + dS2cart[0]*delAy[0] - deldS2dy[0]*A[0] - dS2cart[1]*delAx[0]; + ddS2crossA[2][1] = deldS2dx[1]*A[1] + dS2cart[0]*delAy[1] - deldS2dy[1]*A[0] - dS2cart[1]*delAx[1]; + ddS2crossA[2][2] = deldS2dx[2]*A[1] + dS2cart[0]*delAy[2] - deldS2dy[2]*A[0] - dS2cart[1]*delAx[2]; + + return S2*delB + dS2cart[_dComponent]*B + ddS2crossA[_fComponent][_dComponent]; } return 0; // dummy, but prevents gcc from yelling diff --git a/backgroundfield/vectordipole.hpp b/backgroundfield/vectordipole.hpp index bc775ba1a..bf8c2e817 100644 --- a/backgroundfield/vectordipole.hpp +++ b/backgroundfield/vectordipole.hpp @@ -1,6 +1,7 @@ /* * This file is part of Vlasiator. * Copyright 2010-2016 Finnish Meteorological Institute + * Copyright 2017-2019 University of Helsinki * * For details of usage, see the COPYING file and read the "Rules of the Road" * at http://www.physics.helsinki.fi/vlasiator/ From 0b75146249df151aec5963815514b39688f1b3d4 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 30 Apr 2019 15:49:02 +0300 Subject: [PATCH 359/602] Setup boundary flags directly on fsgrid. Added fsgrid boundary flag setup directly to sysboundary classes. NOTE: Ionosphere sets the boundary flag on fsgrid resolution using real-space coordinates --> can lead to disagreements with coarser dccrg due to aliasing. Compiles, not tested. --- fieldsolver/gridGlue.cpp | 3 +- grid.cpp | 2 +- sysboundary/antisymmetric.cpp | 44 +++++++++++++-- sysboundary/antisymmetric.h | 3 +- sysboundary/donotcompute.cpp | 3 +- sysboundary/donotcompute.h | 3 +- sysboundary/ionosphere.cpp | 86 +++++++++++++++++++++--------- sysboundary/ionosphere.h | 3 +- sysboundary/outflow.cpp | 50 ++++++++++++++--- sysboundary/outflow.h | 3 +- sysboundary/project_boundary.cpp | 41 ++++++++++++-- sysboundary/project_boundary.h | 3 +- sysboundary/setbyuser.cpp | 40 ++++++++++++-- sysboundary/setbyuser.h | 3 +- sysboundary/sysboundary.cpp | 5 +- sysboundary/sysboundary.h | 3 +- sysboundary/sysboundarycondition.h | 4 +- 17 files changed, 241 insertions(+), 58 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 1f9631ae4..4b30d5489 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -659,7 +659,8 @@ void setupTechnicalFsGrid(dccrg::Dccrg& m fsgrids::technical* thisCellData = &transferBuffer[i]; // Data needs to be collected from some different places for this grid. 
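// Boundary-flag collection moved out of this function: each sysboundary class now stamps its flag straight onto fsgrid (see the assignSysBoundary loops below), iterating technicalGrid.getLocalSize(), taking technicalGrid.getPhysicalCoords(i,j,k), testing the cell centre at maximum-refinement resolution, and setting technicalGrid.get(i,j,k)->sysBoundaryFlag = this->getIndex().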
- thisCellData->sysBoundaryFlag = mpiGrid[cells[i]]->sysBoundaryFlag; + // Boundary flags are now set on fsgrid in assignSysBoundary + // thisCellData->sysBoundaryFlag = mpiGrid[cells[i]]->sysBoundaryFlag; // Remove boundary layer copy here // thisCellData->sysBoundaryLayer = mpiGrid[cells[i]]->sysBoundaryLayer; thisCellData->maxFsDt = std::numeric_limits::max(); diff --git a/grid.cpp b/grid.cpp index 1e54a6b96..45c599d1c 100644 --- a/grid.cpp +++ b/grid.cpp @@ -168,7 +168,7 @@ void initializeGrids( // Initialise system boundary conditions (they need the initialised positions!!) phiprof::start("Classify cells (sys boundary conditions)"); - if(sysBoundaries.classifyCells(mpiGrid) == false) { + if(sysBoundaries.classifyCells(mpiGrid,technicalGrid) == false) { cerr << "(MAIN) ERROR: System boundary conditions were not set correctly." << endl; exit(1); } diff --git a/sysboundary/antisymmetric.cpp b/sysboundary/antisymmetric.cpp index 6b72e94da..b122d0bd0 100644 --- a/sysboundary/antisymmetric.cpp +++ b/sysboundary/antisymmetric.cpp @@ -93,7 +93,11 @@ namespace SBC { return true; } - bool Antisymmetric::assignSysBoundary(dccrg::Dccrg& mpiGrid) { + bool Antisymmetric::assignSysBoundary(dccrg::Dccrg& mpiGrid, + FsGrid< fsgrids::technical, 2> & technicalGrid) { + bool doAssign; + std::array isThisCellOnAFace; + const vector& cells = getLocalCells(); for (size_t c=0; csysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) continue; @@ -105,12 +109,12 @@ namespace SBC { creal y = cellParams[CellParams::YCRD] + 0.5*dy; creal z = cellParams[CellParams::ZCRD] + 0.5*dz; - bool isThisCellOnAFace[6]; - determineFace(&isThisCellOnAFace[0], x, y, z, dx, dy, dz); + isThisCellOnAFace.fill(false); + determineFace(isThisCellOnAFace.data(), x, y, z, dx, dy, dz); // Comparison of the array defining which faces to use and the // array telling on which faces this cell is - bool doAssign = false; + doAssign = false; for (uint j=0; j<6; j++) doAssign = doAssign || (facesToProcess[j] && isThisCellOnAFace[j]); if (doAssign) { uint flag = getIndex(); @@ -121,6 +125,38 @@ namespace SBC { mpiGrid[cells[c]]->sysBoundaryFlag = flag; } } + + // Assign boundary flags to local fsgrid cells + const std::array gridDims(technicalGrid.getLocalSize()); + for (int k=0; kparameters[0]); + creal dx = cellParams[CellParams::DX] / pow(2,P::amrMaxSpatialRefLevel); + creal dy = cellParams[CellParams::DY] / pow(2,P::amrMaxSpatialRefLevel); + creal dz = cellParams[CellParams::DZ] / pow(2,P::amrMaxSpatialRefLevel); + creal x = coords[0] + 0.5 * dx; + creal y = coords[1] + 0.5 * dy; + creal z = coords[2] + 0.5 * dz; + + isThisCellOnAFace.fill(false); + doAssign = false; + + determineFace(isThisCellOnAFace.data(), x, y, z, dx, dy, dz); + for(uint j=0; j<6; j++) doAssign = doAssign || (facesToProcess[j] && isThisCellOnAFace[j]); + if(doAssign) { + if (y < Parameters::ymin+Parameters::dy_ini) { + technicalGrid.get(i,j,k)->sysBoundaryFlag = sysboundarytype::DO_NOT_COMPUTE; + } else { + technicalGrid.get(i,j,k)->sysBoundaryFlag = this->getIndex(); + } + } + } + } + } + return true; } diff --git a/sysboundary/antisymmetric.h b/sysboundary/antisymmetric.h index e365b1eb0..9f1218481 100644 --- a/sysboundary/antisymmetric.h +++ b/sysboundary/antisymmetric.h @@ -51,7 +51,8 @@ namespace SBC { creal& t, Project &project ); - virtual bool assignSysBoundary(dccrg::Dccrg& mpiGrid); + virtual bool assignSysBoundary(dccrg::Dccrg& mpiGrid, + FsGrid< fsgrids::technical, 2> & technicalGrid); virtual bool applyInitialState( const dccrg::Dccrg& mpiGrid, Project 
&project diff --git a/sysboundary/donotcompute.cpp b/sysboundary/donotcompute.cpp index f786a726d..3a8775729 100644 --- a/sysboundary/donotcompute.cpp +++ b/sysboundary/donotcompute.cpp @@ -48,7 +48,8 @@ namespace SBC { return true; } - bool DoNotCompute::assignSysBoundary(dccrg::Dccrg& ) { + bool DoNotCompute::assignSysBoundary(dccrg::Dccrg&, + FsGrid< fsgrids::technical, 2> & technicalGrid) { return true; } diff --git a/sysboundary/donotcompute.h b/sysboundary/donotcompute.h index 7cc178427..697efe3af 100644 --- a/sysboundary/donotcompute.h +++ b/sysboundary/donotcompute.h @@ -48,7 +48,8 @@ namespace SBC { creal& t, Project &project ); - virtual bool assignSysBoundary(dccrg::Dccrg& mpiGrid); + virtual bool assignSysBoundary(dccrg::Dccrg& mpiGrid, + FsGrid< fsgrids::technical, 2> & technicalGrid); virtual bool applyInitialState( const dccrg::Dccrg& mpiGrid, Project &project diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index 57f7f4468..aa37975c8 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -156,13 +156,44 @@ namespace SBC { return true; } + + Real getR(creal x,creal y,creal z, uint geometry, Real center[3]) { + + Real r; + + switch(geometry) { + case 0: + // infinity-norm, result is a diamond/square with diagonals aligned on the axes in 2D + r = fabs(x-center[0]) + fabs(y-center[1]) + fabs(z-center[2]); + break; + case 1: + // 1-norm, result is a grid-aligned square in 2D + r = max(max(fabs(x-center[0]), fabs(y-center[1])), fabs(z-center[2])); + break; + case 2: + // 2-norm (Cartesian), result is a circle in 2D + r = sqrt((x-center[0])*(x-center[0]) + (y-center[1])*(y-center[1]) + (z-center[2])*(z-center[2])); + break; + case 3: + // 2-norm (Cartesian) cylinder aligned on y-axis + r = sqrt((x-center[0])*(x-center[0]) + (z-center[2])*(z-center[2])); + break; + default: + std::cerr << __FILE__ << ":" << __LINE__ << ":" << "ionosphere.geometry has to be 0, 1, 2 or 3." << std::endl; + abort(); + } + + return r; + } - bool Ionosphere::assignSysBoundary(dccrg::Dccrg& mpiGrid) { + bool Ionosphere::assignSysBoundary(dccrg::Dccrg& mpiGrid, + FsGrid< fsgrids::technical, 2> & technicalGrid) { vector cells = mpiGrid.get_cells(); for(uint i=0; isysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { continue; } + creal* const cellParams = &(mpiGrid[cells[i]]->parameters[0]); creal dx = cellParams[CellParams::DX]; creal dy = cellParams[CellParams::DY]; @@ -170,34 +201,39 @@ namespace SBC { creal x = cellParams[CellParams::XCRD] + 0.5*dx; creal y = cellParams[CellParams::YCRD] + 0.5*dy; creal z = cellParams[CellParams::ZCRD] + 0.5*dz; - Real r; - switch(this->geometry) { - case 0: - // infinity-norm, result is a diamond/square with diagonals aligned on the axes in 2D - r = fabs(x-center[0]) + fabs(y-center[1]) + fabs(z-center[2]); - break; - case 1: - // 1-norm, result is is a grid-aligned square in 2D - r = max(max(fabs(x-center[0]), fabs(y-center[1])), fabs(z-center[2])); - break; - case 2: - // 2-norm (Cartesian), result is a circle in 2D - r = sqrt((x-center[0])*(x-center[0]) + (y-center[1])*(y-center[1]) + (z-center[2])*(z-center[2])); - break; - case 3: - // 2-norm (Cartesian) cylinder aligned on y-axis - r = sqrt((x-center[0])*(x-center[0]) + (z-center[2])*(z-center[2])); - break; - default: - std::cerr << __FILE__ << ":" << __LINE__ << ":" << "ionosphere.geometry has to be 0, 1 or 2."
<< std::endl; - abort(); - } - - if(r < this->radius) { + if(getR(x,y,z,this->geometry,this->center) < this->radius) { mpiGrid[cells[i]]->sysBoundaryFlag = this->getIndex(); } } + + // Assign boundary flags to local fsgrid cells + const std::array gridDims(technicalGrid.getLocalSize()); + for (int k=0; ksysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { + continue; + } + + creal* const cellParams = &(cell->parameters[0]); + creal dx = cellParams[CellParams::DX] / pow(2,P::amrMaxSpatialRefLevel); + creal dy = cellParams[CellParams::DY] / pow(2,P::amrMaxSpatialRefLevel); + creal dz = cellParams[CellParams::DZ] / pow(2,P::amrMaxSpatialRefLevel); + creal x = coords[0] + 0.5 * dx; + creal y = coords[1] + 0.5 * dy; + creal z = coords[2] + 0.5 * dz; + + if(getR(x,y,z,this->geometry,this->center) < this->radius) { + technicalGrid.get(i,j,k)->sysBoundaryFlag = this->getIndex(); + } + + } + } + } + return true; } diff --git a/sysboundary/ionosphere.h b/sysboundary/ionosphere.h index 6252376f2..62bbc2052 100644 --- a/sysboundary/ionosphere.h +++ b/sysboundary/ionosphere.h @@ -63,7 +63,8 @@ namespace SBC { creal& t, Project &project ); - virtual bool assignSysBoundary(dccrg::Dccrg& mpiGrid); + virtual bool assignSysBoundary(dccrg::Dccrg& mpiGrid, + FsGrid< fsgrids::technical, 2> & technicalGrid); virtual bool applyInitialState( const dccrg::Dccrg& mpiGrid, Project &project diff --git a/sysboundary/outflow.cpp b/sysboundary/outflow.cpp index 0feb76ecb..53ba9d706 100644 --- a/sysboundary/outflow.cpp +++ b/sysboundary/outflow.cpp @@ -214,11 +214,17 @@ namespace SBC { return true; } - bool Outflow::assignSysBoundary(dccrg::Dccrg& mpiGrid) { + bool Outflow::assignSysBoundary(dccrg::Dccrg& mpiGrid, + FsGrid< fsgrids::technical, 2> & technicalGrid) { + + bool doAssign; + std::array isThisCellOnAFace; + + // Assign boundary flags to local DCCRG cells vector cells = mpiGrid.get_cells(); - for(uint i = 0; i < cells.size(); i++) { - if(mpiGrid[cells[i]]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) continue; - creal* const cellParams = &(mpiGrid[cells[i]]->parameters[0]); + for(const auto& dccrgId : cells) { + if(mpiGrid[dccrgId]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) continue; + creal* const cellParams = &(mpiGrid[dccrgId]->parameters[0]); creal dx = cellParams[CellParams::DX]; creal dy = cellParams[CellParams::DY]; creal dz = cellParams[CellParams::DZ]; @@ -226,16 +232,44 @@ namespace SBC { creal y = cellParams[CellParams::YCRD] + 0.5*dy; creal z = cellParams[CellParams::ZCRD] + 0.5*dz; - bool isThisCellOnAFace[6]; - determineFace(&isThisCellOnAFace[0], x, y, z, dx, dy, dz); + isThisCellOnAFace.fill(false); + determineFace(isThisCellOnAFace.data(), x, y, z, dx, dy, dz); // Comparison of the array defining which faces to use and the array telling on which faces this cell is - bool doAssign = false; + doAssign = false; for(uint j=0; j<6; j++) doAssign = doAssign || (facesToProcess[j] && isThisCellOnAFace[j]); if(doAssign) { - mpiGrid[cells[i]]->sysBoundaryFlag = this->getIndex(); + mpiGrid[dccrgId]->sysBoundaryFlag = this->getIndex(); + } + } + + // Assign boundary flags to local fsgrid cells + const std::array gridDims(technicalGrid.getLocalSize()); + for (int k=0; kparameters[0]); + creal dx = cellParams[CellParams::DX] / pow(2,P::amrMaxSpatialRefLevel); + creal dy = cellParams[CellParams::DY] / pow(2,P::amrMaxSpatialRefLevel); + creal dz = cellParams[CellParams::DZ] / pow(2,P::amrMaxSpatialRefLevel); + creal x = coords[0] + 0.5*dx; + creal y = coords[1] + 0.5*dy; + creal z = coords[2] 
+ 0.5*dz; + + isThisCellOnAFace.fill(false); + doAssign = false; + + determineFace(isThisCellOnAFace.data(), x, y, z, dx, dy, dz); + for(uint j=0; j<6; j++) doAssign = doAssign || (facesToProcess[j] && isThisCellOnAFace[j]); + if(doAssign) { + technicalGrid.get(i,j,k)->sysBoundaryFlag = this->getIndex(); + } + } } } + return true; } diff --git a/sysboundary/outflow.h b/sysboundary/outflow.h index 484222532..18b278ed2 100644 --- a/sysboundary/outflow.h +++ b/sysboundary/outflow.h @@ -63,7 +63,8 @@ namespace SBC { creal& t, Project &project ); - virtual bool assignSysBoundary(dccrg::Dccrg& mpiGrid); + virtual bool assignSysBoundary(dccrg::Dccrg& mpiGrid, + FsGrid< fsgrids::technical, 2> & technicalGrid); virtual bool applyInitialState( const dccrg::Dccrg& mpiGrid, Project &project diff --git a/sysboundary/project_boundary.cpp b/sysboundary/project_boundary.cpp index bb9f9432d..3db21d428 100644 --- a/sysboundary/project_boundary.cpp +++ b/sysboundary/project_boundary.cpp @@ -92,7 +92,12 @@ namespace SBC { return success; } - bool ProjectBoundary::assignSysBoundary(dccrg::Dccrg& mpiGrid) { + bool ProjectBoundary::assignSysBoundary(dccrg::Dccrg& mpiGrid, + FsGrid< fsgrids::technical, 2> & technicalGrid) { + + bool doAssign; + std::array isThisCellOnAFace; + vector cells = mpiGrid.get_cells(); for(uint i = 0; i < cells.size(); i++) { if(mpiGrid[cells[i]]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) continue; @@ -104,15 +109,43 @@ namespace SBC { creal y = cellParams[CellParams::YCRD] + 0.5*dy; creal z = cellParams[CellParams::ZCRD] + 0.5*dz; - bool isThisCellOnAFace[6]; - determineFace(&isThisCellOnAFace[0], x, y, z, dx, dy, dz); + isThisCellOnAFace.fill(false); + determineFace(isThisCellOnAFace.data(), x, y, z, dx, dy, dz); // Comparison of the array defining which faces to use and the array telling on which faces this cell is - bool doAssign = false; + doAssign = false; for(uint j=0; j<6; j++) doAssign = doAssign || (facesToProcess[j] && isThisCellOnAFace[j]); if(doAssign) { mpiGrid[cells[i]]->sysBoundaryFlag = this->getIndex(); } } + + // Assign boundary flags to local fsgrid cells + const std::array gridDims(technicalGrid.getLocalSize()); + for (int k=0; kparameters[0]); + creal dx = cellParams[CellParams::DX] / pow(2,P::amrMaxSpatialRefLevel); + creal dy = cellParams[CellParams::DY] / pow(2,P::amrMaxSpatialRefLevel); + creal dz = cellParams[CellParams::DZ] / pow(2,P::amrMaxSpatialRefLevel); + creal x = coords[0] + 0.5*dx; + creal y = coords[1] + 0.5*dy; + creal z = coords[2] + 0.5*dz; + + isThisCellOnAFace.fill(false); + doAssign = false; + + determineFace(isThisCellOnAFace.data(), x, y, z, dx, dy, dz); + for(uint j=0; j<6; j++) doAssign = doAssign || (facesToProcess[j] && isThisCellOnAFace[j]); + if(doAssign) { + technicalGrid.get(i,j,k)->sysBoundaryFlag = this->getIndex(); + } + } + } + } + return true; } diff --git a/sysboundary/project_boundary.h b/sysboundary/project_boundary.h index cfa4d4a1e..510479207 100644 --- a/sysboundary/project_boundary.h +++ b/sysboundary/project_boundary.h @@ -52,7 +52,8 @@ namespace SBC { creal& t, Project &project ); - virtual bool assignSysBoundary(dccrg::Dccrg& mpiGrid); + virtual bool assignSysBoundary(dccrg::Dccrg& mpiGrid, + FsGrid< fsgrids::technical, 2> & technicalGrid); virtual bool applyInitialState( const dccrg::Dccrg& mpiGrid, Project &project diff --git a/sysboundary/setbyuser.cpp b/sysboundary/setbyuser.cpp index d1fc44613..37ee47e47 100644 --- a/sysboundary/setbyuser.cpp +++ b/sysboundary/setbyuser.cpp @@ -78,7 +78,11 @@ namespace SBC 
{ return success; } - bool SetByUser::assignSysBoundary(dccrg::Dccrg& mpiGrid) { + bool SetByUser::assignSysBoundary(dccrg::Dccrg& mpiGrid, + FsGrid< fsgrids::technical, 2> & technicalGrid) { + bool doAssign; + std::array isThisCellOnAFace; + vector cells = mpiGrid.get_cells(); for(uint i = 0; i < cells.size(); i++) { if(mpiGrid[cells[i]]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) continue; @@ -90,15 +94,43 @@ namespace SBC { creal y = cellParams[CellParams::YCRD] + 0.5*dy; creal z = cellParams[CellParams::ZCRD] + 0.5*dz; - bool isThisCellOnAFace[6]; - determineFace(&isThisCellOnAFace[0], x, y, z, dx, dy, dz); + isThisCellOnAFace.fill(false); + determineFace(isThisCellOnAFace.data(), x, y, z, dx, dy, dz); // Comparison of the array defining which faces to use and the array telling on which faces this cell is - bool doAssign = false; + doAssign = false; for(uint j=0; j<6; j++) doAssign = doAssign || (facesToProcess[j] && isThisCellOnAFace[j]); if(doAssign) { mpiGrid[cells[i]]->sysBoundaryFlag = this->getIndex(); } } + + // Assign boundary flags to local fsgrid cells + const std::array gridDims(technicalGrid.getLocalSize()); + for (int k=0; kparameters[0]); + creal dx = cellParams[CellParams::DX] / pow(2,P::amrMaxSpatialRefLevel); + creal dy = cellParams[CellParams::DY] / pow(2,P::amrMaxSpatialRefLevel); + creal dz = cellParams[CellParams::DZ] / pow(2,P::amrMaxSpatialRefLevel); + creal x = coords[0] + 0.5*dx; + creal y = coords[1] + 0.5*dy; + creal z = coords[2] + 0.5*dz; + + isThisCellOnAFace.fill(false); + doAssign = false; + + determineFace(isThisCellOnAFace.data(), x, y, z, dx, dy, dz); + for(uint j=0; j<6; j++) doAssign = doAssign || (facesToProcess[j] && isThisCellOnAFace[j]); + if(doAssign) { + technicalGrid.get(i,j,k)->sysBoundaryFlag = this->getIndex(); + } + } + } + } + return true; } diff --git a/sysboundary/setbyuser.h b/sysboundary/setbyuser.h index 56a24f8ea..9a5d0881c 100644 --- a/sysboundary/setbyuser.h +++ b/sysboundary/setbyuser.h @@ -68,7 +68,8 @@ namespace SBC { creal& t, Project &project ); - virtual bool assignSysBoundary(dccrg::Dccrg& mpiGrid); + virtual bool assignSysBoundary(dccrg::Dccrg& mpiGrid, + FsGrid< fsgrids::technical, 2> & technicalGrid); virtual bool applyInitialState( const dccrg::Dccrg& mpiGrid, Project &project diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp index 882659a7c..de2999be9 100644 --- a/sysboundary/sysboundary.cpp +++ b/sysboundary/sysboundary.cpp @@ -389,7 +389,8 @@ bool SysBoundary::checkRefinement(dccrg::Dccrg& mpiGrid) { +bool SysBoundary::classifyCells(dccrg::Dccrg& mpiGrid, + FsGrid< fsgrids::technical, 2> & technicalGrid) { bool success = true; vector cells = mpiGrid.get_cells(); @@ -405,7 +406,7 @@ bool SysBoundary::classifyCells(dccrg::Dccrg::iterator it; for (it = sysBoundaries.begin(); it != sysBoundaries.end(); it++) { - success = success && (*it)->assignSysBoundary(mpiGrid); + success = success && (*it)->assignSysBoundary(mpiGrid,technicalGrid); } // communicate boundary assignments (sysBoundaryFlag and diff --git a/sysboundary/sysboundary.h b/sysboundary/sysboundary.h index 7c29a5c21..bc6387b54 100644 --- a/sysboundary/sysboundary.h +++ b/sysboundary/sysboundary.h @@ -68,7 +68,8 @@ class SysBoundary { creal& t ); bool checkRefinement(dccrg::Dccrg& mpiGrid); - bool classifyCells(dccrg::Dccrg& mpiGrid); + bool classifyCells(dccrg::Dccrg& mpiGrid, + FsGrid< fsgrids::technical, 2> & technicalGrid); bool applyInitialState( dccrg::Dccrg& mpiGrid, Project& project diff --git 
a/sysboundary/sysboundarycondition.h b/sysboundary/sysboundarycondition.h index f6ccff8a8..855a9a0a4 100644 --- a/sysboundary/sysboundarycondition.h +++ b/sysboundary/sysboundarycondition.h @@ -65,7 +65,9 @@ namespace SBC { creal& t, Project &project )=0; - virtual bool assignSysBoundary(dccrg::Dccrg& mpiGrid)=0; + virtual bool assignSysBoundary(dccrg::Dccrg& mpiGrid, + FsGrid< fsgrids::technical, 2> & technicalGrid)=0; virtual bool applyInitialState( const dccrg::Dccrg& mpiGrid, Project &project From 8034f037677584b1a96ea89695c21b1b3e8d1e68 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 2 May 2019 11:21:56 +0300 Subject: [PATCH 360/602] Removed sysBoundaryLayerNew stuff --- spatial_cell.cpp | 3 -- sysboundary/sysboundary.cpp | 67 ------------------------------------- 2 files changed, 70 deletions(-) diff --git a/spatial_cell.cpp b/spatial_cell.cpp index 65d3b95a9..12d59952a 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -81,7 +81,6 @@ namespace spatial_cell { SpatialCell::SpatialCell(const SpatialCell& other): sysBoundaryFlag(other.sysBoundaryFlag), sysBoundaryLayer(other.sysBoundaryLayer), - sysBoundaryLayerNew(other.sysBoundaryLayerNew), velocity_block_with_content_list(other.velocity_block_with_content_list), velocity_block_with_no_content_list(other.velocity_block_with_no_content_list), initialized(other.initialized), @@ -792,8 +791,6 @@ namespace spatial_cell { block_lengths.push_back(sizeof(uint)); displacements.push_back((uint8_t*) &(this->sysBoundaryLayer) - (uint8_t*) this); block_lengths.push_back(sizeof(uint)); - displacements.push_back((uint8_t*) &(this->sysBoundaryLayerNew) - (uint8_t*) this); - block_lengths.push_back(sizeof(int)); } if ((SpatialCell::mpi_transfer_type & Transfer::VEL_BLOCK_PARAMETERS) !=0) { diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp index de2999be9..d97b2548b 100644 --- a/sysboundary/sysboundary.cpp +++ b/sysboundary/sysboundary.cpp @@ -414,73 +414,6 @@ bool SysBoundary::classifyCells(dccrg::Dccrg* nbrs = mpiGrid.get_neighbors_of(cells[c],SYSBOUNDARIES_NEIGHBORHOOD_ID); - for (size_t n=0; nsize(); ++n) { - if ((*nbrs)[n] == 0) continue; - if (mpiGrid[(*nbrs)[n]]->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) hasNormalNbrs = true; - if (mpiGrid[(*nbrs)[n]]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) hasBndryNbrs = true; - }*/ - - Real xmin = mpiGrid[cells[c]]->get_cell_parameters()[CellParams::XCRD]; - Real ymin = mpiGrid[cells[c]]->get_cell_parameters()[CellParams::YCRD]; - Real zmin = mpiGrid[cells[c]]->get_cell_parameters()[CellParams::ZCRD]; - Real dx = mpiGrid[cells[c]]->get_cell_parameters()[CellParams::DX]; - Real dy = mpiGrid[cells[c]]->get_cell_parameters()[CellParams::DY]; - Real dz = mpiGrid[cells[c]]->get_cell_parameters()[CellParams::DZ]; - - Real x_cen = xmin + 0.5*dx; - Real y_cen = ymin + 0.5*dy; - Real z_cen = zmin + 0.5*dz; - - Real x_bndr = 900.0; - Real y_bndr = 700.0; - - bool xoutside = false; - bool youtside = false; - if (x_cen < -x_bndr) xoutside = true; - if (x_cen > +x_bndr) xoutside = true; - if (y_cen < -y_bndr) youtside = true; - if (y_cen > +y_bndr) youtside = true; - if (xoutside == true || youtside == true) mpiGrid[cells[c]]->sysBoundaryLayerNew = -2; - else mpiGrid[cells[c]]->sysBoundaryLayerNew = +2; - - if (x_cen <= -x_bndr && x_cen+0.6*dx > -x_bndr && youtside == false) mpiGrid[cells[c]]->sysBoundaryLayerNew = -1; - if (x_cen >= +x_bndr && x_cen-0.6*dx < +x_bndr && youtside == false) mpiGrid[cells[c]]->sysBoundaryLayerNew = -1; - if (x_cen >= -x_bndr && 
x_cen-0.6*dx < -x_bndr && youtside == false) mpiGrid[cells[c]]->sysBoundaryLayerNew = +1; - if (x_cen <= +x_bndr && x_cen+0.6*dx > +x_bndr && youtside == false) mpiGrid[cells[c]]->sysBoundaryLayerNew = +1; - - if (y_cen <= -y_bndr && y_cen+0.6*dy > -y_bndr && xoutside == false) mpiGrid[cells[c]]->sysBoundaryLayerNew = -1; - if (y_cen >= +y_bndr && y_cen-0.6*dy < +y_bndr && xoutside == false) mpiGrid[cells[c]]->sysBoundaryLayerNew = -1; - if (y_cen >= -y_bndr && y_cen-0.6*dy < -y_bndr && xoutside == false) mpiGrid[cells[c]]->sysBoundaryLayerNew = +1; - if (y_cen <= +y_bndr && y_cen+0.6*dy > +y_bndr && xoutside == false) mpiGrid[cells[c]]->sysBoundaryLayerNew = +1; - - if ( (x_cen <= -x_bndr && x_cen+0.6*dx > -x_bndr) - && (y_cen <= -y_bndr && y_cen+0.6*dy > -y_bndr)) mpiGrid[cells[c]]->sysBoundaryLayerNew = -1; - if ( (x_cen <= -x_bndr && x_cen+0.6*dx > -x_bndr) - && (y_cen >= +y_bndr && y_cen-0.6*dy < +y_bndr)) mpiGrid[cells[c]]->sysBoundaryLayerNew = -1; - if ( (x_cen >= +x_bndr && x_cen-0.6*dx < +x_bndr) - && (y_cen <= -y_bndr && y_cen+0.6*dy > -y_bndr)) mpiGrid[cells[c]]->sysBoundaryLayerNew = -1; - if ( (x_cen >= +x_bndr && x_cen-0.6*dx < +x_bndr) - && (y_cen >= +y_bndr && y_cen-0.6*dy < +y_bndr)) mpiGrid[cells[c]]->sysBoundaryLayerNew = -1; - - /* - if (mpiGrid[cells[c]]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { - // Cell inside system boundary, if it touches the interface it gets value -1. - // Otherwise it gets value -2. - if (hasNormalNbrs == true) mpiGrid[cells[c]]->sysBoundaryLayerNew = -1; - else mpiGrid[cells[c]]->sysBoundaryLayerNew = -2; - } else { - // Cell inside simulation domain, if it touches the interface it gets value +2. - // Otherwise it gets value +2. - if (hasBndryNbrs == true) mpiGrid[cells[c]]->sysBoundaryLayerNew = 1; - else mpiGrid[cells[c]]->sysBoundaryLayerNew = 2; - }*/ - } - // set distance 1 cells to boundary cells, that have neighbors which are normal cells for(uint i=0; i<cells.size(); i++) { mpiGrid[cells[i]]->sysBoundaryLayer=0; /*Initial value*/ From e38f0c86eb45209e9e6d3d508c7f322205e08c81 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 2 May 2019 12:00:04 +0300 Subject: [PATCH 361/602] Fix a crash due to remote dccrg cells not being found. We do not actually need the cell pointer, only the refinement level, which should be available for remote cells. Added an error check for the case where it is not, since the comments in dccrg are a bit unclear.
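A sketch of the replacement pattern applied in every file below (a fragment, not a standalone program; the dccrg calls and Parameters members are those visible in the hunks): only the grid topology is queried, never the cell data pointer, so the lookup also works for cells whose data live on another rank.

    // Refinement level is looked up through the cell id; remote cells have
    // no local data pointer, but their topology should still be known.
    const auto refLvl = mpiGrid.get_refinement_level(mpiGrid.get_existing_cell(coords));
    if(refLvl == -1) {
       // dccrg's comments are unclear on remote cells, so fail loudly.
       cerr << "Error, could not get refinement level of remote DCCRG cell" << endl;
    }
    // Cell size follows from the base spacing and the refinement level,
    creal dx = P::dx_ini * pow(2,-refLvl);
    // while the half-cell shift to the fsgrid cell centre always uses the
    // finest (fsgrid) resolution.
    creal x = coords[0] + 0.5 * P::dx_ini * pow(2,-P::amrMaxSpatialRefLevel);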
--- sysboundary/antisymmetric.cpp | 19 +++++++++++-------- sysboundary/ionosphere.cpp | 21 ++++++++++----------- sysboundary/outflow.cpp | 19 +++++++++++-------- sysboundary/project_boundary.cpp | 19 +++++++++++-------- sysboundary/setbyuser.cpp | 23 +++++++++++++---------- 5 files changed, 56 insertions(+), 45 deletions(-) diff --git a/sysboundary/antisymmetric.cpp b/sysboundary/antisymmetric.cpp index b122d0bd0..26639b99d 100644 --- a/sysboundary/antisymmetric.cpp +++ b/sysboundary/antisymmetric.cpp @@ -132,14 +132,17 @@ namespace SBC { for (int j=0; jparameters[0]); - creal dx = cellParams[CellParams::DX] / pow(2,P::amrMaxSpatialRefLevel); - creal dy = cellParams[CellParams::DY] / pow(2,P::amrMaxSpatialRefLevel); - creal dz = cellParams[CellParams::DZ] / pow(2,P::amrMaxSpatialRefLevel); - creal x = coords[0] + 0.5 * dx; - creal y = coords[1] + 0.5 * dy; - creal z = coords[2] + 0.5 * dz; + const auto refLvl = mpiGrid.get_refinement_level(mpiGrid.get_existing_cell(coords)); + if(refLvl == -1) { + cerr << "Error, could not get refinement level of remote DCCRG cell " << __FILE__ << " " << __LINE__ << endl; + } + + creal dx = P::dx_ini * pow(2,-refLvl); + creal dy = P::dy_ini * pow(2,-refLvl); + creal dz = P::dz_ini * pow(2,-refLvl); + creal x = coords[0] + 0.5 * P::dx_ini * pow(2,-P::amrMaxSpatialRefLevel); + creal y = coords[1] + 0.5 * P::dy_ini * pow(2,-P::amrMaxSpatialRefLevel); + creal z = coords[2] + 0.5 * P::dz_ini * pow(2,-P::amrMaxSpatialRefLevel); isThisCellOnAFace.fill(false); doAssign = false; diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index aa37975c8..69fe7f0b1 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -213,18 +213,17 @@ namespace SBC { for (int j=0; jsysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { - continue; + const auto refLvl = mpiGrid.get_refinement_level(mpiGrid.get_existing_cell(coords)); + if(refLvl == -1) { + cerr << "Error, could not get refinement level of remote DCCRG cell " << __FILE__ << " " << __LINE__ << endl; } - - creal* const cellParams = &(cell->parameters[0]); - creal dx = cellParams[CellParams::DX] / pow(2,P::amrMaxSpatialRefLevel); - creal dy = cellParams[CellParams::DY] / pow(2,P::amrMaxSpatialRefLevel); - creal dz = cellParams[CellParams::DZ] / pow(2,P::amrMaxSpatialRefLevel); - creal x = coords[0] + 0.5 * dx; - creal y = coords[1] + 0.5 * dy; - creal z = coords[2] + 0.5 * dz; + + creal dx = P::dx_ini * pow(2,-refLvl); + creal dy = P::dy_ini * pow(2,-refLvl); + creal dz = P::dz_ini * pow(2,-refLvl); + creal x = coords[0] + 0.5 * P::dx_ini * pow(2,-P::amrMaxSpatialRefLevel); + creal y = coords[1] + 0.5 * P::dy_ini * pow(2,-P::amrMaxSpatialRefLevel); + creal z = coords[2] + 0.5 * P::dz_ini * pow(2,-P::amrMaxSpatialRefLevel); if(getR(x,y,z,this->geometry,this->center) < this->radius) { technicalGrid.get(i,j,k)->sysBoundaryFlag = this->getIndex(); diff --git a/sysboundary/outflow.cpp b/sysboundary/outflow.cpp index 53ba9d706..2159ca14f 100644 --- a/sysboundary/outflow.cpp +++ b/sysboundary/outflow.cpp @@ -249,14 +249,17 @@ namespace SBC { for (int j=0; jparameters[0]); - creal dx = cellParams[CellParams::DX] / pow(2,P::amrMaxSpatialRefLevel); - creal dy = cellParams[CellParams::DY] / pow(2,P::amrMaxSpatialRefLevel); - creal dz = cellParams[CellParams::DZ] / pow(2,P::amrMaxSpatialRefLevel); - creal x = coords[0] + 0.5*dx; - creal y = coords[1] + 0.5*dy; - creal z = coords[2] + 0.5*dz; + const auto refLvl = mpiGrid.get_refinement_level(mpiGrid.get_existing_cell(coords)); + if(refLvl == -1) { + 
cerr << "Error, could not get refinement level of remote DCCRG cell " << __FILE__ << " " << __LINE__ << endl; + } + + creal dx = P::dx_ini * pow(2,-refLvl); + creal dy = P::dy_ini * pow(2,-refLvl); + creal dz = P::dz_ini * pow(2,-refLvl); + creal x = coords[0] + 0.5 * P::dx_ini * pow(2,-P::amrMaxSpatialRefLevel); + creal y = coords[1] + 0.5 * P::dy_ini * pow(2,-P::amrMaxSpatialRefLevel); + creal z = coords[2] + 0.5 * P::dz_ini * pow(2,-P::amrMaxSpatialRefLevel); isThisCellOnAFace.fill(false); doAssign = false; diff --git a/sysboundary/project_boundary.cpp b/sysboundary/project_boundary.cpp index 3db21d428..e260b272a 100644 --- a/sysboundary/project_boundary.cpp +++ b/sysboundary/project_boundary.cpp @@ -125,14 +125,17 @@ namespace SBC { for (int j=0; jparameters[0]); - creal dx = cellParams[CellParams::DX] / pow(2,P::amrMaxSpatialRefLevel); - creal dy = cellParams[CellParams::DY] / pow(2,P::amrMaxSpatialRefLevel); - creal dz = cellParams[CellParams::DZ] / pow(2,P::amrMaxSpatialRefLevel); - creal x = coords[0] + 0.5*dx; - creal y = coords[1] + 0.5*dy; - creal z = coords[2] + 0.5*dz; + const auto refLvl = mpiGrid.get_refinement_level(mpiGrid.get_existing_cell(coords)); + if(refLvl == -1) { + cerr << "Error, could not get refinement level of remote DCCRG cell " << __FILE__ << " " << __LINE__ << endl; + } + + creal dx = P::dx_ini * pow(2,-refLvl); + creal dy = P::dy_ini * pow(2,-refLvl); + creal dz = P::dz_ini * pow(2,-refLvl); + creal x = coords[0] + 0.5 * P::dx_ini * pow(2,-P::amrMaxSpatialRefLevel); + creal y = coords[1] + 0.5 * P::dy_ini * pow(2,-P::amrMaxSpatialRefLevel); + creal z = coords[2] + 0.5 * P::dz_ini * pow(2,-P::amrMaxSpatialRefLevel); isThisCellOnAFace.fill(false); doAssign = false; diff --git a/sysboundary/setbyuser.cpp b/sysboundary/setbyuser.cpp index 37ee47e47..1d5181591 100644 --- a/sysboundary/setbyuser.cpp +++ b/sysboundary/setbyuser.cpp @@ -105,20 +105,23 @@ namespace SBC { } // Assign boundary flags to local fsgrid cells - const std::array gridDims(technicalGrid.getLocalSize()); + const std::array gridDims(technicalGrid.getLocalSize()); for (int k=0; kparameters[0]); - creal dx = cellParams[CellParams::DX] / pow(2,P::amrMaxSpatialRefLevel); - creal dy = cellParams[CellParams::DY] / pow(2,P::amrMaxSpatialRefLevel); - creal dz = cellParams[CellParams::DZ] / pow(2,P::amrMaxSpatialRefLevel); - creal x = coords[0] + 0.5*dx; - creal y = coords[1] + 0.5*dy; - creal z = coords[2] + 0.5*dz; + const auto coords = technicalGrid.getPhysicalCoords(i,j,k); + const auto refLvl = mpiGrid.get_refinement_level(mpiGrid.get_existing_cell(coords)); + if(refLvl == -1) { + cerr << "Error, could not get refinement level of remote DCCRG cell " << __FILE__ << " " << __LINE__ << endl; + } + creal dx = P::dx_ini * pow(2,-refLvl); + creal dy = P::dy_ini * pow(2,-refLvl); + creal dz = P::dz_ini * pow(2,-refLvl); + creal x = coords[0] + 0.5 * P::dx_ini * pow(2,-P::amrMaxSpatialRefLevel); + creal y = coords[1] + 0.5 * P::dy_ini * pow(2,-P::amrMaxSpatialRefLevel); + creal z = coords[2] + 0.5 * P::dz_ini * pow(2,-P::amrMaxSpatialRefLevel); + isThisCellOnAFace.fill(false); doAssign = false; From 49cb62a440fb188a16f3b583a07d5a84960d0ec4 Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 2 May 2019 12:12:17 +0300 Subject: [PATCH 362/602] Removed setupTechnicalGrid and fixed layer/boundary flag initialisation on fsgrid. 
--- fieldsolver/gridGlue.cpp | 114 --------------------------- fieldsolver/gridGlue.hpp | 10 --- grid.cpp | 6 -- sysboundary/sysboundary.cpp | 148 ++++++++++++++++++++---------------- 4 files changed, 82 insertions(+), 196 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 4b30d5489..41b91a78a 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -612,120 +612,6 @@ void getDerivativesFromFsGrid( } } -bool belongsToLayer(const int layer, const int x, const int y, const int z, - FsGrid< fsgrids::technical, 2>& technicalGrid) { - - bool belongs = false; - - // loop through all neighbors (including diagonals) - for (int ix = -1; ix <= 1; ++ix) { - for (int iy = -1; iy <= 1; ++iy) { - for (int iz = -1; iz <= 1; ++iz) { - - // not strictly necessary but logically we should not consider the cell itself - // among its neighbors. - if( ix == 0 && iy == 0 && iz == 0 || !technicalGrid.get(x+ix,y+iy,z+iz)) { - continue; - } - - if(layer == 1 && technicalGrid.get(x+ix,y+iy,z+iz)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { - // in the first layer, boundary cell belongs if it has a non-boundary neighbor - belongs = true; - return belongs; - - } else if (layer > 1 && technicalGrid.get(x+ix,y+iy,z+iz)->sysBoundaryLayer == layer - 1) { - // in all other layers, boundary cell belongs if it has a neighbor in the previous layer - belongs = true; - return belongs; - } - } - } - } - - return belongs; -} - -void setupTechnicalFsGrid(dccrg::Dccrg& mpiGrid, - const std::vector& cells, FsGrid< fsgrids::technical, 2>& technicalGrid) { - - cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); - technicalGrid.setupForTransferIn(nCellsOnMaxRefLvl); - - // Setup transfer buffers - std::vector< fsgrids::technical > transferBuffer(cells.size()); - -#pragma omp parallel for - for(uint i = 0; i < cells.size(); ++i) { - - fsgrids::technical* thisCellData = &transferBuffer[i]; - // Data needs to be collected from some different places for this grid. - // Boundary flags are now set on fsgrid in assignSysBoundary - // thisCellData->sysBoundaryFlag = mpiGrid[cells[i]]->sysBoundaryFlag; - // Remove boundary layer copy here - // thisCellData->sysBoundaryLayer = mpiGrid[cells[i]]->sysBoundaryLayer; - thisCellData->maxFsDt = std::numeric_limits::max(); - } - - for(uint i = 0; i < cells.size(); ++i) { - - const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, cells[i]); - - for (auto fsgridId : fsgridIds) { - // std::cout << "fsgridId: " << fsgridId << ", fsgrid Cell Coordinates:"; - // auto coords = technicalGrid.globalIDtoCellCoord(fsgridId); - // for (auto coord : coords) std::cout << " " << coord; - // std::cout << std::endl; - technicalGrid.transferDataIn(fsgridId,&transferBuffer[i]); - } - } - - technicalGrid.finishTransfersIn(); - - auto localSize = technicalGrid.getLocalSize(); - - // Add layer calculation here. Include diagonals +-1. - - // Initialize boundary layer flags to 0. -#pragma omp parallel for collapse(3) - for (int x = 0; x < localSize[0]; ++x) { - for (int y = 0; y < localSize[1]; ++y) { - for (int z = 0; z < localSize[2]; ++z) { - technicalGrid.get(x,y,z)->sysBoundaryLayer = 0; - } - } - } - - // In dccrg initialization the max number of boundary layers is set to 3. 
- const int MAX_NUMBER_OF_BOUNDARY_LAYERS = 3 * pow(2,mpiGrid.get_maximum_refinement_level()); - - // loop through max number of layers - for(uint layer = 1; layer <= MAX_NUMBER_OF_BOUNDARY_LAYERS; ++layer) { - - // loop through all cells in grid -#pragma omp parallel for collapse(3) - for (int x = 0; x < localSize[0]; ++x) { - for (int y = 0; y < localSize[1]; ++y) { - for (int z = 0; z < localSize[2]; ++z) { - - // for the first layer, consider all cells that belong to a boundary, for other layers - // consider all cells that have not yet been labeled. - if((layer == 1 && technicalGrid.get(x,y,z)->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) || - (layer > 1 && technicalGrid.get(x,y,z)->sysBoundaryLayer == 0)) { - - if (belongsToLayer(layer, x, y, z, technicalGrid)) { - - technicalGrid.get(x,y,z)->sysBoundaryLayer = layer; - - if (layer > 2 && technicalGrid.get(x,y,z)->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { - technicalGrid.get(x,y,z)->sysBoundaryFlag = sysboundarytype::DO_NOT_COMPUTE; - } - } - } - } - } - } - } -} /* Map from dccrg cell id to fsgrid global cell ids when they aren't identical (ie. when dccrg has refinement). diff --git a/fieldsolver/gridGlue.hpp b/fieldsolver/gridGlue.hpp index ab01d7fff..0b085d2e5 100644 --- a/fieldsolver/gridGlue.hpp +++ b/fieldsolver/gridGlue.hpp @@ -60,16 +60,6 @@ void getDerivativesFromFsGrid( const std::vector& cells ); -/*! Transfer data into technical grid (boundary info etc.) - * \param mpiGrid The DCCRG grid carrying rho, rhoV and P - * \param cells List of local cells - * \param technicalGrid the target Fieldsolver grid for this information - * - * This function assumes that proper grid coupling has been set up. - */ -void setupTechnicalFsGrid(dccrg::Dccrg& mpiGrid, - const std::vector& cells, FsGrid< fsgrids::technical, 2>& technicalGrid); - int getNumberOfCellsOnMaxRefLvl(dccrg::Dccrg& mpiGrid, const std::vector& cells); diff --git a/grid.cpp b/grid.cpp index 45c599d1c..ef16201c7 100644 --- a/grid.cpp +++ b/grid.cpp @@ -307,12 +307,6 @@ void initializeGrids( technicalGrid.finishGridCoupling(); phiprof::stop("Initial fsgrid coupling"); - phiprof::start("setupTechnicalFsGrid"); - setupTechnicalFsGrid(mpiGrid, cells, technicalGrid); - - technicalGrid.updateGhostCells(); - phiprof::stop("setupTechnicalFsGrid"); - phiprof::start("setProjectBField"); project.setProjectBField(perBGrid, BgBGrid, technicalGrid); perBGrid.updateGhostCells(); diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp index de2999be9..7b76b8214 100644 --- a/sysboundary/sysboundary.cpp +++ b/sysboundary/sysboundary.cpp @@ -382,6 +382,39 @@ bool SysBoundary::checkRefinement(dccrg::Dccrg& technicalGrid) { + + bool belongs = false; + + // loop through all neighbors (including diagonals) + for (int ix = -1; ix <= 1; ++ix) { + for (int iy = -1; iy <= 1; ++iy) { + for (int iz = -1; iz <= 1; ++iz) { + + // not strictly necessary but logically we should not consider the cell itself + // among its neighbors. 
+ if( ix == 0 && iy == 0 && iz == 0 || !technicalGrid.get(x+ix,y+iy,z+iz)) { + continue; + } + + if(layer == 1 && technicalGrid.get(x+ix,y+iy,z+iz)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + // in the first layer, boundary cell belongs if it has a non-boundary neighbor + belongs = true; + return belongs; + + } else if (layer > 1 && technicalGrid.get(x+ix,y+iy,z+iz)->sysBoundaryLayer == layer - 1) { + // in all other layers, boundary cell belongs if it has a neighbor in the previous layer + belongs = true; + return belongs; + } + } + } + } + + return belongs; +} + /*!\brief Classify all simulation cells with respect to the system boundary conditions. * * Loops through all cells and and for each assigns the correct sysBoundaryFlag depending on @@ -393,11 +426,23 @@ bool SysBoundary::classifyCells(dccrg::Dccrg & technicalGrid) { bool success = true; vector cells = mpiGrid.get_cells(); + auto localSize = technicalGrid.getLocalSize(); /*set all cells to default value, not_sysboundary*/ for(uint i=0; isysBoundaryFlag = sysboundarytype::NOT_SYSBOUNDARY; } + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + technicalGrid.get(x,y,z)->sysBoundaryFlag = sysboundarytype::NOT_SYSBOUNDARY; + technicalGrid.get(x,y,z)->sysBoundaryLayer = 0; + technicalGrid.get(x,y,z)->maxFsDt = std::numeric_limits::max(); + } + } + } + /* loop through sysboundaries and let all sysboundaries set in local cells if they are part of which sysboundary (cell location needs to @@ -408,78 +453,14 @@ bool SysBoundary::classifyCells(dccrg::DccrgassignSysBoundary(mpiGrid,technicalGrid); } + // communicate boundary assignments (sysBoundaryFlag and // sysBoundaryLayer communicated) SpatialCell::set_mpi_transfer_type(Transfer::CELL_SYSBOUNDARYFLAG); mpiGrid.update_copies_of_remote_neighbors(SYSBOUNDARIES_NEIGHBORHOOD_ID); - // Compute distances to system boundaries according to the new classification - for (size_t c=0; c* nbrs = mpiGrid.get_neighbors_of(cells[c],SYSBOUNDARIES_NEIGHBORHOOD_ID); - for (size_t n=0; nsize(); ++n) { - if ((*nbrs)[n] == 0) continue; - if (mpiGrid[(*nbrs)[n]]->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) hasNormalNbrs = true; - if (mpiGrid[(*nbrs)[n]]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) hasBndryNbrs = true; - }*/ - - Real xmin = mpiGrid[cells[c]]->get_cell_parameters()[CellParams::XCRD]; - Real ymin = mpiGrid[cells[c]]->get_cell_parameters()[CellParams::YCRD]; - Real zmin = mpiGrid[cells[c]]->get_cell_parameters()[CellParams::ZCRD]; - Real dx = mpiGrid[cells[c]]->get_cell_parameters()[CellParams::DX]; - Real dy = mpiGrid[cells[c]]->get_cell_parameters()[CellParams::DY]; - Real dz = mpiGrid[cells[c]]->get_cell_parameters()[CellParams::DZ]; - - Real x_cen = xmin + 0.5*dx; - Real y_cen = ymin + 0.5*dy; - Real z_cen = zmin + 0.5*dz; - - Real x_bndr = 900.0; - Real y_bndr = 700.0; - - bool xoutside = false; - bool youtside = false; - if (x_cen < -x_bndr) xoutside = true; - if (x_cen > +x_bndr) xoutside = true; - if (y_cen < -y_bndr) youtside = true; - if (y_cen > +y_bndr) youtside = true; - if (xoutside == true || youtside == true) mpiGrid[cells[c]]->sysBoundaryLayerNew = -2; - else mpiGrid[cells[c]]->sysBoundaryLayerNew = +2; - - if (x_cen <= -x_bndr && x_cen+0.6*dx > -x_bndr && youtside == false) mpiGrid[cells[c]]->sysBoundaryLayerNew = -1; - if (x_cen >= +x_bndr && x_cen-0.6*dx < +x_bndr && youtside == false) mpiGrid[cells[c]]->sysBoundaryLayerNew = -1; - 
if (x_cen >= -x_bndr && x_cen-0.6*dx < -x_bndr && youtside == false) mpiGrid[cells[c]]->sysBoundaryLayerNew = +1; - if (x_cen <= +x_bndr && x_cen+0.6*dx > +x_bndr && youtside == false) mpiGrid[cells[c]]->sysBoundaryLayerNew = +1; - - if (y_cen <= -y_bndr && y_cen+0.6*dy > -y_bndr && xoutside == false) mpiGrid[cells[c]]->sysBoundaryLayerNew = -1; - if (y_cen >= +y_bndr && y_cen-0.6*dy < +y_bndr && xoutside == false) mpiGrid[cells[c]]->sysBoundaryLayerNew = -1; - if (y_cen >= -y_bndr && y_cen-0.6*dy < -y_bndr && xoutside == false) mpiGrid[cells[c]]->sysBoundaryLayerNew = +1; - if (y_cen <= +y_bndr && y_cen+0.6*dy > +y_bndr && xoutside == false) mpiGrid[cells[c]]->sysBoundaryLayerNew = +1; - - if ( (x_cen <= -x_bndr && x_cen+0.6*dx > -x_bndr) - && (y_cen <= -y_bndr && y_cen+0.6*dy > -y_bndr)) mpiGrid[cells[c]]->sysBoundaryLayerNew = -1; - if ( (x_cen <= -x_bndr && x_cen+0.6*dx > -x_bndr) - && (y_cen >= +y_bndr && y_cen-0.6*dy < +y_bndr)) mpiGrid[cells[c]]->sysBoundaryLayerNew = -1; - if ( (x_cen >= +x_bndr && x_cen-0.6*dx < +x_bndr) - && (y_cen <= -y_bndr && y_cen+0.6*dy > -y_bndr)) mpiGrid[cells[c]]->sysBoundaryLayerNew = -1; - if ( (x_cen >= +x_bndr && x_cen-0.6*dx < +x_bndr) - && (y_cen >= +y_bndr && y_cen-0.6*dy < +y_bndr)) mpiGrid[cells[c]]->sysBoundaryLayerNew = -1; - - /* - if (mpiGrid[cells[c]]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { - // Cell inside system boundary, if it touches the interface it gets value -1. - // Otherwise it gets value -2. - if (hasNormalNbrs == true) mpiGrid[cells[c]]->sysBoundaryLayerNew = -1; - else mpiGrid[cells[c]]->sysBoundaryLayerNew = -2; - } else { - // Cell inside simulation domain, if it touches the interface it gets value +2. - // Otherwise it gets value +2. - if (hasBndryNbrs == true) mpiGrid[cells[c]]->sysBoundaryLayerNew = 1; - else mpiGrid[cells[c]]->sysBoundaryLayerNew = 2; - }*/ - } + // set distance 1 cells to boundary cells, that have neighbors which are normal cells for(uint i=0; isysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) || + (layer > 1 && technicalGrid.get(x,y,z)->sysBoundaryLayer == 0)) { + + if (belongsToLayer(layer, x, y, z, technicalGrid)) { + + technicalGrid.get(x,y,z)->sysBoundaryLayer = layer; + + if (layer > 2 && technicalGrid.get(x,y,z)->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { + technicalGrid.get(x,y,z)->sysBoundaryFlag = sysboundarytype::DO_NOT_COMPUTE; + } + } + } + } + } + } + } + + technicalGrid.updateGhostCells(); + return success; } From 3ad8f7c5d88a794d420fce581ccdb6d5b49464f9 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 2 May 2019 12:22:11 +0300 Subject: [PATCH 363/602] Fixed compiler warnings. --- sysboundary/sysboundary.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp index 3ac443eef..3c1603cb4 100644 --- a/sysboundary/sysboundary.cpp +++ b/sysboundary/sysboundary.cpp @@ -394,7 +394,7 @@ bool belongsToLayer(const int layer, const int x, const int y, const int z, // not strictly necessary but logically we should not consider the cell itself // among its neighbors. - if( ix == 0 && iy == 0 && iz == 0 || !technicalGrid.get(x+ix,y+iy,z+iz)) { + if( ( ix == 0 && iy == 0 && iz == 0 ) || !technicalGrid.get(x+ix,y+iy,z+iz)) { continue; } @@ -522,7 +522,7 @@ bool SysBoundary::classifyCells(dccrg::Dccrg Date: Thu, 2 May 2019 13:21:45 +0300 Subject: [PATCH 364/602] Fixing more compiler warnings. 
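For context, one warning class addressed in this cleanup is gcc's -Wparentheses, fixed in belongsToLayer() in the previous commit: && binds tighter than ||, so the old and new conditions parse identically and the added parentheses only make the grouping explicit.

    // warns: suggest parentheses around '&&' within '||'
    if( ix == 0 && iy == 0 && iz == 0 || !technicalGrid.get(x+ix,y+iy,z+iz)) continue;
    // parsed the same way, warning-free
    if( ( ix == 0 && iy == 0 && iz == 0 ) || !technicalGrid.get(x+ix,y+iy,z+iz)) continue;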
--- vlasovsolver/cpu_trans_map_amr.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 7426f5942..5e0901bbc 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -347,7 +347,7 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg Date: Thu, 2 May 2019 13:26:53 +0300 Subject: [PATCH 365/602] Clean up comments --- vlasovsolver/cpu_trans_map_amr.cpp | 95 ------------------------------ 1 file changed, 95 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 5e0901bbc..1976016d2 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -14,14 +14,12 @@ using namespace spatial_cell; // ordinary space [- VLASOV_STENCIL_WIDTH to VLASOV_STENCIL_WIDTH], // i,j,k are the cell ids inside on block (i in vector elements). // Vectors with same i,j,k coordinates, but in different spatial cells, are consequtive -//#define i_trans_ps_blockv(j, k, b_k) ( (b_k + VLASOV_STENCIL_WIDTH ) + ( (((j) * WID + (k) * WID2)/VECL) * ( 1 + 2 * VLASOV_STENCIL_WIDTH) ) ) #define i_trans_ps_blockv(planeVectorIndex, planeIndex, blockIndex) ( (blockIndex) + VLASOV_STENCIL_WIDTH + ( (planeVectorIndex) + (planeIndex) * VEC_PER_PLANE ) * ( 1 + 2 * VLASOV_STENCIL_WIDTH) ) // indices in padded target block, which is of type Vec with VECL // element sin each vector. b_k is the block index in z direction in // ordinary space, i,j,k are the cell ids inside on block (i in vector // elements). -//#define i_trans_pt_blockv(j, k, b_k) ( ( (j) * WID + (k) * WID2 + ((b_k) + 1 ) * WID3) / VECL ) #define i_trans_pt_blockv(planeVectorIndex, planeIndex, blockIndex) ( planeVectorIndex + planeIndex * VEC_PER_PLANE + (blockIndex + 1) * VEC_PER_BLOCK) #define i_trans_ps_blockv_pencil(planeVectorIndex, planeIndex, blockIndex, lengthOfPencil) ( (blockIndex) + VLASOV_STENCIL_WIDTH + ( (planeVectorIndex) + (planeIndex) * VEC_PER_PLANE ) * ( lengthOfPencil + 2 * VLASOV_STENCIL_WIDTH) ) @@ -176,26 +174,18 @@ void computeSpatialTargetCellsForPencils(const dccrg::Dccrg ids = pencils.getIds(iPencil); - //std::cout << "Target cells for pencil " << iPencil << ": "; - // Get pointers for each cell id of the pencil for (int i = 0; i < L; ++i) { targetCells[GID + i + 1] = mpiGrid[ids[i]]; - //std::cout << ids[i] << " "; } - //std::cout << std::endl; // Insert pointers for neighbors of ids.front() and ids.back() auto frontNbrPairs = mpiGrid.get_neighbors_of(ids.front(), neighborhood); auto backNbrPairs = mpiGrid.get_neighbors_of(ids.back(), neighborhood); - // std::cout << "Ghost cells: "; - // std::cout << frontNbrPairs->front().first << " "; - // std::cout << backNbrPairs->back().first << std::endl; vector frontNeighborIds; for( const auto nbrPair: *frontNbrPairs ) { if (nbrPair.second.at(dimension) == -1) { @@ -280,65 +270,6 @@ CellID selectNeighbor(const dccrg::Dccrg return neighbor; } - -// void removeDuplicates(setOfPencils &pencils) { - -// vector duplicatePencilIds; - -// // Loop over all pencils twice to do cross-comparisons -// for (uint myPencilId = 0; myPencilId < pencils.N; ++myPencilId) { - -// vector myCellIds = pencils.getIds(myPencilId); - -// for (uint theirPencilId = 0; theirPencilId < pencils.N; ++theirPencilId) { - -// // Do not compare with self -// if (myPencilId == theirPencilId) { -// continue; -// } - -// // we check if all cells of pencil b ("their") are included in pencil a ("my") -// bool removeThisPencil = 
true; - -// vector theirCellIds = pencils.getIds(theirPencilId); - -// for (auto theirCellId : theirCellIds) { -// bool matchFound = false; -// for (auto myCellId : myCellIds) { -// // Compare each "my" cell to all "their" cells, if any of them match -// // update a logical value matchFound to true. -// if (myCellId == theirCellId && pencils.path[myPencilId] == pencils.path[theirPencilId]) { -// matchFound = true; -// } -// } -// // If no match was found for this "my" cell, we can end the comparison, these pencils -// // are not duplicates. -// if(!matchFound) { -// removeThisPencil = false; -// continue; -// } -// } - -// if(removeThisPencil) { -// if(std::find(duplicatePencilIds.begin(), duplicatePencilIds.end(), myPencilId) == duplicatePencilIds.end() ) { -// duplicatePencilIds.push_back(theirPencilId); -// } - -// } - -// } - -// } - -// int myRank; -// MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - -// for (auto id : duplicatePencilIds) { -// //pencils.removePencil(id); -// cout << "I am rank " << myRank << ", I would like to remove pencil number " << id << endl; -// } -// } - setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg &grid, setOfPencils &pencils, const CellID startingId, vector ids, const uint dimension, @@ -450,7 +381,6 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::DccrgsysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) { - // grid[nextNeighbor]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE || - // ( grid[nextNeighbor]->sysBoundaryLayer == 2 && - // grid[nextNeighbor]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) ) { nextNeighbor = INVALID_CELLID; } else { @@ -510,7 +436,6 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg &vmesh, const uint lengthOfPencil) { @@ -649,13 +574,6 @@ void getSeedIds(const dccrg::Dccrg& mpiGr !mpiGrid.is_local(nbrPair.first) || !do_translate_cell(mpiGrid[nbrPair.first]) ) { - // ( mpiGrid[nbrPair.first]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && - // mpiGrid[nbrPair.first]->sysBoundaryLayer == 1 ) ) { - - // mpiGrid[nbrPair.first]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE || - // ( mpiGrid[nbrPair.first]->sysBoundaryLayer == 2 && - // mpiGrid[nbrPair.first]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) ) { - addToSeedIds = true; } } @@ -804,15 +722,11 @@ void check_ghost_cells(const dccrg::Dccrg const auto* backNeighbors = mpiGrid.get_neighbors_of(ids.back() ,neighborhoodId); for (const auto nbrPair: *frontNeighbors) { - //if((nbrPair.second[dimension] + 1) / pow(2,mpiGrid.get_refinement_level(nbrPair.first)) == -offset) { maxNbrRefLvl = max(maxNbrRefLvl,mpiGrid.mapping.get_refinement_level(nbrPair.first)); - //} } for (const auto nbrPair: *backNeighbors) { - //if((nbrPair.second[dimension] + 1) / pow(2,mpiGrid.get_refinement_level(nbrPair.first)) == offset) { maxNbrRefLvl = max(maxNbrRefLvl,mpiGrid.get_refinement_level(nbrPair.first)); - //} } if (maxNbrRefLvl > maxPencilRefLvl) { @@ -1021,9 +935,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& abort(); } - // // Remove duplicates - // removeDuplicates(pencils); - // Add the final set of pencils to the pencilSets - vector. // Only one set is created for now but we retain support for multiple sets pencilSets.push_back(pencils); @@ -1094,7 +1005,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Compute spatial neighbors for target cells. 
// For targets we need the local cells, plus a padding of 1 cell at both ends std::vector targetCells(pencils.sumOfLengths + pencils.N * 2 ); - //std::vector targetsValid(pencils.sumOfLengths + 2 * pencils.N) = false; computeSpatialTargetCellsForPencils(mpiGrid, pencils, dimension, targetCells.data()); @@ -1252,7 +1162,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Realf checksum = 0.0; for(int i = 0; i < WID3 ; i++) { blockData[i] += targetBlockData[GID * WID3 + i] * areaRatio; - // checksum += targetBlockData[GID * WID3 + i] * areaRatio; } } } @@ -1316,10 +1225,6 @@ void update_remote_mapping_contribution_amr( vector receive_origin_index; int neighborhood = 0; - - // For debugging -// int myRank; -// MPI_Comm_rank(MPI_COMM_WORLD,&myRank); //normalize and set neighborhoods if(direction > 0) { From bf640ba15ff0da619ff09002a09e4f7b5eece023 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Thu, 25 Apr 2019 12:29:12 +0300 Subject: [PATCH 366/602] Dummy output call for testing direct fsgrid vlsv output. --- Makefile | 52 +++++++++++++++++++++++++-------------------------- vlasiator.cpp | 7 +++++++ 2 files changed, 33 insertions(+), 26 deletions(-) diff --git a/Makefile b/Makefile index 1625c7f16..60167e00b 100644 --- a/Makefile +++ b/Makefile @@ -273,36 +273,36 @@ dro_populations.o: ${DEPS_COMMON} ${DEPS_CELL} parameters.h datareduction/datare ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c datareduction/dro_populations.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_MPI} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} antisymmetric.o: ${DEPS_SYSBOUND} sysboundary/antisymmetric.h sysboundary/antisymmetric.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/antisymmetric.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/antisymmetric.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} donotcompute.o: ${DEPS_SYSBOUND} sysboundary/donotcompute.h sysboundary/donotcompute.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/donotcompute.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/donotcompute.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} ionosphere.o: ${DEPS_SYSBOUND} sysboundary/ionosphere.h sysboundary/ionosphere.cpp backgroundfield/backgroundfield.cpp backgroundfield/backgroundfield.h projects/project.h projects/project.cpp fieldsolver/fs_limiters.h - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/ionosphere.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/ionosphere.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} mesh_data_container.o: ${DEPS_COMMON} mesh_data_container.h mesh_data.h ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c mesh_data_container.cpp ${INC_VLSV} ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_FSGRID} project_boundary.o: ${DEPS_SYSBOUND} sysboundary/project_boundary.h sysboundary/project_boundary.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/project_boundary.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/project_boundary.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} outflow.o: ${DEPS_COMMON} sysboundary/outflow.h sysboundary/outflow.cpp projects/project.h projects/project.cpp 
fieldsolver/ldz_magnetic_field.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c sysboundary/outflow.cpp ${INC_FSGRID} ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} -c sysboundary/outflow.cpp ${INC_FSGRID} ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} setmaxwellian.o: ${DEPS_SYSBOUND} sysboundary/setmaxwellian.h sysboundary/setmaxwellian.cpp sysboundary/setbyuser.h sysboundary/setbyuser.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/setmaxwellian.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/setmaxwellian.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} setbyuser.o: ${DEPS_SYSBOUND} sysboundary/setbyuser.h sysboundary/setbyuser.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/setbyuser.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/setbyuser.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} sysboundary.o: ${DEPS_COMMON} sysboundary/sysboundary.h sysboundary/sysboundary.cpp sysboundary/sysboundarycondition.h sysboundary/sysboundarycondition.cpp sysboundary/donotcompute.h sysboundary/donotcompute.cpp sysboundary/ionosphere.h sysboundary/ionosphere.cpp sysboundary/outflow.h sysboundary/outflow.cpp sysboundary/setmaxwellian.h sysboundary/setmaxwellian.cpp sysboundary/setbyuser.h sysboundary/setbyuser.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/sysboundary.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/sysboundary.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} sysboundarycondition.o: ${DEPS_COMMON} sysboundary/sysboundarycondition.h sysboundary/sysboundarycondition.cpp sysboundary/donotcompute.h sysboundary/donotcompute.cpp sysboundary/ionosphere.h sysboundary/ionosphere.cpp sysboundary/outflow.h sysboundary/outflow.cpp sysboundary/setmaxwellian.h sysboundary/setmaxwellian.cpp sysboundary/setbyuser.h sysboundary/setbyuser.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/sysboundarycondition.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/sysboundarycondition.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} read_gaussian_population.o: definitions.h readparameters.h projects/read_gaussian_population.h projects/read_gaussian_population.cpp ${CMP} ${CXXFLAGS} ${FLAGS} -c projects/read_gaussian_population.cpp @@ -386,22 +386,22 @@ projectTriAxisSearch.o: ${DEPS_COMMON} $(DEPS_PROJECTS) projects/projectTriAxisS ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/projectTriAxisSearch.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} poisson_solver.o: ${DEPS_COMMON} ${DEPS_CELL} poisson_solver/poisson_solver.h poisson_solver/poisson_solver.cpp - $(CMP) $(CXXFLAGS) $(FLAGS) ${MATHFLAGS} -c poisson_solver/poisson_solver.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} + $(CMP) $(CXXFLAGS) $(FLAGS) ${MATHFLAGS} -c poisson_solver/poisson_solver.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} ${INC_VLSV} poisson_solver_cg.o: ${DEPS_COMMON} ${DEPS_CELL} poisson_solver/poisson_solver.h poisson_solver/poisson_solver_cg.h poisson_solver/poisson_solver_cg.cpp - $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c 
poisson_solver/poisson_solver_cg.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} + $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c poisson_solver/poisson_solver_cg.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} ${INC_VLSV} poisson_solver_jacobi.o: ${DEPS_COMMON} ${DEPS_CELL} poisson_solver/poisson_solver.h poisson_solver/poisson_solver_jacobi.h poisson_solver/poisson_solver_jacobi.cpp - $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c poisson_solver/poisson_solver_jacobi.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} + $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c poisson_solver/poisson_solver_jacobi.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} ${INC_VLSV} poisson_solver_sor.o: ${DEPS_COMMON} ${DEPS_CELL} poisson_solver/poisson_solver.h poisson_solver/poisson_solver_sor.h poisson_solver/poisson_solver_sor.cpp - $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c poisson_solver/poisson_solver_sor.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} + $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c poisson_solver/poisson_solver_sor.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} ${INC_VLSV} poisson_test.o: ${DEPS_COMMON} ${DEPS_CELL} projects/project.h projects/project.cpp projects/Poisson/poisson_test.h projects/Poisson/poisson_test.cpp - $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c projects/Poisson/poisson_test.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} + $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c projects/Poisson/poisson_test.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} spatial_cell.o: ${DEPS_CELL} spatial_cell.cpp - $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c spatial_cell.cpp $(INC_BOOST) ${INC_DCCRG} ${INC_EIGEN} ${INC_ZOLTAN} ${INC_VECTORCLASS} ${INC_FSGRID} + $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c spatial_cell.cpp $(INC_BOOST) ${INC_DCCRG} ${INC_EIGEN} ${INC_ZOLTAN} ${INC_VECTORCLASS} ${INC_FSGRID} ${INC_VLSV} ifeq ($(MESH),AMR) vlasovmover.o: ${DEPS_VLSVMOVER_AMR} @@ -437,38 +437,38 @@ vlasovmover.o: ${DEPS_VLSVMOVER} endif cpu_moments.o: ${DEPS_CPU_MOMENTS} - ${CMP} ${CXXFLAGS} ${FLAG_OPENMP} ${MATHFLAGS} ${FLAGS} -c vlasovsolver/cpu_moments.cpp ${INC_DCCRG} ${INC_BOOST} ${INC_ZOLTAN} ${INC_PROFILE} ${INC_FSGRID} + ${CMP} ${CXXFLAGS} ${FLAG_OPENMP} ${MATHFLAGS} ${FLAGS} -c vlasovsolver/cpu_moments.cpp ${INC_DCCRG} ${INC_BOOST} ${INC_ZOLTAN} ${INC_PROFILE} ${INC_FSGRID} ${INC_VLSV} derivatives.o: ${DEPS_FSOLVER} fieldsolver/fs_limiters.h fieldsolver/fs_limiters.cpp fieldsolver/derivatives.hpp fieldsolver/derivatives.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/derivatives.cpp -I$(CURDIR) ${INC_BOOST} ${INC_EIGEN} ${INC_DCCRG} ${INC_FSGRID} ${INC_PROFILE} ${INC_ZOLTAN} + ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/derivatives.cpp -I$(CURDIR) ${INC_BOOST} ${INC_EIGEN} ${INC_DCCRG} ${INC_FSGRID} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} fs_common.o: ${DEPS_FSOLVER} fieldsolver/fs_limiters.h fieldsolver/fs_limiters.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/fs_common.cpp -I$(CURDIR) ${INC_BOOST} ${INC_EIGEN} ${INC_DCCRG} ${INC_FSGRID} ${INC_PROFILE} ${INC_ZOLTAN} + ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/fs_common.cpp -I$(CURDIR) ${INC_BOOST} ${INC_EIGEN} ${INC_DCCRG} ${INC_FSGRID} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} fs_limiters.o: ${DEPS_FSOLVER} fieldsolver/fs_limiters.h fieldsolver/fs_limiters.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/fs_limiters.cpp -I$(CURDIR) ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} ${INC_PROFILE} ${INC_ZOLTAN} + ${CMP} ${CXXFLAGS} 
${FLAGS} -c fieldsolver/fs_limiters.cpp -I$(CURDIR) ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} londrillo_delzanna.o: ${DEPS_FSOLVER} parameters.h common.h fieldsolver/fs_common.h fieldsolver/fs_common.cpp fieldsolver/derivatives.hpp fieldsolver/ldz_electric_field.hpp fieldsolver/ldz_hall.hpp fieldsolver/ldz_magnetic_field.hpp fieldsolver/ldz_main.cpp fieldsolver/ldz_volume.hpp fieldsolver/ldz_volume.hpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/ldz_main.cpp -o londrillo_delzanna.o -I$(CURDIR) ${INC_BOOST} ${INC_EIGEN} ${INC_DCCRG} ${INC_FSGRID} ${INC_PROFILE} ${INC_ZOLTAN} + ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/ldz_main.cpp -o londrillo_delzanna.o -I$(CURDIR) ${INC_BOOST} ${INC_EIGEN} ${INC_DCCRG} ${INC_FSGRID} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} ldz_electric_field.o: ${DEPS_FSOLVER} fieldsolver/ldz_electric_field.hpp fieldsolver/ldz_electric_field.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/ldz_electric_field.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} + ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/ldz_electric_field.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} ldz_hall.o: ${DEPS_FSOLVER} fieldsolver/ldz_hall.hpp fieldsolver/ldz_hall.cpp - ${CMP} ${CXXFLAGS} ${MATHFLAGS} ${FLAGS} -c fieldsolver/ldz_hall.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} + ${CMP} ${CXXFLAGS} ${MATHFLAGS} ${FLAGS} -c fieldsolver/ldz_hall.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} ldz_gradpe.o: ${DEPS_FSOLVER} fieldsolver/ldz_gradpe.hpp fieldsolver/ldz_gradpe.cpp - ${CMP} ${CXXFLAGS} ${MATHFLAGS} ${FLAGS} -c fieldsolver/ldz_gradpe.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} + ${CMP} ${CXXFLAGS} ${MATHFLAGS} ${FLAGS} -c fieldsolver/ldz_gradpe.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} ldz_magnetic_field.o: ${DEPS_FSOLVER} fieldsolver/ldz_magnetic_field.hpp fieldsolver/ldz_magnetic_field.cpp - ${CMP} ${CXXFLAGS} ${MATHFLAGS} ${FLAGS} -c fieldsolver/ldz_magnetic_field.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} + ${CMP} ${CXXFLAGS} ${MATHFLAGS} ${FLAGS} -c fieldsolver/ldz_magnetic_field.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} ldz_volume.o: ${DEPS_FSOLVER} fieldsolver/ldz_volume.hpp fieldsolver/ldz_volume.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/ldz_volume.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} + ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/ldz_volume.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} gridGlue.o: ${DEPS_FSOLVER} fieldsolver/gridGlue.hpp fieldsolver/gridGlue.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/gridGlue.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} + ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/gridGlue.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} vlasiator.o: ${DEPS_COMMON} readparameters.h parameters.h ${DEPS_PROJECTS} grid.h vlasovmover.h ${DEPS_CELL} vlasiator.cpp iowrite.h fieldsolver/gridGlue.hpp ${CMP} ${CXXFLAGS} ${FLAG_OPENMP} ${FLAGS} -c vlasiator.cpp ${INC_MPI} ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_EIGEN} ${INC_ZOLTAN} ${INC_PROFILE} ${INC_VLSV} diff --git a/vlasiator.cpp b/vlasiator.cpp index 759570385..f311b3307 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -493,6 +493,13 @@ int main(int 
argn,char* args[]) { } phiprof::stop("Init field propagator"); + vlsv::Writer fsgridtestwriter; + fsgridtestwriter.open("fsgridtest.vlsv", MPI_COMM_WORLD, 0, MPI_INFO_NULL); + momentsGrid.createVlsvMesh(fsgridtestwriter,"fsgrid"); + + std::function&)> rhoWriter = [](std::array& cell)->double{return cell[fsgrids::RHOM];}; + momentsGrid.vlsvOutputVariable(fsgridtestwriter, rhoWriter, "fsgrid", "rhom"); + // Initialize Poisson solver (if used) if (P::propagatePotential == true) { phiprof::start("Init Poisson solver"); From 806aa1b0180217e6f6649b5840048261a651de5d Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Thu, 25 Apr 2019 13:49:41 +0300 Subject: [PATCH 367/602] Properly close fsgrid test output file, so that XML footer gets written. --- vlasiator.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/vlasiator.cpp b/vlasiator.cpp index f311b3307..c66cf14e3 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -499,6 +499,9 @@ int main(int argn,char* args[]) { std::function&)> rhoWriter = [](std::array& cell)->double{return cell[fsgrids::RHOM];}; momentsGrid.vlsvOutputVariable(fsgridtestwriter, rhoWriter, "fsgrid", "rhom"); + fsgridtestwriter.close(); + std::cerr<< "Fsgrid output written." << std::endl; + exit(1); // Initialize Poisson solver (if used) if (P::propagatePotential == true) { From 34b3c90f3a2bab9043bd02544c5c6c12e023f1b4 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 2 May 2019 15:32:52 +0300 Subject: [PATCH 368/602] Changes requested by Sebastian. - the coordinates passed to get_existing_cell() are taken at the center of the fsgrid cell - j index in the inner (face) loop changed to iface. Type changed from uint to int. --- sysboundary/antisymmetric.cpp | 25 +++++++++++++++---------- sysboundary/ionosphere.cpp | 18 ++++++++++++------ sysboundary/outflow.cpp | 22 ++++++++++++++-------- sysboundary/project_boundary.cpp | 20 +++++++++++++------- sysboundary/setbyuser.cpp | 23 +++++++++++++++-------- 5 files changed, 69 insertions(+), 39 deletions(-) diff --git a/sysboundary/antisymmetric.cpp b/sysboundary/antisymmetric.cpp index 26639b99d..4f6c7323b 100644 --- a/sysboundary/antisymmetric.cpp +++ b/sysboundary/antisymmetric.cpp @@ -115,7 +115,7 @@ namespace SBC { // Comparison of the array defining which faces to use and the // array telling on which faces this cell is doAssign = false; - for (uint j=0; j<6; j++) doAssign = doAssign || (facesToProcess[j] && isThisCellOnAFace[j]); + for (int j=0; j<6; j++) doAssign = doAssign || (facesToProcess[j] && isThisCellOnAFace[j]); if (doAssign) { uint flag = getIndex(); //if (x < Parameters::xmin + 2*Parameters::dx_ini) flag = sysboundarytype::DO_NOT_COMPUTE; @@ -126,13 +126,21 @@ namespace SBC { } } - // Assign boundary flags to local fsgrid cells + const auto inv2powMaxRefLvl = pow(2,-P::amrMaxSpatialRefLevel); + + // Assign boundary flags to local fsgrid cells const std::array gridDims(technicalGrid.getLocalSize()); for (int k=0; ksysBoundaryFlag = sysboundarytype::DO_NOT_COMPUTE; } else { technicalGrid.get(i,j,k)->sysBoundaryFlag = this->getIndex(); diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index 69fe7f0b1..5f8325e0e 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -207,13 +207,22 @@ namespace SBC { } } - // Assign boundary flags to local fsgrid cells + const auto inv2powMaxRefLvl = pow(2,-P::amrMaxSpatialRefLevel); + + // Assign boundary flags to local fsgrid cells const std::array gridDims(technicalGrid.getLocalSize()); for (int k=0; kgeometry,this->center) < this->radius) { + 
if(getR(cellCenterCoords[0],cellCenterCoords[1],cellCenterCoords[2],this->geometry,this->center) < this->radius) { technicalGrid.get(i,j,k)->sysBoundaryFlag = this->getIndex(); } diff --git a/sysboundary/outflow.cpp b/sysboundary/outflow.cpp index 2159ca14f..765bc8358 100644 --- a/sysboundary/outflow.cpp +++ b/sysboundary/outflow.cpp @@ -237,19 +237,28 @@ namespace SBC { // Comparison of the array defining which faces to use and the array telling on which faces this cell is doAssign = false; - for(uint j=0; j<6; j++) doAssign = doAssign || (facesToProcess[j] && isThisCellOnAFace[j]); + for(int j=0; j<6; j++) doAssign = doAssign || (facesToProcess[j] && isThisCellOnAFace[j]); if(doAssign) { mpiGrid[dccrgId]->sysBoundaryFlag = this->getIndex(); } } + const auto inv2powMaxRefLvl = pow(2,-P::amrMaxSpatialRefLevel); + // Assign boundary flags to local fsgrid cells const std::array gridDims(technicalGrid.getLocalSize()); for (int k=0; ksysBoundaryFlag = this->getIndex(); } diff --git a/sysboundary/project_boundary.cpp b/sysboundary/project_boundary.cpp index e260b272a..9913f2ed8 100644 --- a/sysboundary/project_boundary.cpp +++ b/sysboundary/project_boundary.cpp @@ -113,19 +113,28 @@ namespace SBC { determineFace(isThisCellOnAFace.data(), x, y, z, dx, dy, dz); // Comparison of the array defining which faces to use and the array telling on which faces this cell is doAssign = false; - for(uint j=0; j<6; j++) doAssign = doAssign || (facesToProcess[j] && isThisCellOnAFace[j]); + for(int j=0; j<6; j++) doAssign = doAssign || (facesToProcess[j] && isThisCellOnAFace[j]); if(doAssign) { mpiGrid[cells[i]]->sysBoundaryFlag = this->getIndex(); } } + const auto inv2powMaxRefLvl = pow(2,-P::amrMaxSpatialRefLevel); + // Assign boundary flags to local fsgrid cells const std::array gridDims(technicalGrid.getLocalSize()); for (int k=0; ksysBoundaryFlag = this->getIndex(); } diff --git a/sysboundary/setbyuser.cpp b/sysboundary/setbyuser.cpp index 1d5181591..4aca59b56 100644 --- a/sysboundary/setbyuser.cpp +++ b/sysboundary/setbyuser.cpp @@ -98,19 +98,29 @@ namespace SBC { determineFace(isThisCellOnAFace.data(), x, y, z, dx, dy, dz); // Comparison of the array defining which faces to use and the array telling on which faces this cell is doAssign = false; - for(uint j=0; j<6; j++) doAssign = doAssign || (facesToProcess[j] && isThisCellOnAFace[j]); + for(int j=0; j<6; j++) doAssign = doAssign || (facesToProcess[j] && isThisCellOnAFace[j]); if(doAssign) { mpiGrid[cells[i]]->sysBoundaryFlag = this->getIndex(); } } + const auto inv2powMaxRefLvl = pow(2,-P::amrMaxSpatialRefLevel); + // Assign boundary flags to local fsgrid cells const std::array gridDims(technicalGrid.getLocalSize()); for (int k=0; ksysBoundaryFlag = this->getIndex(); } From 4f2fbd9afeb39ed0cadf736700e3a93f21ecb957 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 3 May 2019 12:14:19 +0300 Subject: [PATCH 369/602] Getting DX,DY,DZ directly from technicalGrid instead of params using the refinement level. 
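The simplification applied in each hunk below, assuming (as the replaced expressions imply) that fsgrid carries its own uniform spacing in DX/DY/DZ, equal to the base spacing divided by 2^amrMaxSpatialRefLevel:

    // before: rescale the coarse-grid parameter to the finest level
    cellCenterCoords[0] += 0.5 * P::dx_ini * pow(2,-P::amrMaxSpatialRefLevel);
    // after: use the spacing the technical grid already stores
    cellCenterCoords[0] += 0.5 * technicalGrid.DX;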
--- sysboundary/antisymmetric.cpp | 8 +++----- sysboundary/ionosphere.cpp | 8 +++----- sysboundary/outflow.cpp | 8 +++----- sysboundary/project_boundary.cpp | 8 +++----- sysboundary/setbyuser.cpp | 8 +++----- 5 files changed, 15 insertions(+), 25 deletions(-) diff --git a/sysboundary/antisymmetric.cpp b/sysboundary/antisymmetric.cpp index 4f6c7323b..60a3a0cf4 100644 --- a/sysboundary/antisymmetric.cpp +++ b/sysboundary/antisymmetric.cpp @@ -125,8 +125,6 @@ namespace SBC { mpiGrid[cells[c]]->sysBoundaryFlag = flag; } } - - const auto inv2powMaxRefLvl = pow(2,-P::amrMaxSpatialRefLevel); // Assign boundary flags to local fsgrid cells const std::array gridDims(technicalGrid.getLocalSize()); @@ -137,9 +135,9 @@ namespace SBC { // Shift to the center of the fsgrid cell auto cellCenterCoords = coords; - cellCenterCoords[0] += 0.5 * P::dx_ini * inv2powMaxRefLvl; - cellCenterCoords[1] += 0.5 * P::dy_ini * inv2powMaxRefLvl; - cellCenterCoords[2] += 0.5 * P::dz_ini * inv2powMaxRefLvl; + cellCenterCoords[0] += 0.5 * technicalGrid.DX; + cellCenterCoords[1] += 0.5 * technicalGrid.DY; + cellCenterCoords[2] += 0.5 * technicalGrid.DZ; const auto refLvl = mpiGrid.get_refinement_level(mpiGrid.get_existing_cell(cellCenterCoords)); if(refLvl == -1) { cerr << "Error, could not get refinement level of remote DCCRG cell " << __FILE__ << " " << __LINE__ << endl; diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index 5f8325e0e..3463faf55 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -207,8 +207,6 @@ namespace SBC { } } - const auto inv2powMaxRefLvl = pow(2,-P::amrMaxSpatialRefLevel); - // Assign boundary flags to local fsgrid cells const std::array gridDims(technicalGrid.getLocalSize()); for (int k=0; ksysBoundaryFlag = this->getIndex(); } } - - const auto inv2powMaxRefLvl = pow(2,-P::amrMaxSpatialRefLevel); // Assign boundary flags to local fsgrid cells const std::array gridDims(technicalGrid.getLocalSize()); @@ -254,9 +252,9 @@ namespace SBC { // Shift to the center of the fsgrid cell auto cellCenterCoords = coords; - cellCenterCoords[0] += 0.5 * P::dx_ini * inv2powMaxRefLvl; - cellCenterCoords[1] += 0.5 * P::dy_ini * inv2powMaxRefLvl; - cellCenterCoords[2] += 0.5 * P::dz_ini * inv2powMaxRefLvl; + cellCenterCoords[0] += 0.5 * technicalGrid.DX; + cellCenterCoords[1] += 0.5 * technicalGrid.DY; + cellCenterCoords[2] += 0.5 * technicalGrid.DZ; const auto refLvl = mpiGrid.get_refinement_level(mpiGrid.get_existing_cell(cellCenterCoords)); if(refLvl == -1) { diff --git a/sysboundary/project_boundary.cpp b/sysboundary/project_boundary.cpp index 9913f2ed8..ec5945b59 100644 --- a/sysboundary/project_boundary.cpp +++ b/sysboundary/project_boundary.cpp @@ -118,8 +118,6 @@ namespace SBC { mpiGrid[cells[i]]->sysBoundaryFlag = this->getIndex(); } } - - const auto inv2powMaxRefLvl = pow(2,-P::amrMaxSpatialRefLevel); // Assign boundary flags to local fsgrid cells const std::array gridDims(technicalGrid.getLocalSize()); @@ -130,9 +128,9 @@ namespace SBC { // Shift to the center of the fsgrid cell auto cellCenterCoords = coords; - cellCenterCoords[0] += 0.5 * P::dx_ini * inv2powMaxRefLvl; - cellCenterCoords[1] += 0.5 * P::dy_ini * inv2powMaxRefLvl; - cellCenterCoords[2] += 0.5 * P::dz_ini * inv2powMaxRefLvl; + cellCenterCoords[0] += 0.5 * technicalGrid.DX; + cellCenterCoords[1] += 0.5 * technicalGrid.DY; + cellCenterCoords[2] += 0.5 * technicalGrid.DZ; const auto refLvl = mpiGrid.get_refinement_level(mpiGrid.get_existing_cell(cellCenterCoords)); if(refLvl == -1) { diff --git 
a/sysboundary/setbyuser.cpp b/sysboundary/setbyuser.cpp index 4aca59b56..061338280 100644 --- a/sysboundary/setbyuser.cpp +++ b/sysboundary/setbyuser.cpp @@ -103,8 +103,6 @@ namespace SBC { mpiGrid[cells[i]]->sysBoundaryFlag = this->getIndex(); } } - - const auto inv2powMaxRefLvl = pow(2,-P::amrMaxSpatialRefLevel); // Assign boundary flags to local fsgrid cells const std::array gridDims(technicalGrid.getLocalSize()); @@ -116,9 +114,9 @@ namespace SBC { // Shift to the center of the fsgrid cell auto cellCenterCoords = coords; - cellCenterCoords[0] += 0.5 * P::dx_ini * inv2powMaxRefLvl; - cellCenterCoords[1] += 0.5 * P::dy_ini * inv2powMaxRefLvl; - cellCenterCoords[2] += 0.5 * P::dz_ini * inv2powMaxRefLvl; + cellCenterCoords[0] += 0.5 * technicalGrid.DX; + cellCenterCoords[1] += 0.5 * technicalGrid.DY; + cellCenterCoords[2] += 0.5 * technicalGrid.DZ; const auto refLvl = mpiGrid.get_refinement_level(mpiGrid.get_existing_cell(cellCenterCoords)); if(refLvl == -1) { From f29a22c846ba8d88df8df3cd53ccfe8928c5dd76 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Fri, 3 May 2019 16:42:37 +0300 Subject: [PATCH 370/602] (Untested) code for fsgrid data reducers. --- datareduction/datareducer.cpp | 40 ++++++- datareduction/datareducer.h | 12 ++ datareduction/datareductionoperator.cpp | 62 ++++++---- datareduction/datareductionoperator.h | 52 ++++++--- iowrite.cpp | 145 +++++++++++++++++++++++- iowrite.h | 20 ++++ vlasiator.cpp | 39 ++++++- 7 files changed, 325 insertions(+), 45 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 99462de96..93e390bd8 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -160,7 +160,22 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } if(*it == "FsGridRank") { // Map of spatial decomposition of the FsGrid into MPI ranks - outputReducer->addOperator(new DRO::FsGridRank); + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("FsGridRank",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2],technicalGrid.getRank()); + } + )); continue; } if(*it == "BoundaryType") { @@ -570,3 +585,26 @@ bool DataReducer::writeData(const unsigned int& operatorID, } return writingOperator->writeData(mpiGrid,cells,meshName,vlsvWriter); } + +/** Write all data thet the given DataReductionOperator wants to obtain from fsgrid into the output file. 
+ */ +bool DataReducer::writeFsGridData( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, const std::string& meshName, const unsigned int operatorID, vlsv::Writer& vlsvWriter) { + + if (operatorID >= operators.size()) return false; + DRO::DataReductionOperatorFsGrid* DROf = dynamic_cast(operators[operatorID]); + if(!DROf) { + return false; + } else { + return DROf->writeFsGridData(perBGrid, EGrid, EHallGrid, EGradPeGrid, momentsGrid, dPerBGrid, dMomentsGrid, BgBGrid, volGrid, technicalGrid, meshName, vlsvWriter); + } +} diff --git a/datareduction/datareducer.h b/datareduction/datareducer.h index 70aea990f..b54fe7dea 100644 --- a/datareduction/datareducer.h +++ b/datareduction/datareducer.h @@ -24,6 +24,7 @@ #define DATAREDUCER_H #include +#include "fsgrid.hpp" #include "../spatial_cell.hpp" #include "datareductionoperator.h" @@ -52,6 +53,17 @@ class DataReducer { const dccrg::Dccrg& mpiGrid, const std::vector& cells,const std::string& meshName, vlsv::Writer& vlsvWriter); + bool writeFsGridData( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, const std::string& meshName, const unsigned int operatorID, vlsv::Writer& vlsvWriter); private: /** Private copy-constructor to prevent copying the class. diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index d26d70b5e..9a672ff1f 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -112,9 +112,47 @@ namespace DRO { return true; } + std::string DataReductionOperatorFsGrid::getName() const {return variableName;} + bool DataReductionOperatorFsGrid::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { + dataType = "float"; + dataSize = sizeof(double); + vectorSize = 1; + return true; + } + //bool DataReductionOperatorFsGrid::reduceData(const SpatialCell* cell,char* buffer) { + // // This returns false, since it will handle writing itself in writeFsGridData below. 
+ // return false; + //} + bool DataReductionOperatorFsGrid::setSpatialCell(const SpatialCell* cell) { + return true; + } + + + bool DataReductionOperatorFsGrid::writeFsGridData( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, const std::string& meshName, vlsv::Writer& vlsvWriter) { + std::map attribs; + attribs["mesh"]=meshName; + attribs["name"]=variableName; + std::vector varBuffer = + lambda(perBGrid,EGrid,EHallGrid,EGradPeGrid,momentsGrid,dPerBGrid,dMomentsGrid,BgBGrid,volGrid,technicalGrid); + if(vlsvWriter.writeArray("VARIABLE",attribs, "float", varBuffer.size(), 1, sizeof(double), reinterpret_cast(varBuffer.data())) == false) { + string message = "The DataReductionOperator " + this->getName() + " failed to write it's data."; + bailout(true, message, __FILE__, __LINE__); + } + return true; + } DataReductionOperatorDerivatives::DataReductionOperatorDerivatives(const std::string& name,const unsigned int parameterIndex,const unsigned int vectorSize): DataReductionOperatorCellParams(name,parameterIndex,vectorSize) { @@ -236,30 +274,6 @@ namespace DRO { return true; } - //FsGrid cartcomm mpi rank - FsGridRank::FsGridRank(): DataReductionOperator() { } - FsGridRank::~FsGridRank() { } - - bool FsGridRank::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { - dataType = "int"; - dataSize = sizeof(int); - vectorSize = 1; - return true; - } - - std::string FsGridRank::getName() const {return "FSgrid_rank";} - - bool FsGridRank::reduceData(const SpatialCell* cell,char* buffer) { - const char* ptr = reinterpret_cast(&fsgridRank); - for (uint i=0; iget_cell_parameters()[CellParams::FSGRID_RANK]; - return true; - } - //FsGrids idea of what the boundaryType ist FsGridBoundaryType::FsGridBoundaryType(): DataReductionOperator() { } FsGridBoundaryType::~FsGridBoundaryType() { } diff --git a/datareduction/datareductionoperator.h b/datareduction/datareductionoperator.h index c6162dde6..1eea4b0f5 100644 --- a/datareduction/datareductionoperator.h +++ b/datareduction/datareductionoperator.h @@ -29,6 +29,7 @@ #include #include +#include "fsgrid.hpp" #include "../definitions.h" #include "../spatial_cell.hpp" #include "../parameters.h" @@ -73,6 +74,42 @@ namespace DRO { vlsv::Writer& vlsvWriter) = 0; }; + class DataReductionOperatorFsGrid : public DataReductionOperator { + + public: + typedef std::function( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)> ReductionLambda; + private: + ReductionLambda lambda; + std::string variableName; + + public: + DataReductionOperatorFsGrid(const std::string& name, ReductionLambda l) : DataReductionOperator(),lambda(l),variableName(name) {}; + virtual std::string getName() const; + virtual bool getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const; + virtual bool setSpatialCell(const SpatialCell* cell); + virtual bool writeFsGridData( 
+ FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, const std::string& meshName, vlsv::Writer& vlsvWriter); + }; + class DataReductionOperatorCellParams: public DataReductionOperator { public: DataReductionOperatorCellParams(const std::string& name,const unsigned int parameterIndex,const unsigned int vectorSize); @@ -118,21 +155,6 @@ namespace DRO { int mpiRank; }; - class FsGridRank: public DataReductionOperator { - public: - FsGridRank(); - virtual ~FsGridRank(); - - virtual bool getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const; - virtual std::string getName() const; - virtual bool reduceData(const SpatialCell* cell,char* buffer); - virtual bool setSpatialCell(const SpatialCell* cell); - - protected: - Real rank; - int fsgridRank; - }; - class FsGridBoundaryType: public DataReductionOperator { public: FsGridBoundaryType(); diff --git a/iowrite.cpp b/iowrite.cpp index a159ee164..88cde2b1a 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -303,6 +303,16 @@ bool writeVelocityDistributionData(const uint popID,Writer& vlsvWriter, */ bool writeDataReducer(const dccrg::Dccrg& mpiGrid, const std::vector& cells, + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, const bool writeAsFloat, DataReducer& dataReducer, int dataReducerIndex, @@ -406,6 +416,12 @@ bool writeDataReducer(const dccrg::Dccrg& phiprof::stop("writeArray"); } + } else { + // If the data reducer didn't want to write dccrg data, maybe it will be happy + // dumping data straight from fsgrid into our file. + phiprof::start("writeFsGrid"); + success = dataReducer.writeFsGridData(perBGrid,EGrid,EHallGrid,EGradPeGrid,momentsGrid,dPerBGrid,dMomentsGrid,BgBGrid,volGrid, technicalGrid, "fsgrid", dataReducerIndex, vlsvWriter); + phiprof::stop("writeFsGrid"); } delete[] varBuffer; @@ -793,6 +809,101 @@ bool writeMeshBoundingBox( Writer & vlsvWriter, return success; } +/** Writes the mesh metadata for Visit to read FSGrid variable data. + * @param technicalGrid An fsgrid instance used to extract metadata info. + * @param vlsvWriter file object to write into. + */ +bool writeFsGridMetadata(FsGrid< fsgrids::technical, 2>& technicalGrid, vlsv::Writer& vlsvWriter) { + + std::map xmlAttributes; + const std::string meshName="fsgrid"; + xmlAttributes["mesh"] = meshName; + + //The visit plugin expects MESH_BBOX as a keyword. We only write one + //from the first rank. 
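+   //MESH_BBOX layout: the first three entries are the global cell counts,
+   //the last three the block size per cell. fsgrid is never refined, so
+   //every cell is written as a single 1x1x1 block.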
+ std::array& globalSize = technicalGrid.getGlobalSize(); + std::array boundaryBox({globalSize[0], globalSize[1], globalSize[2], + 1,1,1}); + + if(rank == 0) { + const unsigned int arraySize = 6; + const unsigned int vectorSize = 1; + std::cerr << "Writing MESH_BBOX" << std::endl; + vlsvWriter.writeArray("MESH_BBOX", xmlAttributes, arraySize, vectorSize, &boundaryBox[0]); + } else { + const unsigned int arraySize = 0; + const unsigned int vectorSize = 1; + vlsvWriter.writeArray("MESH_BBOX", xmlAttributes, arraySize, vectorSize, &boundaryBox); + } + + // Write three 1-dimensional arrays of node coordinates (x,y,z) for + // visit to create a cartesian grid out of. + std::vector xNodeCoordinates(globalSize[0]+1); + for(uint64_t i=0; i yNodeCoordinates(globalSize[1]+1); + for(uint64_t i=0; i zNodeCoordinates(globalSize[2]+1); + for(uint64_t i=0; i globalIds(localSize[0]*localSize[1]*localSize[2]); + int i=0; + for(int z=0; z globalIndex = technicalGrid.getGlobalIndices(x,y,z); + globalIds[i++] = globalIndex[2]*globalSize[0]*globalSize[1]+ + globalIndex[1]*globalSize[0] + + globalIndex[0]; + } + } + } + + + // writeDomainSizes + std::array meshDomainSize({globalIds.size(), 0}); + std::cerr << "Writing MESH_DOMAIN_SIZES" << std::endl; + vlsvWriter.writeArray("MESH_DOMAIN_SIZES", xmlAttributes, 1, 2, &meshDomainSize[0]); + + // Finally, write mesh object itself. + xmlAttributes.clear(); + xmlAttributes["name"] = meshName; + xmlAttributes["type"] = vlsv::mesh::STRING_UCD_MULTI; + if(P::xperiodic) { xmlAttributes["xperiodic"] = "yes"; } else { xmlAttributes["xperiodic"] = "no"; } + if(P::yperiodic) { xmlAttributes["yperiodic"] = "yes"; } else { xmlAttributes["yperiodic"] = "no"; } + if(P::zperiodic) { xmlAttributes["zperiodic"] = "yes"; } else { xmlAttributes["zperiodic"] = "no"; } + + std::cerr << "Writing MESH" << std::endl; + vlsvWriter.writeArray("MESH", xmlAttributes, globalIds.size(), 1, globalIds.data()); + + return true; +} + /** This function writes the velocity space. * @param mpiGrid Vlasiator's grid. * @param vlsvWriter some vlsv writer with a file open. 
@@ -887,6 +998,16 @@ bool checkForSameMembers( const vector local_cells, const vector& mpiGrid, + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, DataReducer* dataReducer, const uint& index, const bool writeGhosts ) { @@ -984,6 +1105,10 @@ bool writeGrid(dccrg::Dccrg& mpiGrid, //Write ghost zone domain and local id numbers ( VisIt plugin needs this for MPI ) if( writeGhostZoneDomainAndLocalIdNumbers( mpiGrid, vlsvWriter, meshName, ghost_cells ) == false ) return false; + + //Write FSGrid metadata + if( writeFsGridMetadata( technicalGrid, vlsvWriter ) == false ) return false; + phiprof::stop("metadataIO"); phiprof::start("velocityspaceIO"); if( writeVelocitySpace( mpiGrid, vlsvWriter, index, local_cells ) == false ) return false; @@ -994,7 +1119,10 @@ bool writeGrid(dccrg::Dccrg& mpiGrid, //Determines whether we write in floats or doubles phiprof::start("writeDataReducer"); if (dataReducer != NULL) for( uint i = 0; i < dataReducer->size(); ++i ) { - if( writeDataReducer( mpiGrid, local_cells, (P::writeAsFloat==1), *dataReducer, i, vlsvWriter ) == false ) return false; + if( writeDataReducer( mpiGrid, local_cells, + perBGrid, EGrid, EHallGrid, EGradPeGrid, momentsGrid, dPerBGrid, dMomentsGrid, + BgBGrid, volGrid, technicalGrid, + (P::writeAsFloat==1), *dataReducer, i, vlsvWriter ) == false ) return false; } phiprof::stop("writeDataReducer"); @@ -1039,6 +1167,16 @@ bool writeGrid(dccrg::Dccrg& mpiGrid, \param fileIndex File index, file will be called "name.index.vlsv" */ bool writeRestart(dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, DataReducer& dataReducer, const string& name, const uint& fileIndex, @@ -1159,7 +1297,10 @@ bool writeRestart(dccrg::Dccrg& mpiGrid, //Write necessary variables: const bool writeAsFloat = false; for (uint i=0; i& mpiGrid, + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, DataReducer* dataReducer, const uint& index, const bool writeGhosts = true @@ -56,6 +66,16 @@ bool writeGrid(dccrg::Dccrg& mpiGrid, \param fileIndex File index, file will be called "name.index.vlsv" */ bool writeRestart(dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, DataReducer& dataReducer, const std::string& name, const uint& fileIndex, diff --git 
a/vlasiator.cpp b/vlasiator.cpp index c66cf14e3..dfb07c4c6 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -578,7 +578,18 @@ int main(int argn,char* args[]) { } const bool writeGhosts = true; - if( writeGrid(mpiGrid,&outputReducer,P::systemWriteName.size()-1, writeGhosts) == false ) { + if( writeGrid(mpiGrid, + perBGrid, // TODO: Merge all the fsgrids passed here into one meta-object + EGrid, + EHallGrid, + EGradPeGrid, + momentsGrid, + dPerBGrid, + dMomentsGrid, + BgBGrid, + volGrid, + technicalGrid, + &outputReducer,P::systemWriteName.size()-1, writeGhosts) == false ) { cerr << "FAILED TO WRITE GRID AT " << __FILE__ << " " << __LINE__ << endl; } @@ -797,7 +808,18 @@ int main(int argn,char* args[]) { phiprof::start("write-system"); logFile << "(IO): Writing spatial cell and reduced system data to disk, tstep = " << P::tstep << " t = " << P::t << endl << writeVerbose; const bool writeGhosts = true; - if( writeGrid(mpiGrid,&outputReducer, i, writeGhosts) == false ) { + if( writeGrid(mpiGrid, + perBGrid, // TODO: Merge all the fsgrids passed here into one meta-object + EGrid, + EHallGrid, + EGradPeGrid, + momentsGrid, + dPerBGrid, + dMomentsGrid, + BgBGrid, + volGrid, + technicalGrid, + &outputReducer, i, writeGhosts) == false ) { cerr << "FAILED TO WRITE GRID AT" << __FILE__ << " " << __LINE__ << endl; } P::systemWrites[i]++; @@ -854,7 +876,18 @@ int main(int argn,char* args[]) { if (myRank == MASTER_RANK) logFile << "(IO): Writing restart data to disk, tstep = " << P::tstep << " t = " << P::t << endl << writeVerbose; //Write the restart: - if( writeRestart(mpiGrid,outputReducer,"restart",(uint)P::t, P::restartStripeFactor) == false ) { + if( writeRestart(mpiGrid, + perBGrid, // TODO: Merge all the fsgrids passed here into one meta-object + EGrid, + EHallGrid, + EGradPeGrid, + momentsGrid, + dPerBGrid, + dMomentsGrid, + BgBGrid, + volGrid, + technicalGrid, + outputReducer,"restart",(uint)P::t, P::restartStripeFactor) == false ) { logFile << "(IO): ERROR Failed to write restart!" << endl << writeVerbose; cerr << "FAILED TO WRITE RESTART" << endl; } From 11f4bd1796dfd97711cd5c016b80ef2770c41068 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Fri, 3 May 2019 17:26:50 +0300 Subject: [PATCH 371/602] Fix typos and forgotten function definitions. --- MAKE/Makefile.sisu_gcc | 2 +- datareduction/datareductionoperator.cpp | 11 +++++++---- datareduction/datareductionoperator.h | 8 +++++--- iowrite.cpp | 21 +++++++++++++-------- 4 files changed, 26 insertions(+), 16 deletions(-) diff --git a/MAKE/Makefile.sisu_gcc b/MAKE/Makefile.sisu_gcc index bbfc2c8c5..a63736794 100644 --- a/MAKE/Makefile.sisu_gcc +++ b/MAKE/Makefile.sisu_gcc @@ -84,4 +84,4 @@ INC_PROFILE = -I$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_V INC_EIGEN = -I$(LIBRARY_PREFIX)/eigen/ INC_DCCRG = -I$(LIBRARY_PREFIX)/dccrg_new_neighbours/ INC_VECTORCLASS = -I$(LIBRARY_PREFIX)/vectorclass -INC_FSGRID = -I$(LIBRARY_PREFIX)/fsgrid +INC_FSGRID = -I/homeappl/home/uganse/fsgrid diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index 9a672ff1f..4249d8bb2 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -119,10 +119,13 @@ namespace DRO { vectorSize = 1; return true; } - //bool DataReductionOperatorFsGrid::reduceData(const SpatialCell* cell,char* buffer) { - // // This returns false, since it will handle writing itself in writeFsGridData below. 
- // return false; - //} + bool DataReductionOperatorFsGrid::reduceData(const SpatialCell* cell,char* buffer) { + // This returns false, since it will handle writing itself in writeFsGridData below. + return false; + } + bool DataReductionOperatorFsGrid::reduceDiagnostic(const SpatialCell* cell,Real * result) { + return false; + } bool DataReductionOperatorFsGrid::setSpatialCell(const SpatialCell* cell) { return true; } diff --git a/datareduction/datareductionoperator.h b/datareduction/datareductionoperator.h index 1eea4b0f5..fd724c943 100644 --- a/datareduction/datareductionoperator.h +++ b/datareduction/datareductionoperator.h @@ -94,9 +94,11 @@ namespace DRO { public: DataReductionOperatorFsGrid(const std::string& name, ReductionLambda l) : DataReductionOperator(),lambda(l),variableName(name) {}; - virtual std::string getName() const; - virtual bool getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const; - virtual bool setSpatialCell(const SpatialCell* cell); + virtual std::string getName() const; + virtual bool getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const; + virtual bool setSpatialCell(const SpatialCell* cell); + virtual bool reduceData(const SpatialCell* cell,char* buffer); + virtual bool reduceDiagnostic(const SpatialCell* cell,Real * result); virtual bool writeFsGridData( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, diff --git a/iowrite.cpp b/iowrite.cpp index 88cde2b1a..59aa681f0 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -825,7 +825,7 @@ bool writeFsGridMetadata(FsGrid< fsgrids::technical, 2>& technicalGrid, vlsv::Wr std::array boundaryBox({globalSize[0], globalSize[1], globalSize[2], 1,1,1}); - if(rank == 0) { + if(technicalGrid.getRank() == 0) { const unsigned int arraySize = 6; const unsigned int vectorSize = 1; std::cerr << "Writing MESH_BBOX" << std::endl; @@ -840,17 +840,17 @@ bool writeFsGridMetadata(FsGrid< fsgrids::technical, 2>& technicalGrid, vlsv::Wr // visit to create a cartesian grid out of. std::vector xNodeCoordinates(globalSize[0]+1); for(uint64_t i=0; i yNodeCoordinates(globalSize[1]+1); for(uint64_t i=0; i zNodeCoordinates(globalSize[2]+1); for(uint64_t i=0; i& technicalGrid, vlsv::Wr vlsvWriter.writeArray("MESH_GHOST_LOCALIDS", xmlAttributes, 0, 1, &dummyghost); // Write cell "globalID" numbers, which are just the global array indices. 
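+   // The global ID of a cell is its row-major rank in the full fsgrid box,
+   //   gid = z*Nx*Ny + y*Nx + x,
+   // consistent with the node coordinate arrays written above.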
+ std::array& localSize = technicalGrid.getLocalSize(); std::vector globalIds(localSize[0]*localSize[1]*localSize[2]); int i=0; for(int z=0; z& technicalGrid, vlsv::Wr xmlAttributes.clear(); xmlAttributes["name"] = meshName; xmlAttributes["type"] = vlsv::mesh::STRING_UCD_MULTI; - if(P::xperiodic) { xmlAttributes["xperiodic"] = "yes"; } else { xmlAttributes["xperiodic"] = "no"; } - if(P::yperiodic) { xmlAttributes["yperiodic"] = "yes"; } else { xmlAttributes["yperiodic"] = "no"; } - if(P::zperiodic) { xmlAttributes["zperiodic"] = "yes"; } else { xmlAttributes["zperiodic"] = "no"; } + // TODO: Dummy values, fix by getting actual periodicity from fsgrid + xmlAttributes["xperiodic"]="no"; + xmlAttributes["yperiodic"]="no"; + xmlAttributes["zperiodic"]="no"; + //if(P::xperiodic) { xmlAttributes["xperiodic"] = "yes"; } else { xmlAttributes["xperiodic"] = "no"; } + //if(P::yperiodic) { xmlAttributes["yperiodic"] = "yes"; } else { xmlAttributes["yperiodic"] = "no"; } + //if(P::zperiodic) { xmlAttributes["zperiodic"] = "yes"; } else { xmlAttributes["zperiodic"] = "no"; } std::cerr << "Writing MESH" << std::endl; vlsvWriter.writeArray("MESH", xmlAttributes, globalIds.size(), 1, globalIds.data()); From 99d1969d1588c6a1224d128dc6f349d216516c56 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Fri, 3 May 2019 17:28:09 +0300 Subject: [PATCH 372/602] Whoopsie, revert accidental commit of Makefile change. --- MAKE/Makefile.sisu_gcc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAKE/Makefile.sisu_gcc b/MAKE/Makefile.sisu_gcc index a63736794..bbfc2c8c5 100644 --- a/MAKE/Makefile.sisu_gcc +++ b/MAKE/Makefile.sisu_gcc @@ -84,4 +84,4 @@ INC_PROFILE = -I$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_V INC_EIGEN = -I$(LIBRARY_PREFIX)/eigen/ INC_DCCRG = -I$(LIBRARY_PREFIX)/dccrg_new_neighbours/ INC_VECTORCLASS = -I$(LIBRARY_PREFIX)/vectorclass -INC_FSGRID = -I/homeappl/home/uganse/fsgrid +INC_FSGRID = -I$(LIBRARY_PREFIX)/fsgrid From 9034fb8575fe2804e04e241292d14c14be925ef0 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Fri, 3 May 2019 17:28:34 +0300 Subject: [PATCH 373/602] Remove previous debugging code stub. --- vlasiator.cpp | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index dfb07c4c6..4a3c80cec 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -493,16 +493,6 @@ int main(int argn,char* args[]) { } phiprof::stop("Init field propagator"); - vlsv::Writer fsgridtestwriter; - fsgridtestwriter.open("fsgridtest.vlsv", MPI_COMM_WORLD, 0, MPI_INFO_NULL); - momentsGrid.createVlsvMesh(fsgridtestwriter,"fsgrid"); - - std::function&)> rhoWriter = [](std::array& cell)->double{return cell[fsgrids::RHOM];}; - momentsGrid.vlsvOutputVariable(fsgridtestwriter, rhoWriter, "fsgrid", "rhom"); - fsgridtestwriter.close(); - std::cerr<< "Fsgrid output written." 
<< std::endl; - exit(1); - // Initialize Poisson solver (if used) if (P::propagatePotential == true) { phiprof::start("Init Poisson solver"); From ecd80aa747d0a10c837adaf23429e934df4513db Mon Sep 17 00:00:00 2001 From: Grandin Maxime T J Date: Mon, 6 May 2019 11:33:57 +0300 Subject: [PATCH 374/602] Added a loss cone population DRO for precipitation --- datareduction/datareducer.cpp | 7 + datareduction/datareductionoperator.cpp | 121 ++++++++++++++++++ datareduction/datareductionoperator.h | 19 +++ .../Magnetosphere_polar_small.cfg | 9 +- 4 files changed, 152 insertions(+), 4 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 99462de96..ce79bc029 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -251,6 +251,13 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } + if(*it == "populations_PrecipitationDiffFlux") { + // Per-population precipitation directional differential number flux + for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { + outputReducer->addOperator(new DRO::VariablePrecipitationDiffFlux(i)); + } + continue; + } if(*it == "derivs") { // Derivatives of all quantities that might be of interest outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdx",fieldsolver::drhomdx,1)); diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index d26d70b5e..d51257915 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -1476,4 +1476,125 @@ namespace DRO { bool VariableEffectiveSparsityThreshold::setSpatialCell(const spatial_cell::SpatialCell* cell) { return true; } + + + // Precipitation directional differential number flux + VariablePrecipitationDiffFlux::VariablePrecipitationDiffFlux(cuint _popID): DataReductionOperator(),popID(_popID) { + popName = getObjectWrapper().particleSpecies[popID].name; + emin = 0.1; // keV + emax = 100.0; // keV + nChannels = 16; // number of energy channels, logarithmically spaced between emin and emax + cosAngle = cos(10.*M_PI/180.0); // cosine of fixed loss cone angle + for (int i=0; i sumWeights(nChannels,0.0); + + std::array B; + B[0] = cell->parameters[CellParams::PERBXVOL] + cell->parameters[CellParams::BGBXVOL]; + B[1] = cell->parameters[CellParams::PERBYVOL] + cell->parameters[CellParams::BGBYVOL]; + B[2] = cell->parameters[CellParams::PERBZVOL] + cell->parameters[CellParams::BGBZVOL]; + + // Unit B-field direction + creal normB = sqrt(B[0]*B[0] + B[1]*B[1] + B[2]*B[2]); + std::array b_unit; + for (uint i=0; i<3; i++){ + B[i] /= normB; + } + + // If southern hemisphere, loss cone is around -B + if (cell->parameters[CellParams::ZCRD] + 0.5*cell->parameters[CellParams::DZ] < 0.0){ + for (uint i=0; i<3; i++){ + B[i] = -B[i]; + } + } + + + # pragma omp parallel + { + std::vector thread_lossCone_sum(nChannels,0.0); + std::vector thread_count(nChannels,0.0); + + const Real* parameters = cell->get_block_parameters(popID); + const Realf* block_data = cell->get_data(popID); + + # pragma omp for + for (vmesh::LocalID n=0; nget_number_of_velocity_blocks(popID); n++) { + for (uint k = 0; k < WID; ++k) for (uint j = 0; j < WID; ++j) for (uint i = 0; i < WID; ++i) { + const Real VX + = parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::VXCRD] + + (i + 0.5)*parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::DVX]; + const Real VY + = parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS 
+ BlockParams::VYCRD] + + (j + 0.5)*parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::DVY]; + const Real VZ + = parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::VZCRD] + + (k + 0.5)*parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::DVZ]; + + const Real DV3 + = parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::DVX] + * parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::DVY] + * parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::DVZ]; + + const Real normV = sqrt(VX*VX + VY*VY + VZ*VZ); + const Real VdotB_norm = (B[0]*VX + B[1]*VY + B[2]*VZ)/normV; + Real countAndGate = floor(VdotB_norm/cosAngle); // gate function: 0 outside loss cone, 1 inside + countAndGate = max(0.,countAndGate); + const Real energy = 0.5 * getObjectWrapper().particleSpecies[popID].mass * normV*normV / physicalconstants::CHARGE * 1e-3; // in keV + + // Find the correct energy bin number to update + int binNumber = round((log(energy) - log(emin)) / log(emax/emin) * (nChannels-1)); + binNumber = max(binNumber,0); // anything < emin goes to the lowest channel + binNumber = min(binNumber,nChannels-1); // anything > emax goes to the highest channel + + thread_lossCone_sum[binNumber] += block_data[n * SIZE_VELBLOCK + cellIndex(i,j,k)] * countAndGate * normV*normV * DV3; + thread_count[binNumber] += countAndGate * DV3; + } + } + for (int i=0; i(dataDiffFlux.data()); + for (uint i = 0; i < nChannels*sizeof(Real); ++i) buffer[i] = ptr[i]; + return true; + } + + bool VariablePrecipitationDiffFlux::setSpatialCell(const SpatialCell* cell) { + return true; + } } // namespace DRO diff --git a/datareduction/datareductionoperator.h b/datareduction/datareductionoperator.h index c6162dde6..851c44991 100644 --- a/datareduction/datareductionoperator.h +++ b/datareduction/datareductionoperator.h @@ -507,6 +507,25 @@ namespace DRO { std::string popName; }; + // Precipitation directional differential number flux + class VariablePrecipitationDiffFlux: public DataReductionOperator { + public: + VariablePrecipitationDiffFlux(cuint popID); + virtual ~VariablePrecipitationDiffFlux(); + + virtual bool getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const; + virtual std::string getName() const; + virtual bool reduceData(const SpatialCell* cell,char* buffer); + virtual bool setSpatialCell(const SpatialCell* cell); + + protected: + uint popID; + std::string popName; + int nChannels; + Real emin, emax; + Real cosAngle; + std::vector channels, dataDiffFlux; + }; } // namespace DRO #endif diff --git a/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg b/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg index ba223c2c4..66ccc94e6 100644 --- a/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg +++ b/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg @@ -12,12 +12,12 @@ charge = 1 diagnostic_write_interval = 10 write_initial_state = 0 -system_write_t_interval = 20 +system_write_t_interval = 10 system_write_file_name = bulk system_write_distribution_stride = 0 system_write_distribution_xline_stride = 10 -system_write_distribution_yline_stride = 10 -system_write_distribution_zline_stride = 1 +system_write_distribution_yline_stride = 0 +system_write_distribution_zline_stride = 10 #[bailout] #write_restart = 0 @@ -32,7 +32,7 @@ y_min = -5.0e6 y_max = 5.0e6 z_min = -250.0e6 z_max = 250.0e6 -t_max = 20.05 +t_max = 1000.05 
 [proton_vspace]

@@ -76,6 +76,7 @@
 output = BoundaryType
 output = MPIrank
 output = populations_Blocks
 output = fSaved
+output = populations_PrecipitationDiffFlux

 diagnostic = populations_Blocks

From 970ea8ad789b1f6138d8eeadc0bd18410dd4c7fe Mon Sep 17 00:00:00 2001
From: Urs Ganse
Date: Tue, 7 May 2019 11:50:11 +0300
Subject: [PATCH 375/602] Change fsgrid reducer lambda to return rvalue
 reference

---
 datareduction/datareducer.cpp         | 3 ++-
 datareduction/datareductionoperator.h | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp
index 93e390bd8..4cd6fb665 100644
--- a/datareduction/datareducer.cpp
+++ b/datareduction/datareducer.cpp
@@ -170,10 +170,11 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti
          FsGrid< std::array<Real, fsgrids::dmoments::N_DMOMENTS>, 2>& dMomentsGrid,
          FsGrid< std::array<Real, fsgrids::bgbfield::N_BGB>, 2>& BgBGrid,
          FsGrid< std::array<Real, fsgrids::volfields::N_VOL>, 2>& volGrid,
-         FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector<double> {
+         FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector<double>&& {
 
          std::array<int32_t,3>& gridSize = technicalGrid.getLocalSize();
          std::vector<double> retval(gridSize[0]*gridSize[1]*gridSize[2],technicalGrid.getRank());
+         return std::move(retval);
       }
       ));
       continue;
diff --git a/datareduction/datareductionoperator.h b/datareduction/datareductionoperator.h
index fd724c943..14b1f13be 100644
--- a/datareduction/datareductionoperator.h
+++ b/datareduction/datareductionoperator.h
@@ -77,7 +77,7 @@ namespace DRO {
 
    class DataReductionOperatorFsGrid : public DataReductionOperator {
 
      public:
-      typedef std::function<std::vector<double>(
+      typedef std::function<std::vector<double>&&(
          FsGrid< std::array<Real, fsgrids::bfield::N_BFIELD>, 2>& perBGrid,

From 94ff42b6d5c42015d2522718de87209050d28fb9 Mon Sep 17 00:00:00 2001
From: Urs Ganse
Date: Tue, 7 May 2019 12:15:24 +0300
Subject: [PATCH 376/602] Revert rvalue reference commit. Was thinking the
 wrong way around.

---
 datareduction/datareducer.cpp         | 4 ++--
 datareduction/datareductionoperator.h | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp
index 4cd6fb665..12a3751c0 100644
--- a/datareduction/datareducer.cpp
+++ b/datareduction/datareducer.cpp
@@ -170,11 +170,11 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti
          FsGrid< std::array<Real, fsgrids::dmoments::N_DMOMENTS>, 2>& dMomentsGrid,
          FsGrid< std::array<Real, fsgrids::bgbfield::N_BGB>, 2>& BgBGrid,
          FsGrid< std::array<Real, fsgrids::volfields::N_VOL>, 2>& volGrid,
-         FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector<double>&& {
+         FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector<double> {
 
          std::array<int32_t,3>& gridSize = technicalGrid.getLocalSize();
          std::vector<double> retval(gridSize[0]*gridSize[1]*gridSize[2],technicalGrid.getRank());
-         return std::move(retval);
+         return retval;
       }
       ));
       continue;
diff --git a/datareduction/datareductionoperator.h b/datareduction/datareductionoperator.h
index 14b1f13be..fd724c943 100644
--- a/datareduction/datareductionoperator.h
+++ b/datareduction/datareductionoperator.h
@@ -77,7 +77,7 @@ namespace DRO {
 
    class DataReductionOperatorFsGrid : public DataReductionOperator {
 
      public:
-      typedef std::function<std::vector<double>&&(
+      typedef std::function<std::vector<double>(
          FsGrid< std::array<Real, fsgrids::bfield::N_BFIELD>, 2>& perBGrid,

From 6f04c87aa4592d8e48d0bdf04aed3866bf75fa1f Mon Sep 17 00:00:00 2001
From: Urs Ganse
Date: Tue, 7 May 2019 12:15:50 +0300
Subject: [PATCH 377/602] Remove superfluous debugging output of fsgrid writes
 in iowrite.
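The two commits above document a classic pitfall: declaring the reduction lambda to return std::vector<double>&& hands the caller a reference to a local vector that is destroyed as the call returns. Returning by value is the correct signature and is already cheap, since the returned local is moved (or the move is elided outright). A minimal sketch of the difference, with hypothetical function names:

    std::vector<double> byValue() {
       std::vector<double> retval(100, 0.0);
       return retval;            // NRVO or move construction: no copy, no dangling
    }

    std::vector<double>&& byRvalueRef() {
       std::vector<double> retval(100, 0.0);
       return std::move(retval); // the reference dangles once the frame is gone
    }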
---
 iowrite.cpp | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/iowrite.cpp b/iowrite.cpp
index 59aa681f0..f336d86ac 100644
--- a/iowrite.cpp
+++ b/iowrite.cpp
@@ -828,7 +828,6 @@ bool writeFsGridMetadata(FsGrid< fsgrids::technical, 2>& technicalGrid, vlsv::Wr
    if(technicalGrid.getRank() == 0) {
       const unsigned int arraySize = 6;
       const unsigned int vectorSize = 1;
-      std::cerr << "Writing MESH_BBOX" << std::endl;
       vlsvWriter.writeArray("MESH_BBOX", xmlAttributes, arraySize, vectorSize, &boundaryBox[0]);
    } else {
       const unsigned int arraySize = 0;
@@ -851,7 +850,6 @@ bool writeFsGridMetadata(FsGrid< fsgrids::technical, 2>& technicalGrid, vlsv::Wr
       zNodeCoordinates[i] = technicalGrid.getPhysicalCoords(0,0,i)[2];
    }
    if(technicalGrid.getRank() == 0) {
-      std::cerr << "Writing MESH_NODE_CRDS" << std::endl;
       // Write this data only on rank 0
       vlsvWriter.writeArray("MESH_NODE_CRDS_X", xmlAttributes, globalSize[0]+1, 1, xNodeCoordinates.data());
       vlsvWriter.writeArray("MESH_NODE_CRDS_Y", xmlAttributes, globalSize[1]+1, 1, yNodeCoordinates.data());
@@ -888,7 +886,6 @@ bool writeFsGridMetadata(FsGrid< fsgrids::technical, 2>& technicalGrid, vlsv::Wr
 
    // writeDomainSizes
    std::array<uint64_t,2> meshDomainSize({globalIds.size(), 0});
-   std::cerr << "Writing MESH_DOMAIN_SIZES" << std::endl;
    vlsvWriter.writeArray("MESH_DOMAIN_SIZES", xmlAttributes, 1, 2, &meshDomainSize[0]);
 
    // Finally, write mesh object itself.
@@ -903,7 +900,6 @@ bool writeFsGridMetadata(FsGrid< fsgrids::technical, 2>& technicalGrid, vlsv::Wr
    //if(P::yperiodic) { xmlAttributes["yperiodic"] = "yes"; } else { xmlAttributes["yperiodic"] = "no"; }
    //if(P::zperiodic) { xmlAttributes["zperiodic"] = "yes"; } else { xmlAttributes["zperiodic"] = "no"; }
 
-   std::cerr << "Writing MESH" << std::endl;
    vlsvWriter.writeArray("MESH", xmlAttributes, globalIds.size(), 1, globalIds.data());
 
    return true;

From 372631525de61a142aaa24cfa05e0a49d021731f Mon Sep 17 00:00:00 2001
From: Urs Ganse
Date: Tue, 7 May 2019 13:43:40 +0300
Subject: [PATCH 378/602] More fsGrid data reducers converted.
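Besides converting the FsGridBoundaryType reducer to a lambda, this change makes writeFsGridData infer the vector size from the length of the returned buffer instead of assuming scalar data, so the same operator class serves one-component reducers (ranks, boundary flags) and three-component fields alike. The essential arithmetic, with names as in the diff below:

    int nCells = gridSize[0] * gridSize[1] * gridSize[2];
    int vectorSize = varBuffer.size() / nCells;  // 1 for flags and ranks, 3 for a field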
--- datareduction/datareducer.cpp | 39 ++++++++++++++++++++----- datareduction/datareductionoperator.cpp | 5 +++- 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 12a3751c0..ee3879f26 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -170,13 +170,13 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti FsGrid< std::array, 2>& dMomentsGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2],technicalGrid.getRank()); - return retval; - } - )); + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2],technicalGrid.getRank()); + return retval; + } + )); continue; } if(*it == "BoundaryType") { @@ -186,7 +186,30 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } if(*it == "FsGridBoundaryType") { // Type of boundarycells as stored in FSGrid - outputReducer->addOperator(new DRO::FsGridBoundaryType); + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("FsGridBoundaryType",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); + for(int z=0; zsysBoundaryFlag; + } + } + } + return retval; + } + )); continue; } if(*it == "BoundaryLayer") { diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index 4249d8bb2..96b14c31d 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -149,7 +149,10 @@ namespace DRO { std::vector varBuffer = lambda(perBGrid,EGrid,EHallGrid,EGradPeGrid,momentsGrid,dPerBGrid,dMomentsGrid,BgBGrid,volGrid,technicalGrid); - if(vlsvWriter.writeArray("VARIABLE",attribs, "float", varBuffer.size(), 1, sizeof(double), reinterpret_cast(varBuffer.data())) == false) { + + std::array& gridSize = technicalGrid.getLocalSize(); + int vectorSize = varBuffer.size() / (gridSize[0]*gridSize[1]*gridSize[2]); + if(vlsvWriter.writeArray("VARIABLE",attribs, "float", gridSize[0]*gridSize[1]*gridSize[2], vectorSize, sizeof(double), reinterpret_cast(varBuffer.data())) == false) { string message = "The DataReductionOperator " + this->getName() + " failed to write it's data."; bailout(true, message, __FILE__, __LINE__); } From 9503927192ba497150b39aa556a2e5e445526dc9 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Tue, 7 May 2019 14:16:36 +0300 Subject: [PATCH 379/602] Remove old (now-useless) fsgrid debugging reducers. 
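The FsGridBoundaryType class removed here (like FsGridRank before it) reduced a staged copy: the fsgrid value first had to be mirrored into CellParams::FSGRID_BOUNDARYTYPE on the dccrg side. The lambda replacements introduced in the preceding commits read the technical grid directly, which is simpler and cannot go stale when the copy is forgotten.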
--- datareduction/datareductionoperator.cpp | 24 ------------------------ datareduction/datareductionoperator.h | 15 --------------- 2 files changed, 39 deletions(-) diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index 96b14c31d..d1ff5c274 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -280,30 +280,6 @@ namespace DRO { return true; } - //FsGrids idea of what the boundaryType ist - FsGridBoundaryType::FsGridBoundaryType(): DataReductionOperator() { } - FsGridBoundaryType::~FsGridBoundaryType() { } - - bool FsGridBoundaryType::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { - dataType = "int"; - dataSize = sizeof(int); - vectorSize = 1; - return true; - } - - std::string FsGridBoundaryType::getName() const {return "FSgrid_boundaryType";} - - bool FsGridBoundaryType::reduceData(const SpatialCell* cell,char* buffer) { - const char* ptr = reinterpret_cast(&fsgridBoundaryType); - for (uint i=0; iget_cell_parameters()[CellParams::FSGRID_BOUNDARYTYPE]; - return true; - } - // BoundaryType BoundaryType::BoundaryType(): DataReductionOperator() { } BoundaryType::~BoundaryType() { } diff --git a/datareduction/datareductionoperator.h b/datareduction/datareductionoperator.h index fd724c943..37f0c9639 100644 --- a/datareduction/datareductionoperator.h +++ b/datareduction/datareductionoperator.h @@ -156,21 +156,6 @@ namespace DRO { Real rank; int mpiRank; }; - - class FsGridBoundaryType: public DataReductionOperator { - public: - FsGridBoundaryType(); - virtual ~FsGridBoundaryType(); - - virtual bool getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const; - virtual std::string getName() const; - virtual bool reduceData(const SpatialCell* cell,char* buffer); - virtual bool setSpatialCell(const SpatialCell* cell); - - protected: - Real rank; - int fsgridBoundaryType; - }; class BoundaryType: public DataReductionOperator { public: From aef62ae30fbf6739378c43d3268b71acc9ad2057 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Tue, 7 May 2019 14:17:00 +0300 Subject: [PATCH 380/602] Add fsgrid B datareducers. Tested! 
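The fs_B reducer below sums the static background and the perturbed component for every local cell to give the total field at the Yee-lattice locations, with the three components interleaved so that the size-based vector detection from the previous commit yields a vector size of 3. Per cell this amounts to the following sketch, where cellIdx stands for the row-major local index and a loop abbreviates the three unrolled assignments of the diff (assuming the X, Y, Z entries are consecutive in the field enums):

    for (int c = 0; c < 3; c++) {
       retval[3 * cellIdx + c] = (*BgBGrid.get(x, y, z))[fsgrids::bgbfield::BGBX + c]
                               + (*perBGrid.get(x, y, z))[fsgrids::bfield::PERBX + c];
    }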
--- datareduction/datareducer.cpp | 102 +++++++++++++++++++++++++++++++++- 1 file changed, 100 insertions(+), 2 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index ee3879f26..bd9af63e8 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -36,18 +36,114 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti for (it = P::outputVariableList.begin(); it != P::outputVariableList.end(); it++) { + if(*it == "fs_B") { // Bulk magnetic field at Yee-Lattice locations + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fs_B",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; zaddOperator(new DRO::VariableB); continue; } + if(*it == "fs_BackgroundB") { // Static (typically dipole) magnetic field part + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fs_background_B",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; zaddOperator(new DRO::DataReductionOperatorCellParams("background_B",CellParams::BGBX,3)); continue; } - if(*it == "PerturbedB") { // Fluctuating magnetic field part - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("perturbed_B",CellParams::PERBX,3)); + if(*it == "fs_PerturbedB") { // Fluctuating magnetic field part + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fs_perturbed_B",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; zaddOperator(new DRO::DataReductionOperatorCellParams("perturbed_B",CellParams::PERBX,3)); + // continue; + //} if(*it == "E") { // Bulk electric field at Yee-lattice locations outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("E",CellParams::EX,3)); continue; @@ -200,6 +296,8 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti std::array& gridSize = technicalGrid.getLocalSize(); std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); + + // Iterate 
through fsgrid cells and extract boundary flag for(int z=0; z Date: Tue, 7 May 2019 14:29:51 +0300 Subject: [PATCH 381/602] FsGrid E Datareducer. --- datareduction/datareducer.cpp | 39 +++++++++++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 4 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index bd9af63e8..a3628e2a1 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -140,10 +140,41 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); continue; } - //if(*it == "PerturbedB") { // Fluctuating magnetic field part - // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("perturbed_B",CellParams::PERBX,3)); - // continue; - //} + if(*it == "PerturbedB") { // Fluctuating magnetic field part + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("perturbed_B",CellParams::PERBX,3)); + continue; + } + if(*it == "fs_E") { // Bulk electric field at Yee-lattice locations + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fs_E",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; zaddOperator(new DRO::DataReductionOperatorCellParams("E",CellParams::EX,3)); continue; From d4eb42f05847dd2f82f5725afea5e5b7db3ee7db Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Tue, 7 May 2019 14:32:34 +0300 Subject: [PATCH 382/602] Changed maxFsDt reducer to write straight from FsGrid --- datareduction/datareducer.cpp | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index a3628e2a1..0c052b795 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -277,7 +277,32 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } if(*it == "MaxFieldsdt") { // Maximum timestep constraint as calculated by the fieldsolver - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("max_fields_dt",CellParams::MAXFDT,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("FsGridRank",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; zmaxFsDt; + } + } + } + return retval; + } + )); continue; } if(*it == "MPIrank") { From 4f78d6c968378bd37539306be27c7f038dd042a8 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Tue, 7 May 2019 14:39:28 +0300 Subject: [PATCH 383/602] Partially revert commit bf640ba (Makefile changes) --- Makefile | 50 
+++++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/Makefile b/Makefile index 60167e00b..4f092b8ea 100644 --- a/Makefile +++ b/Makefile @@ -273,13 +273,13 @@ dro_populations.o: ${DEPS_COMMON} ${DEPS_CELL} parameters.h datareduction/datare ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c datareduction/dro_populations.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_MPI} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} antisymmetric.o: ${DEPS_SYSBOUND} sysboundary/antisymmetric.h sysboundary/antisymmetric.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/antisymmetric.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/antisymmetric.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} donotcompute.o: ${DEPS_SYSBOUND} sysboundary/donotcompute.h sysboundary/donotcompute.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/donotcompute.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/donotcompute.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ionosphere.o: ${DEPS_SYSBOUND} sysboundary/ionosphere.h sysboundary/ionosphere.cpp backgroundfield/backgroundfield.cpp backgroundfield/backgroundfield.h projects/project.h projects/project.cpp fieldsolver/fs_limiters.h - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/ionosphere.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/ionosphere.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} mesh_data_container.o: ${DEPS_COMMON} mesh_data_container.h mesh_data.h @@ -289,20 +289,20 @@ project_boundary.o: ${DEPS_SYSBOUND} sysboundary/project_boundary.h sysboundary/ ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/project_boundary.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} outflow.o: ${DEPS_COMMON} sysboundary/outflow.h sysboundary/outflow.cpp projects/project.h projects/project.cpp fieldsolver/ldz_magnetic_field.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c sysboundary/outflow.cpp ${INC_FSGRID} ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${FLAGS} -c sysboundary/outflow.cpp ${INC_FSGRID} ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} setmaxwellian.o: ${DEPS_SYSBOUND} sysboundary/setmaxwellian.h sysboundary/setmaxwellian.cpp sysboundary/setbyuser.h sysboundary/setbyuser.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/setmaxwellian.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/setmaxwellian.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} setbyuser.o: ${DEPS_SYSBOUND} sysboundary/setbyuser.h sysboundary/setbyuser.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/setbyuser.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/setbyuser.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} sysboundary.o: ${DEPS_COMMON} sysboundary/sysboundary.h sysboundary/sysboundary.cpp sysboundary/sysboundarycondition.h sysboundary/sysboundarycondition.cpp sysboundary/donotcompute.h sysboundary/donotcompute.cpp sysboundary/ionosphere.h sysboundary/ionosphere.cpp 
sysboundary/outflow.h sysboundary/outflow.cpp sysboundary/setmaxwellian.h sysboundary/setmaxwellian.cpp sysboundary/setbyuser.h sysboundary/setbyuser.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/sysboundary.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/sysboundary.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} sysboundarycondition.o: ${DEPS_COMMON} sysboundary/sysboundarycondition.h sysboundary/sysboundarycondition.cpp sysboundary/donotcompute.h sysboundary/donotcompute.cpp sysboundary/ionosphere.h sysboundary/ionosphere.cpp sysboundary/outflow.h sysboundary/outflow.cpp sysboundary/setmaxwellian.h sysboundary/setmaxwellian.cpp sysboundary/setbyuser.h sysboundary/setbyuser.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/sysboundarycondition.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/sysboundarycondition.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} read_gaussian_population.o: definitions.h readparameters.h projects/read_gaussian_population.h projects/read_gaussian_population.cpp ${CMP} ${CXXFLAGS} ${FLAGS} -c projects/read_gaussian_population.cpp @@ -386,22 +386,22 @@ projectTriAxisSearch.o: ${DEPS_COMMON} $(DEPS_PROJECTS) projects/projectTriAxisS ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/projectTriAxisSearch.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} poisson_solver.o: ${DEPS_COMMON} ${DEPS_CELL} poisson_solver/poisson_solver.h poisson_solver/poisson_solver.cpp - $(CMP) $(CXXFLAGS) $(FLAGS) ${MATHFLAGS} -c poisson_solver/poisson_solver.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} ${INC_VLSV} + $(CMP) $(CXXFLAGS) $(FLAGS) ${MATHFLAGS} -c poisson_solver/poisson_solver.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} poisson_solver_cg.o: ${DEPS_COMMON} ${DEPS_CELL} poisson_solver/poisson_solver.h poisson_solver/poisson_solver_cg.h poisson_solver/poisson_solver_cg.cpp - $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c poisson_solver/poisson_solver_cg.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} ${INC_VLSV} + $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c poisson_solver/poisson_solver_cg.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} poisson_solver_jacobi.o: ${DEPS_COMMON} ${DEPS_CELL} poisson_solver/poisson_solver.h poisson_solver/poisson_solver_jacobi.h poisson_solver/poisson_solver_jacobi.cpp - $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c poisson_solver/poisson_solver_jacobi.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} ${INC_VLSV} + $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c poisson_solver/poisson_solver_jacobi.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} poisson_solver_sor.o: ${DEPS_COMMON} ${DEPS_CELL} poisson_solver/poisson_solver.h poisson_solver/poisson_solver_sor.h poisson_solver/poisson_solver_sor.cpp - $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c poisson_solver/poisson_solver_sor.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} ${INC_VLSV} + $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c poisson_solver/poisson_solver_sor.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} poisson_test.o: ${DEPS_COMMON} ${DEPS_CELL} projects/project.h projects/project.cpp projects/Poisson/poisson_test.h projects/Poisson/poisson_test.cpp - $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c projects/Poisson/poisson_test.cpp 
${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} + $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c projects/Poisson/poisson_test.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} spatial_cell.o: ${DEPS_CELL} spatial_cell.cpp - $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c spatial_cell.cpp $(INC_BOOST) ${INC_DCCRG} ${INC_EIGEN} ${INC_ZOLTAN} ${INC_VECTORCLASS} ${INC_FSGRID} ${INC_VLSV} + $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c spatial_cell.cpp $(INC_BOOST) ${INC_DCCRG} ${INC_EIGEN} ${INC_ZOLTAN} ${INC_VECTORCLASS} ${INC_FSGRID} ifeq ($(MESH),AMR) vlasovmover.o: ${DEPS_VLSVMOVER_AMR} @@ -437,38 +437,38 @@ vlasovmover.o: ${DEPS_VLSVMOVER} endif cpu_moments.o: ${DEPS_CPU_MOMENTS} - ${CMP} ${CXXFLAGS} ${FLAG_OPENMP} ${MATHFLAGS} ${FLAGS} -c vlasovsolver/cpu_moments.cpp ${INC_DCCRG} ${INC_BOOST} ${INC_ZOLTAN} ${INC_PROFILE} ${INC_FSGRID} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${FLAG_OPENMP} ${MATHFLAGS} ${FLAGS} -c vlasovsolver/cpu_moments.cpp ${INC_DCCRG} ${INC_BOOST} ${INC_ZOLTAN} ${INC_PROFILE} ${INC_FSGRID} derivatives.o: ${DEPS_FSOLVER} fieldsolver/fs_limiters.h fieldsolver/fs_limiters.cpp fieldsolver/derivatives.hpp fieldsolver/derivatives.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/derivatives.cpp -I$(CURDIR) ${INC_BOOST} ${INC_EIGEN} ${INC_DCCRG} ${INC_FSGRID} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/derivatives.cpp -I$(CURDIR) ${INC_BOOST} ${INC_EIGEN} ${INC_DCCRG} ${INC_FSGRID} ${INC_PROFILE} ${INC_ZOLTAN} fs_common.o: ${DEPS_FSOLVER} fieldsolver/fs_limiters.h fieldsolver/fs_limiters.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/fs_common.cpp -I$(CURDIR) ${INC_BOOST} ${INC_EIGEN} ${INC_DCCRG} ${INC_FSGRID} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/fs_common.cpp -I$(CURDIR) ${INC_BOOST} ${INC_EIGEN} ${INC_DCCRG} ${INC_FSGRID} ${INC_PROFILE} ${INC_ZOLTAN} fs_limiters.o: ${DEPS_FSOLVER} fieldsolver/fs_limiters.h fieldsolver/fs_limiters.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/fs_limiters.cpp -I$(CURDIR) ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/fs_limiters.cpp -I$(CURDIR) ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} ${INC_PROFILE} ${INC_ZOLTAN} londrillo_delzanna.o: ${DEPS_FSOLVER} parameters.h common.h fieldsolver/fs_common.h fieldsolver/fs_common.cpp fieldsolver/derivatives.hpp fieldsolver/ldz_electric_field.hpp fieldsolver/ldz_hall.hpp fieldsolver/ldz_magnetic_field.hpp fieldsolver/ldz_main.cpp fieldsolver/ldz_volume.hpp fieldsolver/ldz_volume.hpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/ldz_main.cpp -o londrillo_delzanna.o -I$(CURDIR) ${INC_BOOST} ${INC_EIGEN} ${INC_DCCRG} ${INC_FSGRID} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/ldz_main.cpp -o londrillo_delzanna.o -I$(CURDIR) ${INC_BOOST} ${INC_EIGEN} ${INC_DCCRG} ${INC_FSGRID} ${INC_PROFILE} ${INC_ZOLTAN} ldz_electric_field.o: ${DEPS_FSOLVER} fieldsolver/ldz_electric_field.hpp fieldsolver/ldz_electric_field.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/ldz_electric_field.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/ldz_electric_field.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} ldz_hall.o: ${DEPS_FSOLVER} fieldsolver/ldz_hall.hpp fieldsolver/ldz_hall.cpp - ${CMP} ${CXXFLAGS} ${MATHFLAGS} ${FLAGS} -c fieldsolver/ldz_hall.cpp ${INC_BOOST} ${INC_FSGRID} 
${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${MATHFLAGS} ${FLAGS} -c fieldsolver/ldz_hall.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} ldz_gradpe.o: ${DEPS_FSOLVER} fieldsolver/ldz_gradpe.hpp fieldsolver/ldz_gradpe.cpp - ${CMP} ${CXXFLAGS} ${MATHFLAGS} ${FLAGS} -c fieldsolver/ldz_gradpe.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${MATHFLAGS} ${FLAGS} -c fieldsolver/ldz_gradpe.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} ldz_magnetic_field.o: ${DEPS_FSOLVER} fieldsolver/ldz_magnetic_field.hpp fieldsolver/ldz_magnetic_field.cpp - ${CMP} ${CXXFLAGS} ${MATHFLAGS} ${FLAGS} -c fieldsolver/ldz_magnetic_field.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${MATHFLAGS} ${FLAGS} -c fieldsolver/ldz_magnetic_field.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} ldz_volume.o: ${DEPS_FSOLVER} fieldsolver/ldz_volume.hpp fieldsolver/ldz_volume.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/ldz_volume.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/ldz_volume.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} gridGlue.o: ${DEPS_FSOLVER} fieldsolver/gridGlue.hpp fieldsolver/gridGlue.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/gridGlue.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} ${INC_VLSV} + ${CMP} ${CXXFLAGS} ${FLAGS} -c fieldsolver/gridGlue.cpp ${INC_BOOST} ${INC_FSGRID} ${INC_DCCRG} ${INC_PROFILE} ${INC_ZOLTAN} vlasiator.o: ${DEPS_COMMON} readparameters.h parameters.h ${DEPS_PROJECTS} grid.h vlasovmover.h ${DEPS_CELL} vlasiator.cpp iowrite.h fieldsolver/gridGlue.hpp ${CMP} ${CXXFLAGS} ${FLAG_OPENMP} ${FLAGS} -c vlasiator.cpp ${INC_MPI} ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_EIGEN} ${INC_ZOLTAN} ${INC_PROFILE} ${INC_VLSV} From da9dd5b734580fd82ae19e8dfd8f58eba45e3072 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Tue, 7 May 2019 15:27:42 +0300 Subject: [PATCH 384/602] Added FsGridBoundaryLayer reducer. 
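Where the FsGridBoundaryType reducer exports technicalGrid.get(x,y,z)->sysBoundaryFlag, i.e. which boundary condition owns a cell, the reducer below walks the same local domain and exports sysBoundaryLayer, the cell's depth within the boundary region.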
--- datareduction/datareducer.cpp | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 0c052b795..9612cfb14 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -371,6 +371,36 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addOperator(new DRO::BoundaryLayer); continue; } + if(*it == "FsGridBoundaryLayer") { + // Type of boundarycells as stored in FSGrid + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("FsGridBoundaryLayer",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; zsysBoundaryLayer; + } + } + } + return retval; + } + )); + continue; + } if (*it == "populations_Blocks") { // Per-population velocity space block counts for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { From 446317b9138c59925a84b97a3fb1256a7b77b90e Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Tue, 7 May 2019 17:31:27 +0300 Subject: [PATCH 385/602] Fsgrid moment reducers (rho and V) --- datareduction/datareducer.cpp | 89 +++++++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 9612cfb14..81eff5071 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -183,10 +183,68 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("rhom",CellParams::RHOM,1)); continue; } + if(*it == "fs_Rhom") { // Overall mass density (summed over all populations) + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fs_rhom",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; zaddOperator(new DRO::DataReductionOperatorCellParams("rhoq",CellParams::RHOQ,1)); continue; } + if(*it == "fs_Rhoq") { // Overall charge density (summed over all populations) + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fs_rhoq",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize 
= technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; zaddOperator(new DRO::DataReductionOperatorCellParams("V",CellParams::VX,3)); continue; } + if(*it == "fs_V") { // Overall effective bulk density defining the center-of-mass frame from all populations + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fs_background_B",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; z Date: Tue, 7 May 2019 17:35:54 +0300 Subject: [PATCH 386/602] Fsgrid pressure data reducer. --- datareduction/datareducer.cpp | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 81eff5071..304985052 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -555,6 +555,37 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addOperator(new DRO::VariablePressureSolver); continue; } + if(*it == "fs_Pressure") { + // Overall scalar pressure from all populations + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fs_Pressure",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; z Date: Wed, 8 May 2019 14:22:17 +0300 Subject: [PATCH 387/602] BVol datareducer from FSGrid --- datareduction/datareducer.cpp | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 304985052..8d007b0b6 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -542,6 +542,40 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addOperator(new DRO::VariableBVol); continue; } + if(*it == "fs_volB") { // Static (typically dipole) magnetic field part + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fs_volB",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector 
retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract total BVOL + for(int z=0; zaddOperator(new DRO::DataReductionOperatorCellParams("BGB_vol",CellParams::BGBXVOL,3)); continue; From 435e6b0b5f9ab2e9ef22bd4fa441993f56e1c54f Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Wed, 8 May 2019 14:28:28 +0300 Subject: [PATCH 388/602] indentation fixes --- datareduction/datareducer.cpp | 436 +++++++++++++++++----------------- 1 file changed, 218 insertions(+), 218 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 8d007b0b6..6c60d25af 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -47,27 +47,27 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti FsGrid< std::array, 2>& dMomentsGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); - - // Iterate through fsgrid cells and extract boundary flag - for(int z=0; z& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; z, 2>& dMomentsGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); - - // Iterate through fsgrid cells and extract boundary flag - for(int z=0; z& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; z, 2>& dMomentsGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); - - // Iterate through fsgrid cells and extract boundary flag - for(int z=0; z& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; z, 2>& dMomentsGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); - - // Iterate through fsgrid cells and extract boundary flag - for(int z=0; z& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; z, 2>& dMomentsGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); - - // Iterate through fsgrid cells and extract boundary flag - for(int z=0; z& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + 
std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; z, 2>& dMomentsGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); - - // Iterate through fsgrid cells and extract boundary flag - for(int z=0; z& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; z, 2>& dMomentsGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); - - // Iterate through fsgrid cells and extract boundary flag - for(int z=0; z& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; z, 2>& dMomentsGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); - - // Iterate through fsgrid cells and extract boundary flag - for(int z=0; zmaxFsDt; - } - } - } - return retval; - } - )); + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; zmaxFsDt; + } + } + } + return retval; + } + )); continue; } if(*it == "MPIrank") { @@ -411,13 +411,13 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti FsGrid< std::array, 2>& dMomentsGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2],technicalGrid.getRank()); - return retval; - } - )); + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2],technicalGrid.getRank()); + return retval; + } + )); continue; } if(*it == "BoundaryType") { @@ -437,22 +437,22 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti FsGrid< std::array, 2>& dMomentsGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); - - // Iterate through fsgrid cells and extract boundary flag - for(int z=0; zsysBoundaryFlag; - } - } - } - return retval; - } - )); + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; zsysBoundaryFlag; + } + } + } + return retval; + } + 
)); continue; } if(*it == "BoundaryLayer") { @@ -472,22 +472,22 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti FsGrid< std::array, 2>& dMomentsGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); - - // Iterate through fsgrid cells and extract boundary flag - for(int z=0; zsysBoundaryLayer; - } - } - } - return retval; - } - )); + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; zsysBoundaryLayer; + } + } + } + return retval; + } + )); continue; } if (*it == "populations_Blocks") { @@ -553,27 +553,27 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti FsGrid< std::array, 2>& dMomentsGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); - - // Iterate through fsgrid cells and extract total BVOL - for(int z=0; z& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract total BVOL + for(int z=0; z, 2>& dMomentsGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); - - // Iterate through fsgrid cells and extract boundary flag - for(int z=0; z& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); + + // Iterate through fsgrid cells and extract boundary flag + for(int z=0; z Date: Wed, 8 May 2019 15:17:01 +0300 Subject: [PATCH 389/602] Changed to scaling vector potential dipole only as a function of X to allow better initialization of tail region and flanks --- backgroundfield/vectordipole.cpp | 207 +++++++++++++---------- backgroundfield/vectordipole.hpp | 4 +- projects/Magnetosphere/Magnetosphere.cpp | 10 +- projects/Magnetosphere/Magnetosphere.h | 4 +- 4 files changed, 130 insertions(+), 95 deletions(-) diff --git a/backgroundfield/vectordipole.cpp b/backgroundfield/vectordipole.cpp index f002b937b..25b9112c3 100644 --- a/backgroundfield/vectordipole.cpp +++ b/backgroundfield/vectordipole.cpp @@ -31,7 +31,7 @@ Background magnetic field class of Vlasiator. 
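The hunks below replace the radial fade-out of the dipole with one that depends on the x-coordinate alone: the field is kept at full strength for x <= xlimit[0], forced to zero for x >= xlimit[1], and blended in between by scaling the vector potential with the smootherstep polynomial S2. As a self-contained illustration of that profile and its derivative (the function names here are chosen for the sketch, not taken from the patch):

    // Smootherstep S2(s) = 6s^5 - 15s^4 + 10s^3 evaluated on
    // s = -(x - xHi)/(xHi - xLo), so S2 = 1 at x = xLo and S2 = 0 at x = xHi.
    double smootherstep(double x, double xLo, double xHi) {
       double s = -(x - xHi)/(xHi - xLo);
       if (s <= 0.) return 0.;
       if (s >= 1.) return 1.;
       return ((6.*s - 15.)*s + 10.)*s*s*s;
    }

    // d(S2)/dx, using dS2/ds = 30 s^2 (s - 1)^2 and ds/dx = -1/(xHi - xLo).
    double smootherstep_dx(double x, double xLo, double xHi) {
       double s = -(x - xHi)/(xHi - xLo);
       if (s <= 0. || s >= 1.) return 0.;
       return -30.*s*s*(s - 1.)*(s - 1.)/(xHi - xLo);
    }

Because both S2 and its first derivative vanish at s = 0, and the derivative vanishes again at s = 1, the blended field B' = S2 B + del S2 cross A joins the full dipole and the zero-field region with a continuous first derivative, and it remains divergence-free since it is the curl of S2 A.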
// tilt_angle_phi is from the z-axis in radians // tilt_angle_theta is from the Sun-Earth-line in radians -void VectorDipole::initialize(const double moment,const double center_x, const double center_y, const double center_z, const double tilt_angle_phi=0, const double tilt_angle_theta=0, const double radius_f, const double radius_z){ +void VectorDipole::initialize(const double moment,const double center_x, const double center_y, const double center_z, const double tilt_angle_phi, const double tilt_angle_theta, const double xlimit_f, const double xlimit_z){ this->initialized = true; q[0]=-sin(tilt_angle_phi)*cos(tilt_angle_theta)*moment; @@ -42,8 +42,11 @@ void VectorDipole::initialize(const double moment,const double center_x, const d center[1]=center_y; center[2]=center_z; - radius[0]=radius_f; - radius[1]=radius_z; + // Scale dipole as a function of x-coordinate + xlimit[0]=xlimit_f; // Full dipole when x < xlimit_f + xlimit[1]=xlimit_z; // Zero field when x > xlimit_z + + // TODO: If values for xlimit are zero, instead place them as 15 RE and Xmax-2*cellsize? } @@ -65,8 +68,8 @@ double VectorDipole::call( double x, double y, double z) const if(r2 < minimumR*minimumR) // r2=minimumR*minimumR; return 0.0; //set zero field inside dipole - if(r2>=radius[1]*radius[1]) - return 0.0; //set zero field and derivatives outside "zero radius" + if(r[0]>=xlimit[1]) + return 0.0; //set zero field and derivatives outside "zero x limit" /* This function is called from within other calls, one component at a time. The component in question is defined using the _fComponent index. If a derivative is requested, the direction of the derivative is defined using _dComponent. */ const double r1 = sqrt(r2); const double r5 = (r2*r2*r1); const double rdotq=q[0]*r[0] + q[1]*r[1] +q[2]*r[2]; const double B=( 3*r[_fComponent]*rdotq-q[_fComponent]*r2)/r5; - if(_derivative == 0) && (r1 <= radius[0]) - // Full dipole field within full radius - return B; + if((_derivative == 0) && (r[0] <= xlimit[0])) + // Full dipole field within full xlimit + return B; - if(_derivative == 1) && (r1 <= radius[0]){ + if((_derivative == 1) && (r[0] <= xlimit[0])){ //first derivatives of full field unsigned int sameComponent; if(_dComponent==_fComponent) @@ -97,30 +100,30 @@ double VectorDipole::call( double x, double y, double z) const 3*rdotq*sameComponent)/r5; } - /* Within transition range (between "full radius" and "zero radius"), use + /* Within transition range (between "full x limit" and "zero x limit"), use a vector potential scaled with the smootherstep function.
Calculated - and coded by Markus Battarbee, 30.04.2019 */ + and coded by Markus Battarbee, 08.05.2019 */ // Calculate vector potential within transition range double A[3]; A[0] = (q[1]*r[2]-q[2]*r[1]) / (r2*r1); A[1] = (q[2]*r[0]-q[0]*r[2]) / (r2*r1); A[2] = (q[0]*r[1]-q[1]*r[0]) / (r2*r1); - // Coordinate within smootherstep function - const double Sx = -(r1-radius[1])/(radius[1]-radius[0]); - const double Sx2 = Sx*Sx; - // Smootherstep and its radial derivative - const double S2 = 6.*Sx2*Sx2*Sx - 15.*Sx2*Sx2 + 10.*Sx2*Sx; - const double dS2dr = -(30.*Sx2*Sx2 - 60.*Sx2*Sx + 30.*Sx2)/(radius[1]-radius[0]); + // Coordinate within smootherstep function (x-coordinate only) + const double s = -(r[0]-xlimit[1])/(xlimit[1]-xlimit[0]); + const double ss = s*s; + // Smootherstep and its x-directional derivative + const double S2 = 6.*ss*ss*s - 15.*ss*ss + 10.*ss*s; + const double dS2dx = -(30.*ss*ss - 60.*ss*s + 30.*ss)/(xlimit[1]-xlimit[0]); // Cartesian derivatives of S2 double dS2cart[3]; - dS2cart[0] = (r[0]/r1)*dS2dr; - dS2cart[1] = (r[1]/r1)*dS2dr; - dS2cart[2] = (r[2]/r1)*dS2dr; + dS2cart[0] = dS2dx; //(r[0]/r1)*dS2dr; + dS2cart[1] = 0; //(r[1]/r1)*dS2dr; + dS2cart[2] = 0; //(r[2]/r1)*dS2dr; - if(_derivative == 0) && (r1 > radius[0]) { - /* Within transition range (between radius[0] and radius[1]) we + if(_derivative == 0) && (r1 > xlimit[0]) { + /* Within transition range (between xlimit[0] and xlimit[1]) we multiply the magnetic field with the S2 smootherstep function and add an additional corrective term to remove divergence. This is based on using the dipole field vector potential and scaling @@ -129,56 +132,64 @@ double VectorDipole::call( double x, double y, double z) const Notation: q = dipole moment (vector) r = position vector + x = x-coordinate r[0] R = position distance The regular dipole field vector potential A(r) = (mu0/4 pi R^3) * (q cross r) The smootherstep function - ( 0, x<=0 - S2(Sx) = ( 6x^5 -15x^4 +10x^3, 0<=x<=1 - ( 1, x>=1 + ( 0, s<=0 + S2(s) = ( 6s^5 -15s^4 +10s^3, 0<=s<=1 + ( 1, s>=1 Radial distance scaling for S2 - Sx = -(R-radius[1])/(radius[1]-radius[0]) + s = -(x-xlimit[1])/(xlimit[1]-xlimit[0]) + ds = -dx/(xlimit[1]-xlimit[0]) - The scaled vector potential is A'(r) = A(r)*S2(Sx) + The scaled vector potential is A'(r) = A(r)*S2(s) The scaled magnetic field is B'(r) = del cross A'(r) - =(NRL)= S2(Sx) del cross A(r) + del S2(Sx) cross A(r) - = S2(Sx) B(r) + del S2(Sx) cross A(r) + =(NRL)= S2(s) del cross A(r) + del S2(s) cross A(r) + = S2(s) B(r) + del S2(s) cross A(r) */ double delS2crossA[3]; - delS2crossA[0] = dS2cart[1]*A[2] - dS2cart[2]*A[1]; - delS2crossA[1] = dS2cart[2]*A[0] - dS2cart[0]*A[2]; - delS2crossA[2] = dS2cart[0]*A[1] - dS2cart[1]*A[0]; + //delS2crossA[0] = dS2cart[1]*A[2] - dS2cart[2]*A[1]; + //delS2crossA[1] = dS2cart[2]*A[0] - dS2cart[0]*A[2]; + //delS2crossA[2] = dS2cart[0]*A[1] - dS2cart[1]*A[0]; + // Don't calculate zero terms + delS2crossA[0] = 0; + delS2crossA[1] = -dS2cart[0]*A[2]; + delS2crossA[2] = dS2cart[0]*A[1]; return S2*B + delS2crossA[_fComponent]; } - else if(_derivative == 1) && (r1 > radius[0]) { + else if(_derivative == 1) && (r1 > xlimit[0]) { /* first derivatives of field calculated from diminishing vector potential - del B'(r) = S2(Sx) del B(r) + B(r) del S2(Sx) + del (del S2(Sx) cross A(r)) + del B'(r) = S2(s) del B(r) + B(r) del S2(s) + del (del S2(s) cross A(r)) component-wise: - del Bx = S2(Sx) del Bx + del S2(Sx) Bx + del(del S2(Sx) cross A)@i=x - del By = S2(Sx) del By + del S2(Sx) By + del(del S2(Sx) cross A)@i=y - 
del Bz = S2(Sx) del Bz + del S2(Sx) Bz + del(del S2(Sx) cross A)@i=z + del Bx = S2(s) del Bx + del S2(s) Bx + del(del S2(s) cross A)@i=x + del By = S2(s) del By + del S2(s) By + del(del S2(s) cross A)@i=y + del Bz = S2(s) del Bz + del S2(s) Bz + del(del S2(s) cross A)@i=z where - del(del S2(Sx) cross A)@i=x = del (dS2/dy Az - dS/dz Ay) - = del(dS/dy) Az + dS/dy del Az - del(DS/dz) Ay - dS/dz del Ay + del(del S2(s) cross A)@i=x = del (dS2/dy Az - dS2/dz Ay) + = del(dS2/dy) Az + dS2/dy del Az - del(DS/dz) Ay - dS2/dz del Ay + + del(del S2(s) cross A)@i=y = del (dS2/dz Ax - dS2/dx Az) + = del(dS2/dz) Ax + dS2/dz del Ax - del(DS/dx) Az - dS2/dx del Az - del(del S2(Sx) cross A)@i=y = del (dS2/dz Ax - dS/dx Az) - = del(dS/dz) Ax + dS/dz del Ax - del(DS/dx) Az - dS/dx del Az + del(del S2(s) cross A)@i=z = del (dS2/dx Ay - dS2/dy Ax) + = del(dS2/dx) Ay + dS2/dx del Ay - del(DS/dy) Ax - dS2/dy del Ax - del(del S2(Sx) cross A)@i=z = del (dS2/dx Ay - dS/dy Ax) - = del(dS/dx) Ay + dS/dx del Ay - del(DS/dy) Ax - dS/dy del Ax + note that dS2/dy == dS2/dz == 0 **********/ unsigned int sameComponent; @@ -194,49 +205,73 @@ double VectorDipole::call( double x, double y, double z) const 3*rdotq*sameComponent)/r5; // Calculate del Ax, del Ay, del Az - double delAx[3]; - double delAy[3]; - double delAz[3]; - delAx[0] = (-3./(r2*r2*r1))*(q[1]*r[2]-q[2]*r[1])*r[0]; - delAx[1] = (-3./(r2*r2*r1))*(q[1]*r[2]-q[2]*r[1])*r[1] -q[2]/(r2*r1); - delAx[2] = (-3./(r2*r2*r1))*(q[1]*r[2]-q[2]*r[1])*r[2] +q[1]/(r2*r1); - delAy[0] = (-3./(r2*r2*r1))*(q[2]*r[0]-q[0]*r[2])*r[0] +q[2]/(r2*r1); - delAy[1] = (-3./(r2*r2*r1))*(q[2]*r[0]-q[0]*r[2])*r[1]; - delAy[2] = (-3./(r2*r2*r1))*(q[2]*r[0]-q[0]*r[2])*r[2] -q[0]/(r2*r1); - delAz[0] = (-3./(r2*r2*r1))*(q[0]*r[1]-q[1]*r[0])*r[0] -q[1]/(r2*r1); - delAz[1] = (-3./(r2*r2*r1))*(q[0]*r[1]-q[1]*r[0])*r[1] +q[0]/(r2*r1); - delAz[2] = (-3./(r2*r2*r1))*(q[0]*r[1]-q[1]*r[0])*r[2]; - - // Calculate del (dS2/dx), del (dS2/dy), del (dS2/dz) - double deldS2dx[3]; - double deldS2dy[3]; - double deldS2dz[3]; - deldS2dx[0] = (-r[0]/(r2*r2*r1))*dS2dr*r[0] + dS2dr/r1; - deldS2dx[1] = (-r[0]/(r2*r2*r1))*dS2dr*r[1]; - deldS2dx[2] = (-r[0]/(r2*r2*r1))*dS2dr*r[2]; - deldS2dy[0] = (-r[1]/(r2*r2*r1))*dS2dr*r[0]; - deldS2dy[1] = (-r[1]/(r2*r2*r1))*dS2dr*r[1] + dS2dr/r1; - deldS2dy[2] = (-r[1]/(r2*r2*r1))*dS2dr*r[2]; - deldS2dz[0] = (-r[2]/(r2*r2*r1))*dS2dr*r[0]; - deldS2dz[1] = (-r[2]/(r2*r2*r1))*dS2dr*r[1]; - deldS2dz[2] = (-r[2]/(r2*r2*r1))*dS2dr*r[2] + dS2dr/r1; - - // Calculate del(del S2(Sx) cross A)@i=x, del(del S2(Sx) cross A)@i=y, del(del S2(Sx) cross A)@i=z - double ddS2crossA[3][3]; - // derivatives of X-directional field - ddS2crossA[0][0] = deldS2dy[0]*A[2] + dS2cart[1]*delAz[0] - deldS2dz[0]*A[1] - dS2cart[2]*delAy[0]; - ddS2crossA[0][1] = deldS2dy[1]*A[2] + dS2cart[1]*delAz[1] - deldS2dz[1]*A[1] - dS2cart[2]*delAy[1]; - ddS2crossA[0][2] = deldS2dy[2]*A[2] + dS2cart[1]*delAz[2] - deldS2dz[2]*A[1] - dS2cart[2]*delAy[2]; - // derivatives of Y-directional field - ddS2crossA[1][0] = deldS2dz[0]*A[0] + dS2cart[2]*delAx[0] - deldS2dx[0]*A[2] - dS2cart[0]*delAz[0]; - ddS2crossA[1][1] = deldS2dz[1]*A[0] + dS2cart[2]*delAx[1] - deldS2dx[1]*A[2] - dS2cart[0]*delAz[1]; - ddS2crossA[1][2] = deldS2dz[2]*A[0] + dS2cart[2]*delAx[2] - deldS2dx[2]*A[2] - dS2cart[0]*delAz[2]; - // derivatives of Z-directional field - ddS2crossA[2][0] = deldS2dx[0]*A[1] + dS2cart[0]*delAy[0] - deldS2dy[0]*A[0] - dS2cart[1]*delAx[0]; - ddS2crossA[2][1] = deldS2dx[1]*A[1] + dS2cart[0]*delAy[1] - deldS2dy[1]*A[0] - 
dS2cart[1]*delAx[1]; - ddS2crossA[2][2] = deldS2dx[2]*A[1] + dS2cart[0]*delAy[2] - deldS2dy[2]*A[0] - dS2cart[1]*delAx[2]; - - return S2*delB + dS2cart[_dComponent]*B + ddS2crossA[_fComponent][_dComponent]; + double delAy[3]; + double delAz[3]; + delAy[0] = (-3./(r2*r2*r1))*(q[2]*r[0]-q[0]*r[2])*r[0] +q[2]/(r2*r1); + delAy[1] = (-3./(r2*r2*r1))*(q[2]*r[0]-q[0]*r[2])*r[1]; + delAy[2] = (-3./(r2*r2*r1))*(q[2]*r[0]-q[0]*r[2])*r[2] -q[0]/(r2*r1); + delAz[0] = (-3./(r2*r2*r1))*(q[0]*r[1]-q[1]*r[0])*r[0] -q[1]/(r2*r1); + delAz[1] = (-3./(r2*r2*r1))*(q[0]*r[1]-q[1]*r[0])*r[1] +q[0]/(r2*r1); + delAz[2] = (-3./(r2*r2*r1))*(q[0]*r[1]-q[1]*r[0])*r[2]; + // derivatives of x-directional component of A are not needed here + //double delAx[3]; + //delAx[0] = (-3./(r2*r2*r1))*(q[1]*r[2]-q[2]*r[1])*r[0]; + //delAx[1] = (-3./(r2*r2*r1))*(q[1]*r[2]-q[2]*r[1])*r[1] -q[2]/(r2*r1); + //delAx[2] = (-3./(r2*r2*r1))*(q[1]*r[2]-q[2]*r[1])*r[2] +q[1]/(r2*r1); + + // Calculate del (dS2/dx), del (dS2/dy), del (dS2/dz) + // Of course now only del (dS2/dx) is non-zero + ddidS2dx = 60.*(2.*ss*s - 3.*ss + s)/((xlimit[1]-xlimit[0])*(xlimit[1]-xlimit[0])); + double deldS2dx[3]; + //double deldS2dy[3]; + //double deldS2dz[3]; + deldS2dx[0] = ddidS2dx; + deldS2dx[1] = 0; + deldS2dx[2] = 0; + /* + ddidS2dr = 60.*(2.*ss*s - 3.*ss + s)/(r2*(xlimit[1]-xlimit[0])*(xlimit[1]-xlimit[0])); + deldS2dx[0] = ddidS2dr*r[0]*r[0] -(r[0]/(r2*r1))*dS2dr*r[0] + dS2dr/r1; + deldS2dx[1] = ddidS2dr*r[0]*r[1] -(r[0]/(r2*r1))*dS2dr*r[1]; + deldS2dx[2] = ddidS2dr*r[0]*r[2] -(r[0]/(r2*r1))*dS2dr*r[2]; + deldS2dy[0] = ddidS2dr*r[1]*r[0] -(r[1]/(r2*r1))*dS2dr*r[0]; + deldS2dy[1] = ddidS2dr*r[1]*r[1] -(r[1]/(r2*r1))*dS2dr*r[1] + dS2dr/r1; + deldS2dy[2] = ddidS2dr*r[1]*r[2] -(r[1]/(r2*r1))*dS2dr*r[2]; + deldS2dz[0] = ddidS2dr*r[2]*r[0] -(r[2]/(r2*r1))*dS2dr*r[0]; + deldS2dz[1] = ddidS2dr*r[2]*r[1] -(r[2]/(r2*r1))*dS2dr*r[1]; + deldS2dz[2] = ddidS2dr*r[2]*r[2] -(r[2]/(r2*r1))*dS2dr*r[2] + dS2dr/r1; + + // Calculate del(del S2(s) cross A)@i=x, del(del S2(s) cross A)@i=y, del(del S2(s) cross A)@i=z + double ddS2crossA[3][3]; + // derivatives of X-directional field + ddS2crossA[0][0] = deldS2dy[0]*A[2] + dS2cart[1]*delAz[0] - deldS2dz[0]*A[1] - dS2cart[2]*delAy[0]; + ddS2crossA[0][1] = deldS2dy[1]*A[2] + dS2cart[1]*delAz[1] - deldS2dz[1]*A[1] - dS2cart[2]*delAy[1]; + ddS2crossA[0][2] = deldS2dy[2]*A[2] + dS2cart[1]*delAz[2] - deldS2dz[2]*A[1] - dS2cart[2]*delAy[2]; + // derivatives of Y-directional field + ddS2crossA[1][0] = deldS2dz[0]*A[0] + dS2cart[2]*delAx[0] - deldS2dx[0]*A[2] - dS2cart[0]*delAz[0]; + ddS2crossA[1][1] = deldS2dz[1]*A[0] + dS2cart[2]*delAx[1] - deldS2dx[1]*A[2] - dS2cart[0]*delAz[1]; + ddS2crossA[1][2] = deldS2dz[2]*A[0] + dS2cart[2]*delAx[2] - deldS2dx[2]*A[2] - dS2cart[0]*delAz[2]; + // derivatives of Z-directional field + ddS2crossA[2][0] = deldS2dx[0]*A[1] + dS2cart[0]*delAy[0] - deldS2dy[0]*A[0] - dS2cart[1]*delAx[0]; + ddS2crossA[2][1] = deldS2dx[1]*A[1] + dS2cart[0]*delAy[1] - deldS2dy[1]*A[0] - dS2cart[1]*delAx[1]; + ddS2crossA[2][2] = deldS2dx[2]*A[1] + dS2cart[0]*delAy[2] - deldS2dy[2]*A[0] - dS2cart[1]*delAx[2]; + */ + + // Only include components which are nonzero + double ddS2crossA[3][3]; + // derivatives of X-directional field + ddS2crossA[0][0] = 0; + ddS2crossA[0][1] = 0; + ddS2crossA[0][2] = 0; + // derivatives of Y-directional field + ddS2crossA[1][0] = - deldS2dx[0]*A[2] - dS2cart[0]*delAz[0]; + ddS2crossA[1][1] = - deldS2dx[1]*A[2] - dS2cart[0]*delAz[1]; + ddS2crossA[1][2] = - deldS2dx[2]*A[2] - dS2cart[0]*delAz[2]; 
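+ // (dS2cart[1] and dS2cart[2] are zero for the x-only smootherstep, and
+ // deldS2dy/deldS2dz vanish likewise, so of the commented-out full
+ // expressions only the dS2/dx and deldS2dx terms survive here; the
+ // delAx derivatives drop out with them.)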
+ // derivatives of Z-directional field + ddS2crossA[2][0] = deldS2dx[0]*A[1] + dS2cart[0]*delAy[0]; + ddS2crossA[2][1] = deldS2dx[1]*A[1] + dS2cart[0]*delAy[1]; + ddS2crossA[2][2] = deldS2dx[2]*A[1] + dS2cart[0]*delAy[2]; + + return S2*delB + dS2cart[_dComponent]*B + ddS2crossA[_fComponent][_dComponent]; } return 0; // dummy, but prevents gcc from yelling diff --git a/backgroundfield/vectordipole.hpp b/backgroundfield/vectordipole.hpp index bf8c2e817..ce2d81a69 100644 --- a/backgroundfield/vectordipole.hpp +++ b/backgroundfield/vectordipole.hpp @@ -35,13 +35,13 @@ class VectorDipole: public FieldFunction { bool initialized; double q[3]; // Dipole moment; set to (0,0,moment) for z-aligned double center[3]; // Coordinates where the dipole sits; set to (0,0,0) - double radius[2]; // Radial extents of full and zero dipole + double xlimit[2]; // X-coordinate extents of full and zero dipole public: VectorDipole(){ this->initialized = false; } - void initialize(const double moment,const double center_x, const double center_y, const double center_z, const double tilt_angle_phi, const double tilt_angle_theta, const double radius_f, const double radius_z); + void initialize(const double moment,const double center_x, const double center_y, const double center_z, const double tilt_angle_phi, const double tilt_angle_theta, const double xlimit_f, const double xlimit_z); virtual double call(double x, double y, double z) const; virtual ~VectorDipole() {} }; diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index f752b5471..b78d1540a 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -60,8 +60,8 @@ namespace projects { RP::add("Magnetosphere.dipoleTiltPhi","Magnitude of dipole tilt in radians", 0.0); RP::add("Magnetosphere.dipoleTiltTheta","Direction of dipole tilt from Sun-Earth-line in radians", 0.0); - RP::add("Magnetosphere.dipoleRadiusFull","Radius up to which dipole is at full strength, in metres", 1.59275e8); // 25 RE - RP::add("Magnetosphere.dipoleRadiusFull","Radius after which dipole is at zero strength, in metres", 1.9113e8); // 30 RE + RP::add("Magnetosphere.dipoleXFull","X-coordinate up to which dipole is at full strength, in metres", 9.5565e7); // 15 RE + RP::add("Magnetosphere.dipoleXZero","X-coordinate after which dipole is at zero strength, in metres", 1.9113e8); // 30 RE // Per-population parameters for(uint i=0; i< getObjectWrapper().particleSpecies.size(); i++) { @@ -162,11 +162,11 @@ namespace projects { if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; exit(1); } - if(!Readparameters::get("Magnetosphere.dipoleRadiusFull", this->dipoleRadiusFull)) { + if(!Readparameters::get("Magnetosphere.dipoleXFull", this->dipoleXFull)) { if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; exit(1); } - if(!Readparameters::get("Magnetosphere.dipoleRadiusZero", this->dipoleRadiusZero)) { + if(!Readparameters::get("Magnetosphere.dipoleXZero", this->dipoleXZero)) { if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!"
<< endl; exit(1); } @@ -310,7 +310,7 @@ namespace projects { setBackgroundField(bgFieldDipole, BgBGrid, true); break; case 4: // Vector potential dipole, vanishes after a given radius - bgVectorDipole.initialize(126.2e6 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, this->dipoleTiltPhi, this->dipoleTiltTheta, this->dipoleRadiusFull, this->dipoleRadiusZero ); + bgVectorDipole.initialize(126.2e6 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, this->dipoleTiltPhi, this->dipoleTiltTheta, this->dipoleXFull, this->dipoleXZero ); setBackgroundField(bgVectorDipole, BgBGrid); break; diff --git a/projects/Magnetosphere/Magnetosphere.h b/projects/Magnetosphere/Magnetosphere.h index 4fe4ca993..827a4d587 100644 --- a/projects/Magnetosphere/Magnetosphere.h +++ b/projects/Magnetosphere/Magnetosphere.h @@ -92,8 +92,8 @@ namespace projects { Real dipoleTiltPhi; Real dipoleTiltTheta; - Real dipoleRadiusFull; - Real dipoleRadiusNone; + Real dipoleXFull; + Real dipoleXNone; std::vector speciesParams; }; // class Magnetosphere From 4d13eb817ceccf914701a6284bf4f6d149f2b64e Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Wed, 8 May 2019 15:21:54 +0300 Subject: [PATCH 390/602] Rename all relevant output quantities to either vg_ or fg_ This way, the user can freely select which grid data should be written from. Default names rename and have been chosen to have suitable default behaviour. --- datareduction/datareducer.cpp | 123 +++++++++++++++++++++------------- 1 file changed, 77 insertions(+), 46 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 6c60d25af..495383761 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -36,8 +36,8 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti for (it = P::outputVariableList.begin(); it != P::outputVariableList.end(); it++) { - if(*it == "fs_B") { // Bulk magnetic field at Yee-Lattice locations - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fs_B",[]( + if(*it == "fg_B" || *it == "B") { // Bulk magnetic field at Yee-Lattice locations + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_B",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -52,7 +52,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti std::array& gridSize = technicalGrid.getLocalSize(); std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); - // Iterate through fsgrid cells and extract boundary flag + // Iterate through fsgrid cells and extract total magnetic field for(int z=0; zaddOperator(new DRO::VariableB); continue; } - if(*it == "fs_BackgroundB") { // Static (typically dipole) magnetic field part - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fs_background_B",[]( + if(*it == "fg_BackgroundB" || *it == "BackgroundB") { // Static (typically dipole) magnetic field part + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_background_B",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -90,7 +90,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti std::array& gridSize = technicalGrid.getLocalSize(); std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); - // Iterate through fsgrid cells and extract boundary flag + // Iterate through fsgrid cells and extract background B for(int z=0; zaddOperator(new 
DRO::DataReductionOperatorCellParams("background_B",CellParams::BGBX,3)); continue; } - if(*it == "fs_PerturbedB") { // Fluctuating magnetic field part - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fs_perturbed_B",[]( + if(*it == "fg_PerturbedB" || *it == "PerturbedB") { // Fluctuating magnetic field part + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_perturbed_B",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -125,7 +125,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti std::array& gridSize = technicalGrid.getLocalSize(); std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); - // Iterate through fsgrid cells and extract boundary flag + // Iterate through fsgrid cells and extract values for(int z=0; zaddOperator(new DRO::DataReductionOperatorCellParams("perturbed_B",CellParams::PERBX,3)); continue; } - if(*it == "fs_E") { // Bulk electric field at Yee-lattice locations - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fs_E",[]( + if(*it == "fg_E" || *it== "E") { // Bulk electric field at Yee-lattice locations + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_E",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -160,7 +160,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti std::array& gridSize = technicalGrid.getLocalSize(); std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); - // Iterate through fsgrid cells and extract boundary flag + // Iterate through fsgrid cells and extract E values for(int z=0; zaddOperator(new DRO::DataReductionOperatorCellParams("E",CellParams::EX,3)); continue; } - if(*it == "Rhom") { // Overall mass density (summed over all populations) + if(*it == "vg_Rhom" || *it == "Rhom") { // Overall mass density (summed over all populations) outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("rhom",CellParams::RHOM,1)); continue; } - if(*it == "fs_Rhom") { // Overall mass density (summed over all populations) - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fs_rhom",[]( + if(*it == "fg_Rhom") { // Overall mass density (summed over all populations) + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_rhom",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -199,7 +199,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti std::array& gridSize = technicalGrid.getLocalSize(); std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); - // Iterate through fsgrid cells and extract boundary flag + // Iterate through fsgrid cells and extract rho valuesg for(int z=0; zaddOperator(new DRO::DataReductionOperatorCellParams("rhoq",CellParams::RHOQ,1)); continue; } - if(*it == "fs_Rhoq") { // Overall charge density (summed over all populations) - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fs_rhoq",[]( + if(*it == "fg_Rhoq") { // Overall charge density (summed over all populations) + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_rhoq",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -232,7 +232,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti std::array& gridSize = technicalGrid.getLocalSize(); std::vector 
retval(gridSize[0]*gridSize[1]*gridSize[2]); - // Iterate through fsgrid cells and extract boundary flag + // Iterate through fsgrid cells and extract charge density for(int z=0; zaddOperator(new DRO::DataReductionOperatorCellParams("V",CellParams::VX,3)); continue; } - if(*it == "fs_V") { // Overall effective bulk density defining the center-of-mass frame from all populations - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fs_background_B",[]( + if(*it == "fg_V") { // Overall effective bulk density defining the center-of-mass frame from all populations + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_background_B",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -274,7 +274,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti std::array& gridSize = technicalGrid.getLocalSize(); std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); - // Iterate through fsgrid cells and extract boundary flag + // Iterate through fsgrid cells and extract bulk Velocity for(int z=0; zaddOperator(new DRO::DataReductionOperatorCellParams("LB_weight",CellParams::LBWEIGHTCOUNTER,1)); continue; @@ -364,9 +364,9 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - if(*it == "MaxFieldsdt") { + if(*it == "MaxFieldsdt" || *it == "fg_MaxFieldsdt") { // Maximum timestep constraint as calculated by the fieldsolver - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("FsGridRank",[]( + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("MaxFieldsdt",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -381,7 +381,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti std::array& gridSize = technicalGrid.getLocalSize(); std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); - // Iterate through fsgrid cells and extract boundary flag + // Iterate through fsgrid cells and extract field solver timestep limit for(int z=0; zaddOperator(new DRO::MPIrank); continue; } - if(*it == "FsGridRank") { + if(*it == "FsGridRank" || *it == "fg_rank") { // Map of spatial decomposition of the FsGrid into MPI ranks outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("FsGridRank",[]( FsGrid< std::array, 2>& perBGrid, @@ -420,12 +420,12 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); continue; } - if(*it == "BoundaryType") { + if(*it == "BoundaryType" || *it == "vg_BoundaryType") { // Type of boundarycells outputReducer->addOperator(new DRO::BoundaryType); continue; } - if(*it == "FsGridBoundaryType") { + if(*it == "fg_BoundaryType") { // Type of boundarycells as stored in FSGrid outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("FsGridBoundaryType",[]( FsGrid< std::array, 2>& perBGrid, @@ -455,12 +455,12 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); continue; } - if(*it == "BoundaryLayer") { + if(*it == "BoundaryLayer" || *it == "vg_BoundaryLayer") { // For boundaries with multiple layers: layer count per cell outputReducer->addOperator(new DRO::BoundaryLayer); continue; } - if(*it == "FsGridBoundaryLayer") { + if(*it == "fg_BoundaryLayer") { // Type of boundarycells as stored in FSGrid outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("FsGridBoundaryLayer",[]( FsGrid< std::array, 2>& perBGrid, @@ -477,7 +477,7 @@ void 
initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti std::array& gridSize = technicalGrid.getLocalSize(); std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); - // Iterate through fsgrid cells and extract boundary flag + // Iterate through fsgrid cells and extract boundary layer for(int z=0; zaddOperator(new DRO::DataReductionOperatorCellParams("E_vol",CellParams::EXVOL,3)); continue; } + if(*it == "fg_VolE") { + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_volB",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract EVOL + for(int z=0; zaddOperator(new DRO::DataReductionOperatorCellParams("EXHALL_000_100",CellParams::EXHALL_000_100,1)); @@ -537,13 +568,13 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EGRADPE",CellParams::EXGRADPE,3)); continue; } - if(*it == "VolB") { + if(*it == "VolB" || *it == "vg_VolB") { // Volume-averaged magnetic field outputReducer->addOperator(new DRO::VariableBVol); continue; } - if(*it == "fs_volB") { // Static (typically dipole) magnetic field part - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fs_volB",[]( + if(*it == "fg_volB") { // Static (typically dipole) magnetic field part + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_volB",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -584,14 +615,14 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("PERB_vol",CellParams::PERBXVOL,3)); continue; } - if(*it == "Pressure") { + if(*it == "Pressure" || *it== "vg_Pressure") { // Overall scalar pressure from all populations outputReducer->addOperator(new DRO::VariablePressureSolver); continue; } - if(*it == "fs_Pressure") { + if(*it == "fg_Pressure") { // Overall scalar pressure from all populations - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fs_Pressure",[]( + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_Pressure",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, From 7b7d2db48ce65367a7fb01a898c2e0c9a3cc5f9b Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Wed, 8 May 2019 15:37:34 +0300 Subject: [PATCH 391/602] Added radially scaling vector dipole files as reference --- backgroundfield/vectorRdipole.cpp_unused | 250 +++++++++++++++++++++++ backgroundfield/vectorRdipole.hpp_unused | 50 +++++ 2 files changed, 300 insertions(+) create mode 100644 backgroundfield/vectorRdipole.cpp_unused create mode 100644 backgroundfield/vectorRdipole.hpp_unused diff --git a/backgroundfield/vectorRdipole.cpp_unused b/backgroundfield/vectorRdipole.cpp_unused new file mode 100644 index 000000000..c94d21fba --- /dev/null +++ b/backgroundfield/vectorRdipole.cpp_unused @@ -0,0 +1,250 @@ +/* + * This file is part of Vlasiator. 
+ * Copyright 2010-2016 Finnish Meteorological Institute + * Copyright 2017-2019 University of Helsinki + * + * For details of usage, see the COPYING file and read the "Rules of the Road" + * at http://www.physics.helsinki.fi/vlasiator/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ +/* +Background magnetic field class of Vlasiator. +*/ + +#include +#include +#include "vectordipole.hpp" +#include "../common.h" + +// tilt_angle_phi is from the z-axis in radians +// tilt_angle_theta is from the Sun-Earth-line in radians +void VectorDipole::initialize(const double moment,const double center_x, const double center_y, const double center_z, const double tilt_angle_phi=0, const double tilt_angle_theta=0, const double radius_f, const double radius_z){ + this->initialized = true; + + q[0]=-sin(tilt_angle_phi)*cos(tilt_angle_theta)*moment; + q[1]=-sin(tilt_angle_phi)*sin(tilt_angle_theta)*moment; + q[2]=-cos(tilt_angle_phi)*moment; + + center[0]=center_x; + center[1]=center_y; + center[2]=center_z; + + radius[0]=radius_f; + radius[1]=radius_z; +} + + + +double VectorDipole::call( double x, double y, double z) const +{ + const double minimumR=1e-3*physicalconstants::R_E; //The dipole field is defined to be outside of Earth, and units are in meters + if(this->initialized==false) + return 0.0; + double r[3]; + + r[0]= x-center[0]; + r[1]= y-center[1]; + r[2]= z-center[2]; + + double r2 = r[0]*r[0]+r[1]*r[1]+r[2]*r[2]; + + if(r2=radius[1]*radius[1]) + return 0.0; //set zero field and derivatives outside "zero radius" + + /* This function is called from within other calls, one component at a time. + The component in question is defined using the _fComponent index. If a derivative + is requested, the direction of the derivative is defined using _dComponent. */ + + const double r1 = sqrt(r2); + const double r5 = (r2*r2*r1); + const double rdotq=q[0]*r[0] + q[1]*r[1] +q[2]*r[2]; + const double B=( 3*r[_fComponent]*rdotq-q[_fComponent]*r2)/r5; + + if(_derivative == 0) && (r1 <= radius[0]) + // Full dipole field within full radius + return B; + + if(_derivative == 1) && (r1 <= radius[0]){ + //first derivatives of full field + unsigned int sameComponent; + if(_dComponent==_fComponent) + sameComponent=1; + else + sameComponent=0; + + /* Confirmed Battarbee 26.04.2019: This is the correct + 3D dipole derivative. */ + return -5*B*r[_dComponent]/r2+ + (3*q[_dComponent]*r[_fComponent] - + 2*q[_fComponent]*r[_dComponent] + + 3*rdotq*sameComponent)/r5; + } + + /* Within transition range (between "full radius" and "zero radius"), use + a vector potential scaled with the smootherstep function. 
Calculated + and coded by Markus Battarbee, 30.04.2019 */ + + // Calculate vector potential within transition range + double A[3]; + A[0] = (q[1]*r[2]-q[2]*r[1]) / (r2*r1); + A[1] = (q[2]*r[0]-q[0]*r[2]) / (r2*r1); + A[2] = (q[0]*r[1]-q[1]*r[0]) / (r2*r1); + // Coordinate within smootherstep function + const double Sx = -(r1-radius[1])/(radius[1]-radius[0]); + const double Sx2 = Sx*Sx; + // Smootherstep and its radial derivative + const double S2 = 6.*Sx2*Sx2*Sx - 15.*Sx2*Sx2 + 10.*Sx2*Sx; + const double dS2dr = -(30.*Sx2*Sx2 - 60.*Sx2*Sx + 30.*Sx2)/(radius[1]-radius[0]); + + // Cartesian derivatives of S2 + double dS2cart[3]; + dS2cart[0] = (r[0]/r1)*dS2dr; + dS2cart[1] = (r[1]/r1)*dS2dr; + dS2cart[2] = (r[2]/r1)*dS2dr; + + if(_derivative == 0) && (r1 > radius[0]) { + /* Within transition range (between radius[0] and radius[1]) we + multiply the magnetic field with the S2 smootherstep function + and add an additional corrective term to remove divergence. This + is based on using the dipole field vector potential and scaling + it using the smootherstep function S2. + + Notation: + q = dipole moment (vector) + r = position vector + R = position distance + + The regular dipole field vector potential + A(r) = (mu0/4 pi R^3) * (q cross r) + + The smootherstep function + ( 0, x<=0 + S2(Sx) = ( 6x^5 -15x^4 +10x^3, 0<=x<=1 + ( 1, x>=1 + + Radial distance scaling for S2 + Sx = -(R-radius[1])/(radius[1]-radius[0]) + + The scaled vector potential is A'(r) = A(r)*S2(Sx) + + The scaled magnetic field is + B'(r) = del cross A'(r) + =(NRL)= S2(Sx) del cross A(r) + del S2(Sx) cross A(r) + = S2(Sx) B(r) + del S2(Sx) cross A(r) + + */ + double delS2crossA[3]; + delS2crossA[0] = dS2cart[1]*A[2] - dS2cart[2]*A[1]; + delS2crossA[1] = dS2cart[2]*A[0] - dS2cart[0]*A[2]; + delS2crossA[2] = dS2cart[0]*A[1] - dS2cart[1]*A[0]; + + return S2*B + delS2crossA[_fComponent]; + } + + else if(_derivative == 1) && (r1 > radius[0]) { + /* first derivatives of field calculated from diminishing vector potential + + del B'(r) = S2(Sx) del B(r) + B(r) del S2(Sx) + del (del S2(Sx) cross A(r)) + + component-wise: + + del Bx = S2(Sx) del Bx + del S2(Sx) Bx + del(del S2(Sx) cross A)@i=x + del By = S2(Sx) del By + del S2(Sx) By + del(del S2(Sx) cross A)@i=y + del Bz = S2(Sx) del Bz + del S2(Sx) Bz + del(del S2(Sx) cross A)@i=z + + where + + del(del S2(Sx) cross A)@i=x = del (dS2/dy Az - dS/dz Ay) + = del(dS/dy) Az + dS/dy del Az - del(DS/dz) Ay - dS/dz del Ay + + del(del S2(Sx) cross A)@i=y = del (dS2/dz Ax - dS/dx Az) + = del(dS/dz) Ax + dS/dz del Ax - del(DS/dx) Az - dS/dx del Az + + del(del S2(Sx) cross A)@i=z = del (dS2/dx Ay - dS/dy Ax) + = del(dS/dx) Ay + dS/dx del Ay - del(DS/dy) Ax - dS/dy del Ax + **********/ + + unsigned int sameComponent; + if(_dComponent==_fComponent) + sameComponent=1; + else + sameComponent=0; + + // Regular derivative of B + const double delB = -5*B*r[_dComponent]/r2+ + (3*q[_dComponent]*r[_fComponent] - + 2*q[_fComponent]*r[_dComponent] + + 3*rdotq*sameComponent)/r5; + + // Calculate del Ax, del Ay, del Az + double delAx[3]; + double delAy[3]; + double delAz[3]; + delAx[0] = (-3./(r2*r2*r1))*(q[1]*r[2]-q[2]*r[1])*r[0]; + delAx[1] = (-3./(r2*r2*r1))*(q[1]*r[2]-q[2]*r[1])*r[1] -q[2]/(r2*r1); + delAx[2] = (-3./(r2*r2*r1))*(q[1]*r[2]-q[2]*r[1])*r[2] +q[1]/(r2*r1); + delAy[0] = (-3./(r2*r2*r1))*(q[2]*r[0]-q[0]*r[2])*r[0] +q[2]/(r2*r1); + delAy[1] = (-3./(r2*r2*r1))*(q[2]*r[0]-q[0]*r[2])*r[1]; + delAy[2] = (-3./(r2*r2*r1))*(q[2]*r[0]-q[0]*r[2])*r[2] -q[0]/(r2*r1); + delAz[0] = 
(-3./(r2*r2*r1))*(q[0]*r[1]-q[1]*r[0])*r[0] -q[1]/(r2*r1); + delAz[1] = (-3./(r2*r2*r1))*(q[0]*r[1]-q[1]*r[0])*r[1] +q[0]/(r2*r1); + delAz[2] = (-3./(r2*r2*r1))*(q[0]*r[1]-q[1]*r[0])*r[2]; + + // Calculate del (dS2/dx), del (dS2/dy), del (dS2/dz) + ddidS2dr = 60.*(2.*Sx2*Sx - 3.*Sx2 + Sx)/(r2*(radius[1]-radius[0])*(radius[1]-radius[0])); + double deldS2dx[3]; + double deldS2dy[3]; + double deldS2dz[3]; + deldS2dx[0] = ddidS2dr*r[0]*r[0] -(r[0]/(r2*r1))*dS2dr*r[0] + dS2dr/r1; + deldS2dx[1] = ddidS2dr*r[0]*r[1] -(r[0]/(r2*r1))*dS2dr*r[1]; + deldS2dx[2] = ddidS2dr*r[0]*r[2] -(r[0]/(r2*r1))*dS2dr*r[2]; + deldS2dy[0] = ddidS2dr*r[1]*r[0] -(r[1]/(r2*r1))*dS2dr*r[0]; + deldS2dy[1] = ddidS2dr*r[1]*r[1] -(r[1]/(r2*r1))*dS2dr*r[1] + dS2dr/r1; + deldS2dy[2] = ddidS2dr*r[1]*r[2] -(r[1]/(r2*r1))*dS2dr*r[2]; + deldS2dz[0] = ddidS2dr*r[2]*r[0] -(r[2]/(r2*r1))*dS2dr*r[0]; + deldS2dz[1] = ddidS2dr*r[2]*r[1] -(r[2]/(r2*r1))*dS2dr*r[1]; + deldS2dz[2] = ddidS2dr*r[2]*r[2] -(r[2]/(r2*r1))*dS2dr*r[2] + dS2dr/r1; + + // Calculate del(del S2(Sx) cross A)@i=x, del(del S2(Sx) cross A)@i=y, del(del S2(Sx) cross A)@i=z + double ddS2crossA[3][3]; + // derivatives of X-directional field + ddS2crossA[0][0] = deldS2dy[0]*A[2] + dS2cart[1]*delAz[0] - deldS2dz[0]*A[1] - dS2cart[2]*delAy[0]; + ddS2crossA[0][1] = deldS2dy[1]*A[2] + dS2cart[1]*delAz[1] - deldS2dz[1]*A[1] - dS2cart[2]*delAy[1]; + ddS2crossA[0][2] = deldS2dy[2]*A[2] + dS2cart[1]*delAz[2] - deldS2dz[2]*A[1] - dS2cart[2]*delAy[2]; + // derivatives of Y-directional field + ddS2crossA[1][0] = deldS2dz[0]*A[0] + dS2cart[2]*delAx[0] - deldS2dx[0]*A[2] - dS2cart[0]*delAz[0]; + ddS2crossA[1][1] = deldS2dz[1]*A[0] + dS2cart[2]*delAx[1] - deldS2dx[1]*A[2] - dS2cart[0]*delAz[1]; + ddS2crossA[1][2] = deldS2dz[2]*A[0] + dS2cart[2]*delAx[2] - deldS2dx[2]*A[2] - dS2cart[0]*delAz[2]; + // derivatives of Z-directional field + ddS2crossA[2][0] = deldS2dx[0]*A[1] + dS2cart[0]*delAy[0] - deldS2dy[0]*A[0] - dS2cart[1]*delAx[0]; + ddS2crossA[2][1] = deldS2dx[1]*A[1] + dS2cart[0]*delAy[1] - deldS2dy[1]*A[0] - dS2cart[1]*delAx[1]; + ddS2crossA[2][2] = deldS2dx[2]*A[1] + dS2cart[0]*delAy[2] - deldS2dy[2]*A[0] - dS2cart[1]*delAx[2]; + + return S2*delB + dS2cart[_dComponent]*B + ddS2crossA[_fComponent][_dComponent]; + } + + return 0; // dummy, but prevents gcc from yelling +} + + + + + + diff --git a/backgroundfield/vectorRdipole.hpp_unused b/backgroundfield/vectorRdipole.hpp_unused new file mode 100644 index 000000000..bf8c2e817 --- /dev/null +++ b/backgroundfield/vectorRdipole.hpp_unused @@ -0,0 +1,50 @@ +/* + * This file is part of Vlasiator. + * Copyright 2010-2016 Finnish Meteorological Institute + * Copyright 2017-2019 University of Helsinki + * + * For details of usage, see the COPYING file and read the "Rules of the Road" + * at http://www.physics.helsinki.fi/vlasiator/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
diff --git a/backgroundfield/vectorRdipole.hpp_unused b/backgroundfield/vectorRdipole.hpp_unused
new file mode 100644
index 000000000..bf8c2e817
--- /dev/null
+++ b/backgroundfield/vectorRdipole.hpp_unused
@@ -0,0 +1,50 @@
+/*
+ * This file is part of Vlasiator.
+ * Copyright 2010-2016 Finnish Meteorological Institute
+ * Copyright 2017-2019 University of Helsinki
+ *
+ * For details of usage, see the COPYING file and read the "Rules of the Road"
+ * at http://www.physics.helsinki.fi/vlasiator/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+/*
+Background magnetic field class of Vlasiator.
+*/
+
+#ifndef VECTORDIPOLE_HPP
+#define VECTORDIPOLE_HPP
+#include "fieldfunction.hpp"
+
+
+
+class VectorDipole: public FieldFunction {
+private:
+   bool initialized;
+   double q[3];      // Dipole moment; set to (0,0,moment) for z-aligned
+   double center[3]; // Coordinates where the dipole sits; set to (0,0,0)
+   double radius[2]; // Radial extents of full and zero dipole
+public:
+
+   VectorDipole(){
+      this->initialized = false;
+   }
+   void initialize(const double moment,const double center_x, const double center_y, const double center_z, const double tilt_angle_phi, const double tilt_angle_theta, const double radius_f, const double radius_z);
+   virtual double call(double x, double y, double z) const;
+   virtual ~VectorDipole() {}
+};
+
+#endif
+
From e092722c34865540305f7f12b39099d19a530865 Mon Sep 17 00:00:00 2001
From: Urs Ganse
Date: Wed, 8 May 2019 15:47:01 +0300
Subject: [PATCH 392/602] Update output parameter description in help message

---
 parameters.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/parameters.cpp b/parameters.cpp
index 5e00e455e..d1f8d6569 100644
--- a/parameters.cpp
+++ b/parameters.cpp
@@ -217,7 +217,8 @@ bool Parameters::addParameters(){
 
    // Output variable parameters
    // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh
-   Readparameters::addComposing("variables.output", "List of data reduction operators (DROs) to add to the grid file output. Each variable to be added has to be on a new line output = XXX. Available (20190320): B BackgroundB PerturbedB E Rhom Rhoq populations_Rho V populations_V populations_moments_Backstream populations_moments_NonBackstream populations_EffectiveSparsityThreshold populations_RhoLossAdjust LBweight MaxVdt MaxRdt populations_MaxVdt populations_MaxRdt MaxFieldsdt MPIrank FsGridRank FsGridBoundaryType BoundaryType BoundaryLayer populations_Blocks fSaved populations_accSubcycles VolE HallE GradPeE VolB BackgroundVolB PerturbedVolB Pressure populations_PTensor derivs BVOLderivs GridCoordinates Potential BackgroundVolE ChargeDensity PotentialError MeshData");
+
+   Readparameters::addComposing("variables.output", "List of data reduction operators (DROs) to add to the grid file output. Each variable to be added has to be on a new line output = XXX. Available (20190508): B vg_B fg_b BackgroundB vg_BackgroundB fg_BackgroundB PerturbedB vg_PerturbedB fg_PerturbedB E vg_E fg_E Rhom vg_Rhom fg_Rhom Rhoq vg_Rhoq fg_Rhoq populations_Rho V vg_V fg_V populations_V populations_moments_Backstream populations_moments_NonBackstream populations_EffectiveSparsityThreshold populations_RhoLossAdjust LBweight MaxVdt MaxRdt populations_MaxVdt populations_MaxRdt MaxFieldsdt MPIrank vg_rank FsGridRank fg_rank FsGridBoundaryType BoundaryType vg_BoundaryType fg_BoundaryType BoundaryLayer vg_BoundaryLayer fg_BoundaryLayer populations_Blocks fSaved populations_accSubcycles VolE vg_VolE fg_VolE HallE GradPeE VolB vg_VolB fg_VolB BackgroundVolB PerturbedVolB Pressure vg_Pressure fg_Pressure populations_PTensor derivs BVOLderivs GridCoordinates Potential BackgroundVolE ChargeDensity PotentialError MeshData");
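As a usage sketch of the composing option just listed (hypothetical values; any names from the list above are valid), the run configuration file gives one variable per line:

[variables]
output = B
output = vg_Rhom
diagnostic = populations_Blocks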
 
    // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh
    Readparameters::addComposing("variables.diagnostic", "List of data reduction operators (DROs) to add to the diagnostic runtime output. Each variable to be added has to be on a new line diagnostic = XXX. Available (20190320): FluxB FluxE populations_Blocks Rhom populations_RhoLossAdjust LBweight populations_MaxVdt MaxVdt populations_MaxRdt MaxRdt MaxFieldsdt populations_MaxDistributionFunction populations_MinDistributionFunction");

From 637f5e233c07a09ebffd06cda8e44544dc201710 Mon Sep 17 00:00:00 2001
From: Urs Ganse
Date: Wed, 8 May 2019 16:29:17 +0300
Subject: [PATCH 393/602] Get proper periodicity information from fsgrid.

Also, whitespace fixes in iowrite.cpp

---
 iowrite.cpp | 170 +++++++++++++++++++++++++---------------------------
 1 file changed, 83 insertions(+), 87 deletions(-)

diff --git a/iowrite.cpp b/iowrite.cpp
index f336d86ac..b726fc873 100644
--- a/iowrite.cpp
+++ b/iowrite.cpp
@@ -815,94 +815,90 @@ bool writeMeshBoundingBox( Writer & vlsvWriter,
 */
 bool writeFsGridMetadata(FsGrid< fsgrids::technical, 2>& technicalGrid, vlsv::Writer& vlsvWriter) {
-  std::map<std::string, std::string> xmlAttributes;
-  const std::string meshName="fsgrid";
-  xmlAttributes["mesh"] = meshName;
-
-  //The visit plugin expects MESH_BBOX as a keyword. We only write one
-  //from the first rank.
-  std::array<uint32_t, 3>& globalSize = technicalGrid.getGlobalSize();
-  std::array<int64_t, 6> boundaryBox({globalSize[0], globalSize[1], globalSize[2],
-      1,1,1});
-
-  if(technicalGrid.getRank() == 0) {
-    const unsigned int arraySize = 6;
-    const unsigned int vectorSize = 1;
-    vlsvWriter.writeArray("MESH_BBOX", xmlAttributes, arraySize, vectorSize, &boundaryBox[0]);
-  } else {
-    const unsigned int arraySize = 0;
-    const unsigned int vectorSize = 1;
-    vlsvWriter.writeArray("MESH_BBOX", xmlAttributes, arraySize, vectorSize, &boundaryBox);
-  }
-
-  // Write three 1-dimensional arrays of node coordinates (x,y,z) for
-  // visit to create a cartesian grid out of.
-  std::vector<double> xNodeCoordinates(globalSize[0]+1);
-  for(uint64_t i=0; i<globalSize[0]+1; i++) {
-    xNodeCoordinates[i] = technicalGrid.getPhysicalCoords(i,0,0)[0];
-  }
-  std::vector<double> yNodeCoordinates(globalSize[1]+1);
-  for(uint64_t i=0; i<globalSize[1]+1; i++) {
-    yNodeCoordinates[i] = technicalGrid.getPhysicalCoords(0,i,0)[1];
-  }
-  std::vector<double> zNodeCoordinates(globalSize[2]+1);
-  for(uint64_t i=0; i<globalSize[2]+1; i++) {
-    zNodeCoordinates[i] = technicalGrid.getPhysicalCoords(0,0,i)[2];
-  }
-  vlsvWriter.writeArray("MESH_NODE_CRDS_X", xmlAttributes, globalSize[0]+1, 1, xNodeCoordinates.data());
-  vlsvWriter.writeArray("MESH_NODE_CRDS_Y", xmlAttributes, globalSize[1]+1, 1, yNodeCoordinates.data());
-  vlsvWriter.writeArray("MESH_NODE_CRDS_Z", xmlAttributes, globalSize[2]+1, 1, zNodeCoordinates.data());
-
-  std::array<int32_t, 3>& localSize = technicalGrid.getLocalSize();
-  std::vector<uint64_t> globalIds(localSize[0]*localSize[1]*localSize[2]);
-  int i=0;
-  for(int z=0; z<localSize[2]; z++) {
-    for(int y=0; y<localSize[1]; y++) {
-      for(int x=0; x<localSize[0]; x++) {
-        std::array<int32_t, 3> globalIndex = technicalGrid.getGlobalIndices(x,y,z);
-        globalIds[i++] = globalIndex[2]*globalSize[0]*globalSize[1]+
-          globalIndex[1]*globalSize[0] +
-          globalIndex[0];
-      }
-    }
-  }
-
-
-  // writeDomainSizes
-  std::array<uint64_t, 2> meshDomainSize({globalIds.size(), 0});
-  vlsvWriter.writeArray("MESH_DOMAIN_SIZES", xmlAttributes, 1, 2, &meshDomainSize[0]);
-
-  // Finally, write mesh object itself.
-  xmlAttributes.clear();
-  xmlAttributes["name"] = meshName;
-  xmlAttributes["type"] = vlsv::mesh::STRING_UCD_MULTI;
-  // TODO: Dummy values, fix by getting actual periodicity from fsgrid
-  xmlAttributes["xperiodic"]="no";
-  xmlAttributes["yperiodic"]="no";
-  xmlAttributes["zperiodic"]="no";
-  //if(P::xperiodic) { xmlAttributes["xperiodic"] = "yes"; } else { xmlAttributes["xperiodic"] = "no"; }
-  //if(P::yperiodic) { xmlAttributes["yperiodic"] = "yes"; } else { xmlAttributes["yperiodic"] = "no"; }
-  //if(P::zperiodic) { xmlAttributes["zperiodic"] = "yes"; } else { xmlAttributes["zperiodic"] = "no"; }
-
-  vlsvWriter.writeArray("MESH", xmlAttributes, globalIds.size(), 1, globalIds.data());
+   std::map<std::string, std::string> xmlAttributes;
+   const std::string meshName="fsgrid";
+   xmlAttributes["mesh"] = meshName;
+
+   //The visit plugin expects MESH_BBOX as a keyword. We only write one
+   //from the first rank.
+   std::array<uint32_t, 3>& globalSize = technicalGrid.getGlobalSize();
+   std::array<int64_t, 6> boundaryBox({globalSize[0], globalSize[1], globalSize[2],
+         1,1,1});
+
+   if(technicalGrid.getRank() == 0) {
+      const unsigned int arraySize = 6;
+      const unsigned int vectorSize = 1;
+      vlsvWriter.writeArray("MESH_BBOX", xmlAttributes, arraySize, vectorSize, &boundaryBox[0]);
+   } else {
+      const unsigned int arraySize = 0;
+      const unsigned int vectorSize = 1;
+      vlsvWriter.writeArray("MESH_BBOX", xmlAttributes, arraySize, vectorSize, &boundaryBox);
+   }
+
+   // Write three 1-dimensional arrays of node coordinates (x,y,z) for
+   // visit to create a cartesian grid out of.
+   std::vector<double> xNodeCoordinates(globalSize[0]+1);
+   for(uint64_t i=0; i<globalSize[0]+1; i++) {
+      xNodeCoordinates[i] = technicalGrid.getPhysicalCoords(i,0,0)[0];
+   }
+   std::vector<double> yNodeCoordinates(globalSize[1]+1);
+   for(uint64_t i=0; i<globalSize[1]+1; i++) {
+      yNodeCoordinates[i] = technicalGrid.getPhysicalCoords(0,i,0)[1];
+   }
+   std::vector<double> zNodeCoordinates(globalSize[2]+1);
+   for(uint64_t i=0; i<globalSize[2]+1; i++) {
+      zNodeCoordinates[i] = technicalGrid.getPhysicalCoords(0,0,i)[2];
+   }
+   vlsvWriter.writeArray("MESH_NODE_CRDS_X", xmlAttributes, globalSize[0]+1, 1, xNodeCoordinates.data());
+   vlsvWriter.writeArray("MESH_NODE_CRDS_Y", xmlAttributes, globalSize[1]+1, 1, yNodeCoordinates.data());
+   vlsvWriter.writeArray("MESH_NODE_CRDS_Z", xmlAttributes, globalSize[2]+1, 1, zNodeCoordinates.data());
+
+   std::array<int32_t, 3>& localSize = technicalGrid.getLocalSize();
+   std::vector<uint64_t> globalIds(localSize[0]*localSize[1]*localSize[2]);
+   int i=0;
+   for(int z=0; z<localSize[2]; z++) {
+      for(int y=0; y<localSize[1]; y++) {
+         for(int x=0; x<localSize[0]; x++) {
+            std::array<int32_t, 3> globalIndex = technicalGrid.getGlobalIndices(x,y,z);
+            globalIds[i++] = globalIndex[2]*globalSize[0]*globalSize[1]+
+               globalIndex[1]*globalSize[0] +
+               globalIndex[0];
+         }
+      }
+   }
-  return true;
+
+   // writeDomainSizes
+   std::array<uint64_t, 2> meshDomainSize({globalIds.size(), 0});
+   vlsvWriter.writeArray("MESH_DOMAIN_SIZES", xmlAttributes, 1, 2, &meshDomainSize[0]);
+
+   // Finally, write mesh object itself.
+   xmlAttributes.clear();
+   xmlAttributes["name"] = meshName;
+   xmlAttributes["type"] = vlsv::mesh::STRING_UCD_MULTI;
+   xmlAttributes["xperiodic"]=technicalGrid.getPeriodic()[0]?"yes":"no";
+   xmlAttributes["yperiodic"]=technicalGrid.getPeriodic()[1]?"yes":"no";
+   xmlAttributes["zperiodic"]=technicalGrid.getPeriodic()[2]?"yes":"no";
+
+   vlsvWriter.writeArray("MESH", xmlAttributes, globalIds.size(), 1, globalIds.data());
+
+   return true;
 }
 
 /** This function writes the velocity space.
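A side note on the MESH array written above: each local cell stores its flattened global ID, numbered x-fastest exactly as in the triple loop of writeFsGridMetadata. A small Python sketch of that mapping and its inverse (the 16^3 size and the function names are illustrative, not from the patch):

Nx, Ny, Nz = 16, 16, 16                  # hypothetical fsgrid global size

def flatten(gx, gy, gz):                 # mirrors globalIds[i++] = gz*Nx*Ny + gy*Nx + gx
    return gz*Nx*Ny + gy*Nx + gx

def unflatten(gid):                      # handy when inspecting MESH in a produced VLSV file
    return gid % Nx, (gid // Nx) % Ny, gid // (Nx*Ny)

assert unflatten(flatten(3, 5, 7)) == (3, 5, 7)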
From 22a6f1bd9af88d91f04efacdefe8f733c3c5fa01 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Wed, 8 May 2019 16:30:36 +0300 Subject: [PATCH 394/602] fg_b -> fg_B in parameter help message --- parameters.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parameters.cpp b/parameters.cpp index d1f8d6569..eee03c0a3 100644 --- a/parameters.cpp +++ b/parameters.cpp @@ -218,7 +218,7 @@ bool Parameters::addParameters(){ // Output variable parameters // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh - Readparameters::addComposing("variables.output", "List of data reduction operators (DROs) to add to the grid file output. Each variable to be added has to be on a new line output = XXX. Available (20190508): B vg_B fg_b BackgroundB vg_BackgroundB fg_BackgroundB PerturbedB vg_PerturbedB fg_PerturbedB E vg_E fg_E Rhom vg_Rhom fg_Rhom Rhoq vg_Rhoq fg_Rhoq populations_Rho V vg_V fg_V populations_V populations_moments_Backstream populations_moments_NonBackstream populations_EffectiveSparsityThreshold populations_RhoLossAdjust LBweight MaxVdt MaxRdt populations_MaxVdt populations_MaxRdt MaxFieldsdt MPIrank vg_rank FsGridRank fg_rank FsGridBoundaryType BoundaryType vg_BoundaryType fg_BoundaryType BoundaryLayer vg_BoundaryLayer fg_BoundaryLayer populations_Blocks fSaved populations_accSubcycles VolE vg_VolE fg_VolE HallE GradPeE VolB vg_VolB fg_VolB BackgroundVolB PerturbedVolB Pressure vg_Pressure fg_Pressure populations_PTensor derivs BVOLderivs GridCoordinates Potential BackgroundVolE ChargeDensity PotentialError MeshData"); + Readparameters::addComposing("variables.output", "List of data reduction operators (DROs) to add to the grid file output. Each variable to be added has to be on a new line output = XXX. Available (20190508): B vg_B fg_B BackgroundB vg_BackgroundB fg_BackgroundB PerturbedB vg_PerturbedB fg_PerturbedB E vg_E fg_E Rhom vg_Rhom fg_Rhom Rhoq vg_Rhoq fg_Rhoq populations_Rho V vg_V fg_V populations_V populations_moments_Backstream populations_moments_NonBackstream populations_EffectiveSparsityThreshold populations_RhoLossAdjust LBweight MaxVdt MaxRdt populations_MaxVdt populations_MaxRdt MaxFieldsdt MPIrank vg_rank FsGridRank fg_rank FsGridBoundaryType BoundaryType vg_BoundaryType fg_BoundaryType BoundaryLayer vg_BoundaryLayer fg_BoundaryLayer populations_Blocks fSaved populations_accSubcycles VolE vg_VolE fg_VolE HallE GradPeE VolB vg_VolB fg_VolB BackgroundVolB PerturbedVolB Pressure vg_Pressure fg_Pressure populations_PTensor derivs BVOLderivs GridCoordinates Potential BackgroundVolE ChargeDensity PotentialError MeshData"); // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh Readparameters::addComposing("variables.diagnostic", "List of data reduction operators (DROs) to add to the diagnostic runtime output. Each variable to be added has to be on a new line diagnostic = XXX. 
Available (20190320): FluxB FluxE populations_Blocks Rhom populations_RhoLossAdjust LBweight populations_MaxVdt MaxVdt populations_MaxRdt MaxRdt MaxFieldsdt populations_MaxDistributionFunction populations_MinDistributionFunction");

From fba08eee73323c134f51f750dc346eececd50ad8 Mon Sep 17 00:00:00 2001
From: Markus Battarbee
Date: Wed, 8 May 2019 16:39:09 +0300
Subject: [PATCH 395/602] added python scripts for analyzing and verifying
 different dipole models

---
 doc/vectordipole/fieldmodels.py               | 529 ++++++++++++++++++
 doc/vectordipole/fluxfunction.py              | 229 ++++++++
 .../vectorpotentialdipole_verify.py           | 406 ++++++++++++++
 3 files changed, 1164 insertions(+)
 create mode 100644 doc/vectordipole/fieldmodels.py
 create mode 100644 doc/vectordipole/fluxfunction.py
 create mode 100644 doc/vectordipole/vectorpotentialdipole_verify.py

diff --git a/doc/vectordipole/fieldmodels.py b/doc/vectordipole/fieldmodels.py
new file mode 100644
index 000000000..e58a3fafb
--- /dev/null
+++ b/doc/vectordipole/fieldmodels.py
@@ -0,0 +1,529 @@
+#!/usr/bin/env python
+
+# /*
+# * This file is part of Vlasiator.
+# * Copyright 2010-2016 Finnish Meteorological Institute
+# * Copyright 2017-2019 University of Helsinki
+# *
+# * For details of usage, see the COPYING file and read the "Rules of the Road"
+# * at http://www.physics.helsinki.fi/vlasiator/
+# *
+# * This program is free software; you can redistribute it and/or modify
+# * it under the terms of the GNU General Public License as published by
+# * the Free Software Foundation; either version 2 of the License, or
+# * (at your option) any later version.
+# *
+# * This program is distributed in the hope that it will be useful,
+# * but WITHOUT ANY WARRANTY; without even the implied warranty of
+# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# * GNU General Public License for more details.
+# *
+# * You should have received a copy of the GNU General Public License along
+# * with this program; if not, write to the Free Software Foundation, Inc.,
+# * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# */
+import numpy as np
+import math
+
+''' Testing routine for different dipole formulations
+    Call this module from other testing / plotting routines
+
+'''
+
+RE=6371000.
+class dipole(object):
+    ''' Class generating dipole fields
+    '''
+
+    moment_base = 8.e15
+
+    #RE=6371000.
+    minimumR=1e-3*RE
+
+    def __init__(self, centerx, centery, centerz, tilt_phi, tilt_theta, mult=1.0, radius_f=None, radius_z=None):
+        self.radius = np.zeros(2)# // Radial extents of full and zero dipole
+
+        self.q = np.zeros(3)# // Dipole moment# set to (0,0,moment) for z-aligned
+        self.center = np.zeros(3)# // Coordinates where the dipole sits# set to (0,0,0)
+
+        self.center[0]=centerx
+        self.center[1]=centery
+        self.center[2]=centerz
+        self.tilt_angle_phi = tilt_phi * math.pi/180.
+        self.tilt_angle_theta = tilt_theta * math.pi/180.
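+        # Note: the moment vector q set below is anti-parallel to the tilt
+        # direction; with zero tilt this gives q = (0, 0, -moment), i.e. a
+        # southward-pointing terrestrial dipole moment, and |q| = mult*moment_base.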
+        self.moment = mult*self.moment_base
+        self.q[0]=-np.sin(self.tilt_angle_phi)*np.cos(self.tilt_angle_theta)*self.moment
+        self.q[1]=-np.sin(self.tilt_angle_phi)*np.sin(self.tilt_angle_theta)*self.moment
+        self.q[2]=-np.cos(self.tilt_angle_phi)*self.moment
+
+        if radius_f is not None:
+            self.radius[0]=radius_f*RE
+        else:
+            self.radius[0]=10.*RE
+        if radius_z is not None:
+            self.radius[1]=radius_z*RE
+        else:
+            self.radius[1]=40.*RE
+
+    def set_dipole(self, centerx, centery, centerz, tilt_phi, tilt_theta, mult=1.0, radius_f=None, radius_z=None):
+        self.center[0]=centerx
+        self.center[1]=centery
+        self.center[2]=centerz
+        self.tilt_angle_phi = tilt_phi * math.pi/180.
+        self.tilt_angle_theta = tilt_theta * math.pi/180.
+        self.moment = mult*self.moment_base
+        self.q[0]=-np.sin(self.tilt_angle_phi)*np.cos(self.tilt_angle_theta)*self.moment
+        self.q[1]=-np.sin(self.tilt_angle_phi)*np.sin(self.tilt_angle_theta)*self.moment
+        self.q[2]=-np.cos(self.tilt_angle_phi)*self.moment
+        if radius_f is not None:
+            self.radius[0]=radius_f*RE
+        if radius_z is not None:
+            self.radius[1]=radius_z*RE
+
+    def get_old(self, x,y,z,derivative,fComponent,dComponent):
+        r = np.zeros(3)
+        r[0]= x-self.center[0]
+        r[1]= y-self.center[1]
+        r[2]= z-self.center[2]
+        r2 = r[0]*r[0]+r[1]*r[1]+r[2]*r[2]
+        if(r2<self.minimumR*self.minimumR):
+            r2=self.minimumR*self.minimumR
+        if(r2>=self.radius[1]*self.radius[1]):
+            return 0.0# #set zero field and derivatives outside "zero radius"
+
+        # /* This function is called from within other calls, one component at a time.
+        #    The component in question is defined using the fComponent index. If a derivative
+        #    is requested, the direction of the derivative is defined using dComponent. */
+
+        r1 = np.sqrt(r2)
+        r5 = (r2*r2*r1)
+        rdotq=self.q[0]*r[0] + self.q[1]*r[1] +self.q[2]*r[2]
+        B=( 3*r[fComponent]*rdotq-self.q[fComponent]*r2)/r5
+
+        if(derivative == 0) and (r1 <= self.radius[0]):
+            # Full dipole field within full radius
+            return B
+
+        if(derivative == 1) and (r1 <= self.radius[0]):
+            #first derivatives of full field
+            if(dComponent==fComponent):
+                sameComponent=1
+            else:
+                sameComponent=0
+
+            # /* Confirmed Battarbee 26.04.2019: This is the correct
+            #    3D dipole derivative. */
+            return -5*B*r[dComponent]/r2+(3*self.q[dComponent]*r[fComponent] - 2*self.q[fComponent]*r[dComponent] + 3*rdotq*sameComponent)/r5
+
+        # /* Within transition range (between "full radius" and "zero radius"), use
+        #    a vector potential scaled with the smootherstep function. Calculated
+        #    and coded by Markus Battarbee, 30.04.2019 */
+
+        # Calculate vector potential within transition range
+        A=np.zeros(3)
+        A[0] = (self.q[1]*r[2]-self.q[2]*r[1]) / (r2*r1)#
+        A[1] = (self.q[2]*r[0]-self.q[0]*r[2]) / (r2*r1)#
+        A[2] = (self.q[0]*r[1]-self.q[1]*r[0]) / (r2*r1)#
+        # Coordinate within smootherstep function
+        Sx = -(r1-self.radius[1])/(self.radius[1]-self.radius[0])
+        Sx2 = Sx*Sx
+        # Smootherstep and its radial derivative
+        S2 = 6.*Sx2*Sx2*Sx - 15.*Sx2*Sx2 + 10.*Sx2*Sx
+        dS2dr = -(30.*Sx2*Sx2 - 60.*Sx2*Sx + 30.*Sx2)/(self.radius[1]-self.radius[0])
+
+        # Alternatively, smoothstep (does not look good!)
+ #S2 = 3.*Sx2 - 2.*Sx2*Sx + #dS2dr = -(6.*Sx - 6.*Sx2)/(radius[1]-radius[0]) + + #print("r",r1,"Sx",Sx,"S2",S2) + + # Cartesian derivatives of S2 + dS2cart=np.zeros(3) + dS2cart[0] = (r[0]/r1)*dS2dr + dS2cart[1] = (r[1]/r1)*dS2dr + dS2cart[2] = (r[2]/r1)*dS2dr# + #print("r",r1,"S2",S2,"dSdx",dS2cart[0],"dSdy",dS2cart[1],"dSdz",dS2cart[2]) + + if(derivative == 0) and (r1 > self.radius[0]): + # /* Within transition range (between radius[0] and radius[1]) we + # multiply the magnetic field with the S2 smootherstep function + # and add an additional corrective term to remove divergence. This + # is based on using the dipole field vector potential and scaling + # it using the smootherstep function S2. + + # Notation: + # q = dipole moment (vector) + # r = position vector + # R = position distance + + # The regular dipole field vector potential + # A(r) = (mu0/4 pi R^3) * (q cross r) + + # The smootherstep function + # ( 0, Sx<=0 + # S2(Sx) = ( 6 Sx^5 -15 Sx^4 +10 Sx^3, 0<=Sx<=1 + # ( 1, Sx>=1 + + # Radial distance scaling for S2 + # Sx = -(R-radius[1])/(radius[1]-radius[0]) + + # The scaled vector potential is A'(r) = A(r)*S2(Sx) + + # The scaled magnetic field is + # B'(r) = del cross A'(r) + # =(NRL)= S2(Sx) del cross A(r) + del S2(Sx) cross A(r) + # = S2(Sx) B(r) + del S2(Sx) cross A(r) + + # */ + delS2crossA=np.zeros(3) + delS2crossA[0] = dS2cart[1]*A[2] - dS2cart[2]*A[1] + delS2crossA[1] = dS2cart[2]*A[0] - dS2cart[0]*A[2] + delS2crossA[2] = dS2cart[0]*A[1] - dS2cart[1]*A[0] + + return S2*B + delS2crossA[fComponent] + + elif(derivative == 1) and (r1 > self.radius[0]): + # /* first derivatives of field calculated from diminishing vector potential + + # del B'(r) = S2(Sx) del B(r) + B(r) del S2(Sx) + del (del S2(Sx) cross A(r)) + + # component-wise: + + # del Bx = S2(Sx) del Bx + del S2(Sx) Bx + del(del S2(Sx) cross A)@i=x + # del By = S2(Sx) del By + del S2(Sx) By + del(del S2(Sx) cross A)@i=y + # del Bz = S2(Sx) del Bz + del S2(Sx) Bz + del(del S2(Sx) cross A)@i=z + + # where + + # del(del S2(Sx) cross A)@i=x = del (dS2/dy Az - dS/dz Ay) + # = del(dS/dy) Az + dS/dy del Az - del(DS/dz) Ay - dS/dz del Ay + + # del(del S2(Sx) cross A)@i=y = del (dS2/dz Ax - dS/dx Az) + # = del(dS/dz) Ax + dS/dz del Ax - del(DS/dx) Az - dS/dx del Az + + # del(del S2(Sx) cross A)@i=z = del (dS2/dx Ay - dS/dy Ax) + # = del(dS/dx) Ay + dS/dx del Ay - del(DS/dy) Ax - dS/dy del Ax + + + # **********/ + + if(dComponent==fComponent): + sameComponent=1 + else: + sameComponent=0 + + # Regular derivative of B + delB = -5*B*r[dComponent]/r2 + (3*self.q[dComponent]*r[fComponent] - 2*self.q[fComponent]*r[dComponent] + 3*rdotq*sameComponent)/r5 + + # Calculate del Ax, del Ay, del Az + delAx=np.zeros(3) + delAy=np.zeros(3) + delAz=np.zeros(3) + delAx[0] = (-3./(r2*r2*r1))*(self.q[1]*r[2]-self.q[2]*r[1])*r[0] + delAx[1] = (-3./(r2*r2*r1))*(self.q[1]*r[2]-self.q[2]*r[1])*r[1] -self.q[2]/(r2*r1) + delAx[2] = (-3./(r2*r2*r1))*(self.q[1]*r[2]-self.q[2]*r[1])*r[2] +self.q[1]/(r2*r1) + delAy[0] = (-3./(r2*r2*r1))*(self.q[2]*r[0]-self.q[0]*r[2])*r[0] +self.q[2]/(r2*r1) + delAy[1] = (-3./(r2*r2*r1))*(self.q[2]*r[0]-self.q[0]*r[2])*r[1] + delAy[2] = (-3./(r2*r2*r1))*(self.q[2]*r[0]-self.q[0]*r[2])*r[2] -self.q[0]/(r2*r1) + delAz[0] = (-3./(r2*r2*r1))*(self.q[0]*r[1]-self.q[1]*r[0])*r[0] -self.q[1]/(r2*r1) + delAz[1] = (-3./(r2*r2*r1))*(self.q[0]*r[1]-self.q[1]*r[0])*r[1] +self.q[0]/(r2*r1) + delAz[2] = (-3./(r2*r2*r1))*(self.q[0]*r[1]-self.q[1]*r[0])*r[2] + + ddidS2dr = 60.*(2.*Sx2*Sx - 3.*Sx2 + 
Sx)/(r2*(self.radius[1]-self.radius[0])*(self.radius[1]-self.radius[0]))
+
+            # Calculate del (dS2/dx), del (dS2/dy), del (dS2/dz)
+            deldS2dx=np.zeros(3)
+            deldS2dy=np.zeros(3)
+            deldS2dz=np.zeros(3)
+            deldS2dx[0] = ddidS2dr*r[0]*r[0] -(r[0]/(r2*r1))*dS2dr*r[0] + dS2dr/r1
+            deldS2dx[1] = ddidS2dr*r[0]*r[1] -(r[0]/(r2*r1))*dS2dr*r[1]
+            deldS2dx[2] = ddidS2dr*r[0]*r[2] -(r[0]/(r2*r1))*dS2dr*r[2]
+            deldS2dy[0] = ddidS2dr*r[1]*r[0] -(r[1]/(r2*r1))*dS2dr*r[0]
+            deldS2dy[1] = ddidS2dr*r[1]*r[1] -(r[1]/(r2*r1))*dS2dr*r[1] + dS2dr/r1
+            deldS2dy[2] = ddidS2dr*r[1]*r[2] -(r[1]/(r2*r1))*dS2dr*r[2]
+            deldS2dz[0] = ddidS2dr*r[2]*r[0] -(r[2]/(r2*r1))*dS2dr*r[0]
+            deldS2dz[1] = ddidS2dr*r[2]*r[1] -(r[2]/(r2*r1))*dS2dr*r[1]
+            deldS2dz[2] = ddidS2dr*r[2]*r[2] -(r[2]/(r2*r1))*dS2dr*r[2] + dS2dr/r1
+
+            # Calculate del(del S2(Sx) cross A)@i=x, del(del S2(Sx) cross A)@i=y, del(del S2(Sx) cross A)@i=z
+            ddS2crossA=np.zeros([3,3])
+            # derivatives of X-directional field
+            ddS2crossA[0][0] = deldS2dy[0]*A[2] + dS2cart[1]*delAz[0] - deldS2dz[0]*A[1] - dS2cart[2]*delAy[0]
+            ddS2crossA[0][1] = deldS2dy[1]*A[2] + dS2cart[1]*delAz[1] - deldS2dz[1]*A[1] - dS2cart[2]*delAy[1]
+            ddS2crossA[0][2] = deldS2dy[2]*A[2] + dS2cart[1]*delAz[2] - deldS2dz[2]*A[1] - dS2cart[2]*delAy[2]
+            # derivatives of Y-directional field
+            ddS2crossA[1][0] = deldS2dz[0]*A[0] + dS2cart[2]*delAx[0] - deldS2dx[0]*A[2] - dS2cart[0]*delAz[0]
+            ddS2crossA[1][1] = deldS2dz[1]*A[0] + dS2cart[2]*delAx[1] - deldS2dx[1]*A[2] - dS2cart[0]*delAz[1]
+            ddS2crossA[1][2] = deldS2dz[2]*A[0] + dS2cart[2]*delAx[2] - deldS2dx[2]*A[2] - dS2cart[0]*delAz[2]
+            # derivatives of Z-directional field
+            ddS2crossA[2][0] = deldS2dx[0]*A[1] + dS2cart[0]*delAy[0] - deldS2dy[0]*A[0] - dS2cart[1]*delAx[0]
+            ddS2crossA[2][1] = deldS2dx[1]*A[1] + dS2cart[0]*delAy[1] - deldS2dy[1]*A[0] - dS2cart[1]*delAx[1]
+            ddS2crossA[2][2] = deldS2dx[2]*A[1] + dS2cart[0]*delAy[2] - deldS2dy[2]*A[0] - dS2cart[1]*delAx[2]
+
+            return S2*delB + dS2cart[dComponent]*B + ddS2crossA[fComponent][dComponent]
+
+        print("ERROR")
+        return 0 # dummy, but prevents gcc from yelling
+
+    def getX(self, x,y,z,derivative,fComponent,dComponent):
+        r = np.zeros(3)
+        r[0]= x-self.center[0]
+        r[1]= y-self.center[1]
+        r[2]= z-self.center[2]
+
+        r2 = r[0]*r[0]+r[1]*r[1]+r[2]*r[2]
+
+        if(r2<self.minimumR*self.minimumR):
+            r2=self.minimumR*self.minimumR
+        if(x>=self.radius[1]):
+            return 0.0# #set zero field and derivatives outside "zero radius"
+
+        # /* This function is called from within other calls, one component at a time.
+        #    The component in question is defined using the fComponent index. If a derivative
+        #    is requested, the direction of the derivative is defined using dComponent. */
+
+        r1 = np.sqrt(r2)
+        r5 = (r2*r2*r1)
+        rdotq=self.q[0]*r[0] + self.q[1]*r[1] +self.q[2]*r[2]
+        B=( 3*r[fComponent]*rdotq-self.q[fComponent]*r2)/r5
+
+        if(derivative == 0) and (x <= self.radius[0]):
+            # Full dipole field within full radius
+            return B
+
+        if(derivative == 1) and (x <= self.radius[0]):
+            #first derivatives of full field
+            if(dComponent==fComponent):
+                sameComponent=1
+            else:
+                sameComponent=0
+
+            # /* Confirmed Battarbee 26.04.2019: This is the correct
+            #    3D dipole derivative. */
+            return -5*B*r[dComponent]/r2+(3*self.q[dComponent]*r[fComponent] - 2*self.q[fComponent]*r[dComponent] + 3*rdotq*sameComponent)/r5
+
+        # /* Within transition range (between "full radius" and "zero radius"), use
+        #    a vector potential scaled with the smootherstep function.
Calculated + # and coded by Markus Battarbee, 30.04.2019 */ + + # Calculate vector potential within transition range + A=np.zeros(3) + A[0] = (self.q[1]*r[2]-self.q[2]*r[1]) / (r2*r1)# + A[1] = (self.q[2]*r[0]-self.q[0]*r[2]) / (r2*r1)# + A[2] = (self.q[0]*r[1]-self.q[1]*r[0]) / (r2*r1)# + # Coordinate within smootherstep function + Sx = -(x-self.radius[1])/(self.radius[1]-self.radius[0]) + Sx2 = Sx*Sx + # Smootherstep and its radial derivative + S2 = 6.*Sx2*Sx2*Sx - 15.*Sx2*Sx2 + 10.*Sx2*Sx + dS2dr = -(30.*Sx2*Sx2 - 60.*Sx2*Sx + 30.*Sx2)/(self.radius[1]-self.radius[0]) + + # Alternatively, smoothstep (does not look good!) + #S2 = 3.*Sx2 - 2.*Sx2*Sx + #dS2dr = -(6.*Sx - 6.*Sx2)/(radius[1]-radius[0]) + + #print("r",r1,"Sx",Sx,"S2",S2) + + # Cartesian derivatives of S2 + dS2cart=np.zeros(3) + dS2cart[0] = dS2dr #(r[0]/r1)*dS2dr + dS2cart[1] = 0.#(r[1]/r1)*dS2dr + dS2cart[2] = 0.#(r[2]/r1)*dS2dr# + #print("r",r1,"S2",S2,"dSdx",dS2cart[0],"dSdy",dS2cart[1],"dSdz",dS2cart[2]) + + if(derivative == 0) and (x > self.radius[0]): + # /* Within transition range (between radius[0] and radius[1]) we + # multiply the magnetic field with the S2 smootherstep function + # and add an additional corrective term to remove divergence. This + # is based on using the dipole field vector potential and scaling + # it using the smootherstep function S2. + + # Notation: + # q = dipole moment (vector) + # r = position vector + # R = position distance + + # The regular dipole field vector potential + # A(r) = (mu0/4 pi R^3) * (q cross r) + + # The smootherstep function + # ( 0, Sx<=0 + # S2(Sx) = ( 6 Sx^5 -15 Sx^4 +10 Sx^3, 0<=Sx<=1 + # ( 1, Sx>=1 + + # Radial distance scaling for S2 + # Sx = -(R-radius[1])/(radius[1]-radius[0]) + + # The scaled vector potential is A'(r) = A(r)*S2(Sx) + + # The scaled magnetic field is + # B'(r) = del cross A'(r) + # =(NRL)= S2(Sx) del cross A(r) + del S2(Sx) cross A(r) + # = S2(Sx) B(r) + del S2(Sx) cross A(r) + + # */ + delS2crossA=np.zeros(3) + delS2crossA[0] = 0.#dS2cart[1]*A[2] - dS2cart[2]*A[1] + delS2crossA[1] = - dS2cart[0]*A[2] #dS2cart[2]*A[0] - dS2cart[0]*A[2] + delS2crossA[2] = dS2cart[0]*A[1] #- dS2cart[1]*A[0] + + return S2*B + delS2crossA[fComponent] + + elif(derivative == 1) and (x > self.radius[0]): + # /* first derivatives of field calculated from diminishing vector potential + + # del B'(r) = S2(Sx) del B(r) + B(r) del S2(Sx) + del (del S2(Sx) cross A(r)) + + # component-wise: + + # del Bx = S2(Sx) del Bx + del S2(Sx) Bx + del(del S2(Sx) cross A)@i=x + # del By = S2(Sx) del By + del S2(Sx) By + del(del S2(Sx) cross A)@i=y + # del Bz = S2(Sx) del Bz + del S2(Sx) Bz + del(del S2(Sx) cross A)@i=z + + # where + + # del(del S2(Sx) cross A)@i=x = del (dS2/dy Az - dS/dz Ay) + # = del(dS/dy) Az + dS/dy del Az - del(DS/dz) Ay - dS/dz del Ay + + # del(del S2(Sx) cross A)@i=y = del (dS2/dz Ax - dS/dx Az) + # = del(dS/dz) Ax + dS/dz del Ax - del(DS/dx) Az - dS/dx del Az + + # del(del S2(Sx) cross A)@i=z = del (dS2/dx Ay - dS/dy Ax) + # = del(dS/dx) Ay + dS/dx del Ay - del(DS/dy) Ax - dS/dy del Ax + + + # **********/ + + if(dComponent==fComponent): + sameComponent=1 + else: + sameComponent=0 + + # Regular derivative of B + delB = -5*B*r[dComponent]/r2 + (3*self.q[dComponent]*r[fComponent] - 2*self.q[fComponent]*r[dComponent] + 3*rdotq*sameComponent)/r5 + + # Calculate del Ax, del Ay, del Az + delAx=np.zeros(3) + delAy=np.zeros(3) + delAz=np.zeros(3) + # delAx[0] = (-3./(r2*r2*r1))*(self.q[1]*r[2]-self.q[2]*r[1])*r[0] + # delAx[1] = 
(-3./(r2*r2*r1))*(self.q[1]*r[2]-self.q[2]*r[1])*r[1] -self.q[2]/(r2*r1) + # delAx[2] = (-3./(r2*r2*r1))*(self.q[1]*r[2]-self.q[2]*r[1])*r[2] +self.q[1]/(r2*r1) + delAy[0] = (-3./(r2*r2*r1))*(self.q[2]*r[0]-self.q[0]*r[2])*r[0] +self.q[2]/(r2*r1) + delAy[1] = (-3./(r2*r2*r1))*(self.q[2]*r[0]-self.q[0]*r[2])*r[1] + delAy[2] = (-3./(r2*r2*r1))*(self.q[2]*r[0]-self.q[0]*r[2])*r[2] -self.q[0]/(r2*r1) + delAz[0] = (-3./(r2*r2*r1))*(self.q[0]*r[1]-self.q[1]*r[0])*r[0] -self.q[1]/(r2*r1) + delAz[1] = (-3./(r2*r2*r1))*(self.q[0]*r[1]-self.q[1]*r[0])*r[1] +self.q[0]/(r2*r1) + delAz[2] = (-3./(r2*r2*r1))*(self.q[0]*r[1]-self.q[1]*r[0])*r[2] + + #ddidS2dr = 60.*(2.*Sx2*Sx - 3.*Sx2 + Sx)/(r2*(radius[1]-radius[0])*(radius[1]-radius[0])) + ddxdS2dx = 60.*(2.*Sx2*Sx - 3.*Sx2 + Sx)/((self.radius[1]-self.radius[0])*(self.radius[1]-self.radius[0])) + + # Calculate del (dS2/dx), del (dS2/dy), del (dS2/dz) + deldS2dx=np.zeros(3) + deldS2dy=np.zeros(3) + deldS2dz=np.zeros(3) + deldS2dx[0] = ddxdS2dx + deldS2dx[1] = 0. + deldS2dx[2] = 0. + # deldS2dx[0] = ddxdS2dr*r[0]*r[0] -(r[0]/(r2*r1))*dS2dr*r[0] + dS2dr/r1 + # deldS2dx[1] = ddxdS2dr*r[0]*r[1] -(r[0]/(r2*r1))*dS2dr*r[1] + # deldS2dx[2] = ddxdS2dr*r[0]*r[2] -(r[0]/(r2*r1))*dS2dr*r[2] + # deldS2dy[0] = ddidS2dr*r[1]*r[0] -(r[1]/(r2*r1))*dS2dr*r[0] + # deldS2dy[1] = ddidS2dr*r[1]*r[1] -(r[1]/(r2*r1))*dS2dr*r[1] + dS2dr/r1 + # deldS2dy[2] = ddidS2dr*r[1]*r[2] -(r[1]/(r2*r1))*dS2dr*r[2] + # deldS2dz[0] = ddidS2dr*r[2]*r[0] -(r[2]/(r2*r1))*dS2dr*r[0] + # deldS2dz[1] = ddidS2dr*r[2]*r[1] -(r[2]/(r2*r1))*dS2dr*r[1] + # deldS2dz[2] = ddidS2dr*r[2]*r[2] -(r[2]/(r2*r1))*dS2dr*r[2] + dS2dr/r1 + + # Calculate del(del S2(Sx) cross A)@i=x, del(del S2(Sx) cross A)@i=y, del(del S2(Sx) cross A)@i=z + ddS2crossA=np.zeros([3,3]) + # derivatives of X-directional field + ddS2crossA[0][0] = 0.#deldS2dy[0]*A[2] + dS2cart[1]*delAz[0] - deldS2dz[0]*A[1] - dS2cart[2]*delAy[0] + ddS2crossA[0][1] = 0.#deldS2dy[1]*A[2] + dS2cart[1]*delAz[1] - deldS2dz[1]*A[1] - dS2cart[2]*delAy[1] + ddS2crossA[0][2] = 0.#deldS2dy[2]*A[2] + dS2cart[1]*delAz[2] - deldS2dz[2]*A[1] - dS2cart[2]*delAy[2] + # derivatives of Y-directional field + ddS2crossA[1][0] = - deldS2dx[0]*A[2] - dS2cart[0]*delAz[0] #deldS2dz[0]*A[0] + dS2cart[2]*delAx[0] + ddS2crossA[1][1] = - deldS2dx[1]*A[2] - dS2cart[0]*delAz[1] #deldS2dz[1]*A[0] + dS2cart[2]*delAx[1] + ddS2crossA[1][2] = - deldS2dx[2]*A[2] - dS2cart[0]*delAz[2] #deldS2dz[2]*A[0] + dS2cart[2]*delAx[2] + # derivatives of Z-directional field + ddS2crossA[2][0] = deldS2dx[0]*A[1] + dS2cart[0]*delAy[0] #- deldS2dy[0]*A[0] - dS2cart[1]*delAx[0] + ddS2crossA[2][1] = deldS2dx[1]*A[1] + dS2cart[0]*delAy[1] #- deldS2dy[1]*A[0] - dS2cart[1]*delAx[1] + ddS2crossA[2][2] = deldS2dx[2]*A[1] + dS2cart[0]*delAy[2] #- deldS2dy[2]*A[0] - dS2cart[1]*delAx[2] + + return S2*delB + dS2cart[dComponent]*B + ddS2crossA[fComponent][dComponent] + + print("ERROR") + return 0 # dummy, but prevents gcc from yelling + + diff --git a/doc/vectordipole/fluxfunction.py b/doc/vectordipole/fluxfunction.py new file mode 100644 index 000000000..37fc5ad37 --- /dev/null +++ b/doc/vectordipole/fluxfunction.py @@ -0,0 +1,229 @@ +# /* +# * This file is part of Vlasiator. 
+# * Copyright 2010-2016 Finnish Meteorological Institute +# * +# * For details of usage, see the COPYING file and read the "Rules of the Road" +# * at http://www.physics.helsinki.fi/vlasiator/ +# * +# * This program is free software; you can redistribute it and/or modify +# * it under the terms of the GNU General Public License as published by +# * the Free Software Foundation; either version 2 of the License, or +# * (at your option) any later version. +# * +# * This program is distributed in the hope that it will be useful, +# * but WITHOUT ANY WARRANTY; without even the implied warranty of +# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# * GNU General Public License for more details. +# * +# * You should have received a copy of the GNU General Public License along +# * with this program; if not, write to the Free Software Foundation, Inc., +# * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# */ +import numpy as np + +# Calculate fluxfunction by integrating along -z boundary first, +# and then going along z-direction. +def polar_computeFluxUp(BX,BY,BZ, dxdydz): + # Create fluxfunction-field to be the same shape as B + flux = np.zeros_like(BX) + sizes = np.array(BX.shape) + tmp_flux=0. + + # First fill the z=0 cells + for x in np.arange(sizes[0]-1,-1,-1): + tmp_flux -= BZ[x,0,0] * dxdydz[0] + flux[x,0,0] = tmp_flux + + # Now, for each row, integrate in z-direction. + for x in np.arange(sizes[0]): + tmp_flux = flux[x,0,0] + for z in np.arange(1,sizes[2]): + tmp_flux -= BX[x,0,z]*dxdydz[2] + flux[x,0,z] = tmp_flux + return flux + + # Calculate fluxfunction by integrating along +z boundary first, + # and then going along negative z-direction. +def polar_computeFluxDown(BX,BY,BZ,dxdydz): + # Create fluxfunction-field to be the same shape as B + flux = np.zeros_like(BX) + sizes = np.array(BX.shape) + tmp_flux=0. + + # Calculate flux-difference between bottom and top edge + # of +x boundary (so that values are consistent with computeFluxUp) + for z in np.arange(sizes[2]): + tmp_flux -= BX[sizes[0]-1,0,z]*dxdydz[2] + + # First, fill the z=max - 1 cells + for x in np.arange(sizes[0]-1,-1,-1): + tmp_flux -= BZ[x,0,sizes[2]-1] * dxdydz[0] + flux[x,0,sizes[2]-1] = tmp_flux + + # Now, for each row, integrate in -z-direction. + for x in np.arange(sizes[0]): + tmp_flux = flux[x,0,sizes[2]-1] + for z in np.arange(sizes[2]-1,0,-1): + tmp_flux += BX[x,0,z]*dxdydz[2] + flux[x,0,z] = tmp_flux + return flux + + +# Calculate fluxfunction by integrating along -x from the right boundary +def polar_computeFluxLeft(BX,BY,BZ,dxdydz): + # Create fluxfunction-field to be the same shape as B + flux = np.zeros_like(BX) + sizes = np.array(BX.shape) + tmp_flux=0. + bottom_right_flux=0. + + # First calculate flux difference to bottom right corner + # Now, for each row, integrate in -z-direction. + for z in np.arange(0,sizes[2]): + bottom_right_flux -= BX[sizes[0]-1,0,z] * dxdydz[2] + + tmp_flux = bottom_right_flux + for x in np.arange(sizes[0]-1,-1,-1): + tmp_flux -= BZ[x,0,z] * dxdydz[0] + flux[x,0,z] = tmp_flux + + return flux + +# namespace Equatorialplane { +# # Calculate fluxfunction by integrating along -y boundary first, +# # and then going along y-direction. 
+# std::vector<double> computeFluxUp(Field& B) {
+#   # Create fluxfunction-field to be the same shape as B
+#   std::vector<double> flux(B.dimension[0]->cells * B.dimension[1]->cells * B.dimension[2]->cells);
+
+#   long double tmp_flux=0.;
+
+#   # First, fill the y=3 cells
+#   for(int x=B.dimension[0]->cells-2; x>0; x--) {
+#     Vec3d bval = B.getCell(x,3,0);
+
+#     tmp_flux -= bval[1] * B.dx[0];
+#     flux[B.dimension[0]->cells * 3 + x] = tmp_flux;
+#   }
+
+#   # Now, for each row, integrate in y-direction.
+#   for(int x=1; x< B.dimension[0]->cells-1; x++) {
+
+#     tmp_flux = flux[B.dimension[0]->cells * 3 + x];
+#     for(int y=4; y< B.dimension[1]->cells; y++) {
+#       Vec3d bval = B.getCell(x,y,0);
+
+#       tmp_flux -= bval[0]*B.dx[1];
+#       flux[B.dimension[0]->cells * y + x] = tmp_flux;
+#     }
+#   }
+
+#   return flux;
+# }
+
+
+
+# # Calculate fluxfunction by integrating along +y boundary first,
+# # and then going along negative y-direction.
+# std::vector<double> computeFluxDown(Field& B) {
+#   # Create fluxfunction-field to be the same shape as B
+#   std::vector<double> flux(B.dimension[0]->cells * B.dimension[1]->cells * B.dimension[2]->cells);
+
+#   long double tmp_flux=0.;
+
+#   # Calculate flux-difference between bottom and top edge
+#   # of +x boundary (so that values are consistent with computeFluxUp)
+#   for(int y=3; y<B.dimension[1]->cells-4; y++) {
+#     Vec3d bval = B.getCell(B.dimension[0]->cells-2,y,0);
+
+#     tmp_flux -= bval[0]*B.dx[1];
+#   }
+
+#   # First, fill the y=max - 4 cells
+#   for(int x=B.dimension[0]->cells-2; x>0; x--) {
+#     Vec3d bval = B.getCell(x,B.dimension[1]->cells-4,0);
+
+#     tmp_flux -= bval[1] * B.dx[0];
+#     flux[B.dimension[0]->cells * (B.dimension[1]->cells - 4) + x] = tmp_flux;
+#   }
+
+#   # Now, for each row, integrate in -y-direction.
+#   for(int x=1; x< B.dimension[0]->cells-1; x++) {
+
+#     tmp_flux = flux[B.dimension[0]->cells * (B.dimension[1]->cells - 4) + x];
+#     for(int y=B.dimension[1]->cells-5; y > 0; y--) {
+#       Vec3d bval = B.getCell(x,y,0);
+
+#       tmp_flux += bval[0] * B.dx[1];
+#       flux[B.dimension[0]->cells * y + x] = tmp_flux;
+#     }
+#   }
+
+#   return flux;
+# }
+
+
+
+# # Calculate fluxfunction by integrating along -x from the right boundary
+# std::vector<double> computeFluxLeft(Field& B) {
+#   # Create fluxfunction-field to be the same shape as B
+#   std::vector<double> flux(B.dimension[0]->cells * B.dimension[1]->cells * B.dimension[2]->cells);
+
+#   long double tmp_flux=0.;
+#   long double bottom_right_flux=0.;
+
+#   # Now, for each row, integrate in -y-direction.
+#   for(int y=0; y < B.dimension[1]->cells; y++) {
+#     Vec3d bval = B.getCell(B.dimension[0]->cells-1,y,0);
+#     bottom_right_flux -= bval[0] * B.dx[1];
+#     tmp_flux = bottom_right_flux;
+#     for(int x=B.dimension[0]->cells-1; x>0; x--) {
+
+#       bval = B.getCell(x,y,0);
+
+#       tmp_flux -= bval[1] * B.dx[0];
+#       flux[B.dimension[0]->cells * y + x] = tmp_flux;
+#     }
+#   }
+
+#   return flux;
+# }
+
+# }
+
+
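Before the median helpers, a quick sanity check of the polar sweeps defined above: for a uniform field B = (0, 0, B0) the flux function must be linear in x and constant in z, so its contours are exactly the vertical field lines. A minimal sketch (the 10x1x10 grid, B0, and the variable names are arbitrary choices for illustration; it assumes polar_computeFluxUp from this file is in scope):

import numpy as np
B0 = 1.e-9
BXu = np.zeros((10, 1, 10))
BZu = np.full((10, 1, 10), B0)
psi = polar_computeFluxUp(BXu, None, BZu, [1.0, 0.0, 1.0])  # BY is never read
assert np.allclose(psi[:, 0, 0], psi[:, 0, -1])   # constant along z
assert np.allclose(np.diff(psi[:, 0, 0]), B0)     # linear in x: d(psi)/dx = Bz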
+# Get a median of 3 values (branch-free!)
+# static double median3(double a, double b, double c) {
+#   return max(min(a,b), min(max(a,b),c));
+# }
+def median3(a, b, c):
+    return max(min(a,b), min(max(a,b),c))
+
+
+def calculate(BX,BY,BZ, dxdydz, dir=None):
+    sizes = np.array(BX.shape)
+
+    if True: # polar plane
+        fluxUp = polar_computeFluxUp(BX,BY,BZ,dxdydz)
+        fluxDown = polar_computeFluxDown(BX,BY,BZ,dxdydz)
+        fluxLeft = polar_computeFluxLeft(BX,BY,BZ,dxdydz)
+    # else:
+    #     fluxUp = Equatorialplane::computeFluxUp(B)
+    #     fluxDown = Equatorialplane::computeFluxDown(B)
+    #     fluxLeft = Equatorialplane::computeFluxLeft(B)
+
+    for x in np.arange(sizes[0]):
+        for y in np.arange(sizes[1]):
+            for z in np.arange(sizes[2]):
+                a = fluxUp[x,y,z]
+                b = fluxDown[x,y,z]
+                c = fluxLeft[x,y,z]
+                if dir==None:
+                    fluxUp[x,y,z] = median3(a,b,c)
+                elif dir=="down":
+                    fluxUp[x,y,z] = b
+                elif dir=="left":
+                    fluxUp[x,y,z] = c
+
+    return fluxUp
diff --git a/doc/vectordipole/vectorpotentialdipole_verify.py b/doc/vectordipole/vectorpotentialdipole_verify.py
new file mode 100644
index 000000000..3b21c5c98
--- /dev/null
+++ b/doc/vectordipole/vectorpotentialdipole_verify.py
@@ -0,0 +1,406 @@
+#!/usr/bin/env python
+
+# /*
+# * This file is part of Vlasiator.
+# * Copyright 2010-2016 Finnish Meteorological Institute
+# * Copyright 2017-2019 University of Helsinki
+# *
+# * For details of usage, see the COPYING file and read the "Rules of the Road"
+# * at http://www.physics.helsinki.fi/vlasiator/
+# *
+# * This program is free software; you can redistribute it and/or modify
+# * it under the terms of the GNU General Public License as published by
+# * the Free Software Foundation; either version 2 of the License, or
+# * (at your option) any later version.
+# *
+# * This program is distributed in the hope that it will be useful,
+# * but WITHOUT ANY WARRANTY; without even the implied warranty of
+# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# * GNU General Public License for more details.
+# *
+# * You should have received a copy of the GNU General Public License along
+# * with this program; if not, write to the Free Software Foundation, Inc.,
+# * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# */
+import numpy as np
+import math
+import sys,os
+import pytools as pt
+import matplotlib.pyplot as plt
+import fieldmodels
+
+''' Testing routine for different dipole formulations
+
+    run using "python vectorpotentialdipole_verify.py [arg1] [arg2]" where arg1 is a number from 0 to 4
+    for different test profile starting positions, directions, and dipole tilts.
+
+    If arg2 is present, the code also calculates verification of derivative terms.
+
+'''
+
+if len(sys.argv)!=1:
+    testset = int(sys.argv[1])
+else:
+    testset = 0
+
+if len(sys.argv)>2:
+    calcderivatives=True
+else:
+    calcderivatives=False
+
+plt.switch_backend('Agg')
+
+outfilename = "./vecpotdip_verify_"+str(testset)+".pdf"
+
+RE=6371000.
+#RE=1
+epsilon=1.e-15
+
+if testset==0:
+    tilt_angle_phi = 0.
+    tilt_angle_theta = 0.
+    line_theta = 0.
+    line_start = np.array([0,0,0])
+elif testset==1:
+    tilt_angle_phi = 0.
+    tilt_angle_theta = 0.
+    line_theta = 45.
+    line_start = np.array([0,0,0])
+elif testset==2:
+    tilt_angle_phi = 0.
+    tilt_angle_theta = 0.
+    line_theta = 0.
+    line_start = np.array([-3,-3,-3])
+elif testset==3:
+    tilt_angle_phi = 0.
+    tilt_angle_theta = 0.
+    line_theta = 45.
+    line_start = np.array([-3,-3,-3])
+elif testset==4:
+    tilt_angle_phi = 5.
+    tilt_angle_theta = 45.
+    line_theta = 0.
+ line_start = np.array([0,0,0]) +else: # Same as 0 + print("Default") + tilt_angle_phi = 0. + tilt_angle_theta = 0. + line_theta = 0. + line_start = np.array([0,0,0]) + +print("Test set "+str(testset)+" line start "+str(line_start)+" tilt phi "+str(tilt_angle_phi)+" tilt theta "+str(tilt_angle_theta)+" line theta "+str(line_theta)) + +line_phi = np.array([0,45,80,90,110,135])*math.pi/180. +line_theta = np.zeros(len(line_phi)) + line_theta * math.pi/180. +#line_start = np.array([-5,-5,-5]) +step = 0.1 + +linewidth=2 +linthresh=1.e-10 +fontsize=20 + +#fieldmodels.dipole.set_dipole(centerx, centery, centerz, tilt_phi, tilt_theta, mult=1.0, radius_f=None, radius_z=None): +dip = fieldmodels.dipole(0,0,0,tilt_angle_phi,tilt_angle_theta) +mdip = fieldmodels.dipole(80*RE,0,0,tilt_angle_phi,180.-tilt_angle_theta) + +# Create figure +fig = plt.figure() +fig.set_size_inches(20,30) +nsubplots=len(line_theta) +for i in range(nsubplots): + fig.add_subplot(nsubplots,1,i+1) +axes = fig.get_axes() + +radii = np.arange(0.1,100,step)*RE +nr=len(radii) +radiiRE = radii/RE + +fig.suptitle(r"Profiles starting from ("+str(line_start[0])+","+str(line_start[1])+","+str(line_start[2])+") [RE] with dipole tilt $\Phi="+str(int(tilt_angle_phi))+"$, $\Theta="+str(int(tilt_angle_theta))+"$", fontsize=fontsize) + +for i in range(nsubplots): + print("subplot ",i) + ax = axes[i] + + ax.text(0.2,0.08,r"profile with $\theta="+str(int(line_theta[i]*180./math.pi))+"$, $\phi="+str(int(line_phi[i]*180./math.pi))+"$",transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.7), fontsize=fontsize) + + xv = line_start[0]*RE + radii*np.sin(line_phi[i])*np.cos(line_theta[i]) + yv = line_start[1]*RE + radii*np.sin(line_phi[i])*np.sin(line_theta[i]) + zv = line_start[2]*RE + radii*np.cos(line_phi[i]) + + B1 = np.zeros([nr,4]) # X-scaled vector dipole + B2 = np.zeros([nr,4]) # regular dipole + B3 = np.zeros([nr,4]) # regular dipole + mirror dipole + B4 = np.zeros([nr,4]) # line dipole + mirror dipole + + for j in range(nr): + for k in range(3): + B1[j,k] = dip.getX(xv[j],yv[j],zv[j],0,k,0) + B2[j,k] = dip.get_old(xv[j],yv[j],zv[j],0,k,0) + B3[j,k] = B2[j,k] + mdip.get_old(xv[j],yv[j],zv[j],0,k,0) + B4[j,k] = dip.get_ldp(xv[j],yv[j],zv[j],0,k,0) + B4[j,k] = B4[j,k] + mdip.get_ldp(xv[j],yv[j],zv[j],0,k,0) + B1[j,3] = np.linalg.norm(B1[j,0:3]) + B2[j,3] = np.linalg.norm(B2[j,0:3]) + B3[j,3] = np.linalg.norm(B3[j,0:3]) + B4[j,3] = np.linalg.norm(B4[j,0:3]) + + colors=['r','k','b','magenta'] + coords = ['x','y','z','mag'] + + for k in range(4): + ax.plot(radiiRE, B1[:,k], c=colors[k], linestyle='-', linewidth=linewidth, label='vectorpot B'+coords[k], zorder=-10) + ax.plot(radiiRE, B2[:,k], c=colors[k], linestyle='--', linewidth=linewidth, label='regular B'+coords[k]) + ax.plot(radiiRE, B3[:,k], c=colors[k], linestyle=':', linewidth=linewidth, label='reg+mirror B'+coords[k]) + if tilt_angle_phi==0: + ax.plot(radiiRE, B4[:,k], c=colors[k], linestyle='-.', linewidth=linewidth, label='line+mirror B'+coords[k]) + + ax.set_xlabel(r"$r$ [$r_\mathrm{E}$]", fontsize=fontsize) + ax.set_xlim([0,70]) + #ax.set_yscale('log', nonposy='clip') + ax.set_yscale('symlog', linthreshy=linthresh) + for item in ax.get_xticklabels(): + item.set_fontsize(fontsize) + for item in ax.get_yticklabels(): + item.set_fontsize(fontsize) + + ylims = np.array(ax.get_ylim()) + if ylims[0] < -1e-4: + ylims[0] = -1e-4 + if ylims[1] > 1e-4: + ylims[1] = 1e-4 + ax.set_ylim(ylims) + +handles, labels = axes[0].get_legend_handles_labels() +axes[0].legend(handles, labels, 
fontsize=fontsize) + +fig.savefig(outfilename) +plt.close() + + + +if calcderivatives: + # Derivatives + step2=0.00001 # distance in each direction for calculating numerical derivative + for kkk in range(3): + print("derivatives d"+coords[kkk]) + dB1 = np.zeros([nr,3,3]) + dB2 = np.zeros([nr,3,3]) + dB3 = np.zeros([nr,3,3]) + dB4 = np.zeros([nr,3,3]) + + # Create figure + fig = plt.figure() + fig.set_size_inches(20,30) + for i in range(nsubplots): + fig.add_subplot(nsubplots,1,i+1) + axes = fig.get_axes() + + fig.suptitle(r"Profiles starting from ("+str(line_start[0])+","+str(line_start[1])+","+str(line_start[2])+") [RE] with dipole tilt $\Phi="+str(int(tilt_angle_phi))+"$, $\Theta="+str(int(tilt_angle_theta))+"$", fontsize=fontsize) + + for i in range(nsubplots): + print("derivatives subplot ",i) + ax = axes[i] + + xv = line_start[0]*RE + radii*np.sin(line_phi[i])*np.cos(line_theta[i]) + yv = line_start[1]*RE + radii*np.sin(line_phi[i])*np.sin(line_theta[i]) + zv = line_start[2]*RE + radii*np.cos(line_phi[i]) + + for j in range(nr): + for k in range(3): + B1[j,k] = dip.getX(xv[j],yv[j],zv[j],0,k,0) + B2[j,k] = dip.get_old(xv[j],yv[j],zv[j],0,k,0) + B3[j,k] = B2[j,k] + mdip.get_old(xv[j],yv[j],zv[j],0,k,0) + B4[j,k] = dip.get_ldp(xv[j],yv[j],zv[j],0,k,0) + B4[j,k] = B4[j,k] + mdip.get_ldp(xv[j],yv[j],zv[j],0,k,0) + #for kk in range(3): + kk=kkk + dB1[j,k,kk] = dip.getX(xv[j],yv[j],zv[j],1,k,kk) + dB2[j,k,kk] = dip.get_old(xv[j],yv[j],zv[j],1,k,kk) + dB3[j,k,kk] = dB2[j,k,kk] + mdip.get_old(xv[j],yv[j],zv[j],1,k,kk) + dB4[j,k,kk] = dip.get_ldp(xv[j],yv[j],zv[j],1,k,kk) + dB4[j,k,kk] = dB4[j,k,kk] + mdip.get_ldp(xv[j],yv[j],zv[j],1,k,kk) + + # analytical derivative vs numerical derivative + for j in np.arange(1,nr-1): + for k in range(3): + + # d/dx + if kkk==0: + cdbx=(dip.getX(xv[j]+step2*RE,yv[j],zv[j],0,k,0) - dip.getX(xv[j]-step2*RE,yv[j],zv[j],0,k,0))/(2*step2*RE) + if abs(cdbx) > 0: + dB1[j,k,0] = dB1[j,k,0]/cdbx + elif (abs(cdbx) 0: + dB2[j,k,0] = dB2[j,k,0]/cdbx + elif (abs(cdbx) 0: + dB3[j,k,0] = dB3[j,k,0]/cdbx + elif (abs(cdbx) 0: + dB4[j,k,0] = dB4[j,k,0]/cdbx + elif (abs(cdbx) 0: + dB1[j,k,1] = dB1[j,k,1]/cdby + elif (abs(cdby) 0: + dB2[j,k,1] = dB2[j,k,1]/cdby + elif (abs(cdby) 0: + dB3[j,k,1] = dB3[j,k,1]/cdby + elif (abs(cdby) 0: + dB4[j,k,1] = dB4[j,k,1]/cdby + elif (abs(cdby) 0: + dB1[j,k,2] = dB1[j,k,2]/cdbz + elif (abs(cdbz) 0: + dB2[j,k,2] = dB2[j,k,2]/cdbz + elif (abs(cdbz) 0: + dB3[j,k,2] = dB3[j,k,2]/cdbz + elif (abs(cdbz) 0: + dB4[j,k,2] = dB4[j,k,2]/cdbz + elif (abs(cdbz) Date: Thu, 9 May 2019 13:33:38 +0300 Subject: [PATCH 396/602] Corrections to scripts for analyzing vector potential dipole field --- doc/vectordipole/fluxfunction.py | 142 +++++++++--- ...vectorpotentialdipole_compare_with_data.py | 152 +++++++++++++ .../vectorpotentialdipole_fluxfunctions.py | 212 ++++++++++++++++++ .../vectorpotentialdipole_streamlines.py | 170 ++++++++++++++ .../vectorpotentialdipole_verify.py | 91 ++++---- projects/Magnetosphere/Magnetosphere.cpp | 2 +- 6 files changed, 700 insertions(+), 69 deletions(-) create mode 100644 doc/vectordipole/vectorpotentialdipole_compare_with_data.py create mode 100644 doc/vectordipole/vectorpotentialdipole_fluxfunctions.py create mode 100644 doc/vectordipole/vectorpotentialdipole_streamlines.py diff --git a/doc/vectordipole/fluxfunction.py b/doc/vectordipole/fluxfunction.py index 37fc5ad37..7d8ca1863 100644 --- a/doc/vectordipole/fluxfunction.py +++ b/doc/vectordipole/fluxfunction.py @@ -28,18 +28,20 @@ def polar_computeFluxUp(BX,BY,BZ, 
dxdydz):
     flux = np.zeros_like(BX)
     sizes = np.array(BX.shape)
     tmp_flux=0.
+    flux[sizes[0]-1,0,0] = tmp_flux
 
-    # First fill the z=0 cells
-    for x in np.arange(sizes[0]-1,-1,-1):
+    # First fill the z=0 cells in the -x direction
+    for x in np.arange(sizes[0]-2,-1,-1):
         tmp_flux -= BZ[x,0,0] * dxdydz[0]
         flux[x,0,0] = tmp_flux
 
-    # Now, for each row, integrate in z-direction.
+    # Now, for each column, integrate in +z-direction.
     for x in np.arange(sizes[0]):
-      tmp_flux = flux[x,0,0]
-      for z in np.arange(1,sizes[2]):
-        tmp_flux -= BX[x,0,z]*dxdydz[2]
-        flux[x,0,z] = tmp_flux
+        tmp_flux = flux[x,0,0]
+        for z in np.arange(1,sizes[2]):
+            tmp_flux -= BX[x,0,z]*dxdydz[2]
+            flux[x,0,z] = tmp_flux
+
     return flux
 
 # Calculate fluxfunction by integrating along +z boundary first,
 # and then going along negative z-direction.
@@ -49,23 +51,25 @@ def polar_computeFluxDown(BX,BY,BZ,dxdydz):
     flux = np.zeros_like(BX)
     sizes = np.array(BX.shape)
     tmp_flux=0.
+    flux[sizes[0]-1,0,0] = tmp_flux
 
-    # Calculate flux-difference between bottom and top edge
-    # of +x boundary (so that values are consistent with computeFluxUp)
-    for z in np.arange(sizes[2]):
+    # First fill the x=xmax cells in the +z direction
+    for z in np.arange(1,sizes[2]):
         tmp_flux -= BX[sizes[0]-1,0,z]*dxdydz[2]
+        flux[sizes[0]-1,0,z] = tmp_flux
 
-    # First, fill the z=max - 1 cells
-    for x in np.arange(sizes[0]-1,-1,-1):
+    # Then fill the z=max - 1 cells
+    for x in np.arange(sizes[0]-2,-1,-1):
         tmp_flux -= BZ[x,0,sizes[2]-1] * dxdydz[0]
         flux[x,0,sizes[2]-1] = tmp_flux
 
-    # Now, for each row, integrate in -z-direction.
-    for x in np.arange(sizes[0]):
+    # Now, for each column, integrate in -z-direction.
+    for x in np.arange(sizes[0]-1):
         tmp_flux = flux[x,0,sizes[2]-1]
-        for z in np.arange(sizes[2]-1,0,-1):
+        for z in np.arange(sizes[2]-2,-1,-1):
             tmp_flux += BX[x,0,z]*dxdydz[2]
             flux[x,0,z] = tmp_flux
+
     return flux
 
 
@@ -75,20 +79,50 @@ def polar_computeFluxLeft(BX,BY,BZ,dxdydz):
     flux = np.zeros_like(BX)
     sizes = np.array(BX.shape)
     tmp_flux=0.
-    bottom_right_flux=0.
+    flux[sizes[0]-1,0,0] = tmp_flux
 
-    # First calculate flux difference to bottom right corner
-    # Now, for each row, integrate in -z-direction.
-    for z in np.arange(0,sizes[2]):
-        bottom_right_flux -= BX[sizes[0]-1,0,z] * dxdydz[2]
+    # First fill the x=xmax cells in the +z direction
+    for z in np.arange(1,sizes[2]):
+        tmp_flux -= BX[sizes[0]-1,0,z]*dxdydz[2]
+        flux[sizes[0]-1,0,z] = tmp_flux
 
-    tmp_flux = bottom_right_flux
-    for x in np.arange(sizes[0]-1,-1,-1):
+    # Now, for each row, integrate in -x-direction.
+    for z in np.arange(0,sizes[2]):
+        tmp_flux = flux[sizes[0]-1,0,z]
+        for x in np.arange(sizes[0]-2,-1,-1):
             tmp_flux -= BZ[x,0,z] * dxdydz[0]
             flux[x,0,z] = tmp_flux
 
     return flux
 
+# Calculate fluxfunction by integrating along +x from the left boundary
+def polar_computeFluxRight(BX,BY,BZ,dxdydz):
+    # Create fluxfunction-field to be the same shape as B
+    flux = np.zeros_like(BX)
+    sizes = np.array(BX.shape)
+    tmp_flux=0.
+    flux[sizes[0]-1,0,0] = tmp_flux
+
+    # First fill the z=0 cells in the -x direction
+    for x in np.arange(sizes[0]-2,-1,-1):
+        tmp_flux -= BZ[x,0,0]*dxdydz[0]
+        flux[x,0,0] = tmp_flux
+
+    # Then fill the x=0 cells in the +z direction
+    for z in np.arange(1,sizes[2]):
+        tmp_flux -= BX[0,0,z]*dxdydz[2]
+        flux[0,0,z] = tmp_flux
+
+    # Now, for each row, integrate in +x-direction.
+    for z in np.arange(1,sizes[2]):
+        tmp_flux = flux[0,0,z]
+        for x in np.arange(1,sizes[0]):
+            tmp_flux += BZ[x,0,z] * dxdydz[0]
+            flux[x,0,z] = tmp_flux
+
+    return flux
+
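All four sweeps integrate the same meridional-plane flux function: reading the sign conventions off the loops above, Bz = +dpsi/dx and Bx = -dpsi/dz, i.e. B = grad(psi) x yhat, so for an in-plane divergence-free field B.grad(psi) = 0 and contours of psi trace field lines. The up/down/left/right variants differ only in their integration path; taking a median over them (below) suppresses the path dependence introduced by discretization error and any residual in-plane divergence.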
 
 # namespace Equatorialplane {
 # # Calculate fluxfunction by integrating along -y boundary first,
 # # and then going along y-direction.
@@ -199,14 +233,45 @@ def polar_computeFluxLeft(BX,BY,BZ,dxdydz):
 def median3(a, b, c):
     return max(min(a,b), min(max(a,b),c))
 
+def median4(a, b, c, d):
+    # This actually drops the largest and smallest value and returns the mean of the remaining two
+    l = [a,b,c,d]
+    l.sort()
+    return np.mean(l[1:3])
+
+def mean4(a, b, c, d):
+    return np.mean([a,b,c,d])
+
+def calculate(BX,BY,BZ, dxdydz):
+    sizes = np.array(BX.shape)
+
+    if True: # polar plane
+        fluxUp = polar_computeFluxUp(BX,BY,BZ,dxdydz)
+        fluxDown = polar_computeFluxDown(BX,BY,BZ,dxdydz)
+        fluxLeft = polar_computeFluxLeft(BX,BY,BZ,dxdydz)
+    # else:
+    #     fluxUp = Equatorialplane::computeFluxUp(B)
+    #     fluxDown = Equatorialplane::computeFluxDown(B)
+    #     fluxLeft = Equatorialplane::computeFluxLeft(B)
+
+    for x in np.arange(sizes[0]):
+        for y in np.arange(sizes[1]):
+            for z in np.arange(sizes[2]):
+                a = fluxUp[x,y,z]
+                b = fluxDown[x,y,z]
+                c = fluxLeft[x,y,z]
+                fluxUp[x,y,z] = median3(a,b,c)
+    return fluxUp
+
-def calculate(BX,BY,BZ, dxdydz, dir=None):
+def calculate4(BX,BY,BZ, dxdydz):
     sizes = np.array(BX.shape)
 
     if True: # polar plane
         fluxUp = polar_computeFluxUp(BX,BY,BZ,dxdydz)
         fluxDown = polar_computeFluxDown(BX,BY,BZ,dxdydz)
         fluxLeft = polar_computeFluxLeft(BX,BY,BZ,dxdydz)
+        fluxRight = polar_computeFluxRight(BX,BY,BZ,dxdydz)
     # else:
     #     fluxUp = Equatorialplane::computeFluxUp(B)
     #     fluxDown = Equatorialplane::computeFluxDown(B)
@@ -218,12 +283,29 @@ def calculate(BX,BY,BZ, dxdydz, dir=None):
                 a = fluxUp[x,y,z]
                 b = fluxDown[x,y,z]
                 c = fluxLeft[x,y,z]
-                if dir==None:
-                    fluxUp[x,y,z] = median3(a,b,c)
-                elif dir=="down":
-                    fluxUp[x,y,z] = b
-                elif dir=="left":
-                    fluxUp[x,y,z] = c
-
+                d = fluxRight[x,y,z]
+                fluxUp[x,y,z] = median4(a,b,c,d)
     return fluxUp
+
+def calculate4mean(BX,BY,BZ, dxdydz):
+    sizes = np.array(BX.shape)
+    if True: # polar plane
+        fluxUp = polar_computeFluxUp(BX,BY,BZ,dxdydz)
+        fluxDown = polar_computeFluxDown(BX,BY,BZ,dxdydz)
+        fluxLeft = polar_computeFluxLeft(BX,BY,BZ,dxdydz)
+        fluxRight = polar_computeFluxRight(BX,BY,BZ,dxdydz)
+    # else:
+    #     fluxUp = Equatorialplane::computeFluxUp(B)
+    #     fluxDown = Equatorialplane::computeFluxDown(B)
+    #     fluxLeft = Equatorialplane::computeFluxLeft(B)
+
+    for x in np.arange(sizes[0]):
+        for y in np.arange(sizes[1]):
+            for z in np.arange(sizes[2]):
+                a = fluxUp[x,y,z]
+                b = fluxDown[x,y,z]
+                c = fluxLeft[x,y,z]
+                d = fluxRight[x,y,z]
+                fluxUp[x,y,z] = mean4(a,b,c,d)
 
     return fluxUp
diff --git a/doc/vectordipole/vectorpotentialdipole_compare_with_data.py b/doc/vectordipole/vectorpotentialdipole_compare_with_data.py
new file mode 100644
index 000000000..47efccc22
--- /dev/null
+++ b/doc/vectordipole/vectorpotentialdipole_compare_with_data.py
@@ -0,0 +1,152 @@
+#!/usr/bin/env python
+
+# /*
+# * This file is part of Vlasiator.
+# * Copyright 2010-2016 Finnish Meteorological Institute +# * Copyright 2017-2019 University of Helsinki +# * +# * For details of usage, see the COPYING file and read the "Rules of the Road" +# * at http://www.physics.helsinki.fi/vlasiator/ +# * +# * This program is free software; you can redistribute it and/or modify +# * it under the terms of the GNU General Public License as published by +# * the Free Software Foundation; either version 2 of the License, or +# * (at your option) any later version. +# * +# * This program is distributed in the hope that it will be useful, +# * but WITHOUT ANY WARRANTY; without even the implied warranty of +# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# * GNU General Public License for more details. +# * +# * You should have received a copy of the GNU General Public License along +# * with this program; if not, write to the Free Software Foundation, Inc., +# * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# */ +import numpy as np +import math +import sys,os +import pytools as pt +import matplotlib.pyplot as plt +import fieldmodels + +''' Testing routine for vector potential dipole + +run using "python vectorpotentialdipole_compare_with_data.py [ang]" where +angle (given in degrees) is the polar angle of the line profile to plot. + Plots also Vlasiator BCH profiles at different times + +''' + +if len(sys.argv)!=1: + line_phi = float(sys.argv[1]) +else: + line_phi = 45. + +plt.switch_backend('Agg') + +outfilename = "./vecpotdip_compare_"+str(int(line_phi))+".png" + +inputLocation="/proj/vlasov/2D/BCH/bulk/" +times = [0,10,50,100,200,500] +colors = ['r','g','b','magenta','k'] +timefulls = [str(time).rjust(7, '0') for time in times] +file_names = [inputLocation+"bulk."+timefull+".vlsv" for timefull in timefulls] + +vlsvobj = [] +for i in range(len(times)): + vlsvobj.append(pt.vlsvfile.VlsvReader(file_name=file_names[i])) + +RE=6371000. + +tilt_angle_phi = 0. +tilt_angle_theta = 0. +line_theta = 0. * math.pi/180. +line_phi = line_phi * math.pi/180. 
+line_start = np.array([0,0,0]) + +step = 0.1 +linewidth=2 +linthresh=1.e-10 +fontsize=20 + + +#fieldmodels.dipole.set_dipole(centerx, centery, centerz, tilt_phi, tilt_theta, mult=1.0, radius_f=None, radius_z=None): +dip = fieldmodels.dipole(0,0,0,tilt_angle_phi,tilt_angle_theta) +mdip = fieldmodels.dipole(80*RE,0,0,tilt_angle_phi,180.-tilt_angle_theta) + +# Create figure +fig = plt.figure() +fig.set_size_inches(20,30) +nsubplots=3 +for i in range(nsubplots): + fig.add_subplot(nsubplots,1,i+1) +axes = fig.get_axes() + +radii = np.arange(0.1,45,step)*RE +nr=len(radii) +radiiRE = radii/RE + +fig.suptitle(r"Profiles with $\theta="+str(int(line_theta*180./math.pi))+"$, $\phi="+str(int(line_phi*180./math.pi))+"$ starting from ("+str(line_start[0])+","+str(line_start[1])+","+str(line_start[2])+") [RE] with dipole tilt $\Phi="+str(int(tilt_angle_phi*180./math.pi))+"$, $\Theta="+str(int(tilt_angle_theta*180./math.pi))+"$", fontsize=fontsize) + +xv = line_start[0]*RE + radii*np.sin(line_phi)*np.cos(line_theta) +yv = line_start[1]*RE + radii*np.sin(line_phi)*np.sin(line_theta) +zv = line_start[2]*RE + radii*np.cos(line_phi) + +B1 = np.zeros([nr,3]) +B2 = np.zeros([nr,3]) +B3 = np.zeros([nr,3]) +B4 = np.zeros([nr,3]) + +for j in range(nr): + for k in range(3): + B1[j,k] = dip.get(xv[j],yv[j],zv[j],0,k,0) +# B2[j,k] = dip.get_old(xv[j],yv[j],zv[j],0,k,0) +# B3[j,k] = B2[j,k] + mdip.get_old(xv[j],yv[j],zv[j],0,k,0) +# B4[j,k] = dip.get_ldp(xv[j],yv[j],zv[j],0,k,0) +# B4[j,k] = B4[j,k] + mdip.get_ldp(xv[j],yv[j],zv[j],0,k,0) + +colors=['r','k','b'] +coords = ['x','y','z'] + +for k in range(nsubplots): + ax = axes[k] + print("component "+coords[k]) + ax.plot(radiiRE, B1[:,k], c=colors[-1], linestyle='-', linewidth=linewidth, label='vectorpot B'+coords[k], zorder=-10) + #ax.plot(radiiRE, B2[:,k], c=colors[k], linestyle='--', linewidth=linewidth, label='regular B'+coords[k]) + #ax.plot(radiiRE, B3[:,k], c=colors[k], linestyle=':', linewidth=linewidth, label='reg+mirror B'+coords[k]) + #if tilt_angle_phi==0: + # ax.plot(radiiRE, B4[:,k], c=colors[k], linestyle='-.', linewidth=linewidth, label='line+mirror B'+coords[k]) + + for i in range(len(times)): + vf=vlsvobj[i] + print('time t='+str(int(times[i]*0.5))+'s') + res = pt.calculations.cut_through_step(vf, [xv[0],0,zv[0]], [xv[-1],0,zv[-1]]) + cut = res[0].data + pr_dist = res[1].data + pr_coords = res[2].data + pr_Re = np.array(pr_dist)/RE + pr_B = vf.read_variable("B", operator=coords[k],cellids=cut) + #ax.plot(pr_Re, np.array(pr_B), c=colors[i], linestyle='-', linewidth=linewidth, label='vlsv t='+str(int(times[i]*0.5))+'s',zorder=i) + ax.plot(pr_Re, np.array(pr_B), linestyle='-', linewidth=linewidth, label='vlsv t='+str(int(times[i]*0.5))+'s',zorder=i) + + ax.set_xlabel(r"$r$ [$r_\mathrm{E}$]", fontsize=fontsize) + ax.set_xlim([0,70]) + #ax.set_yscale('log', nonposy='clip') + ax.set_yscale('symlog', linthreshy=linthresh) + for item in ax.get_xticklabels(): + item.set_fontsize(fontsize) + for item in ax.get_yticklabels(): + item.set_fontsize(fontsize) + + ylims = np.array(ax.get_ylim()) + if ylims[0] < -1e-4: + ylims[0] = -1e-4 + if ylims[1] > 1e-4: + ylims[1] = 1e-4 + ax.set_ylim(ylims) + + handles, labels = ax.get_legend_handles_labels() + ax.legend(handles, labels, fontsize=fontsize) + +fig.savefig(outfilename) +plt.close() diff --git a/doc/vectordipole/vectorpotentialdipole_fluxfunctions.py b/doc/vectordipole/vectorpotentialdipole_fluxfunctions.py new file mode 100644 index 000000000..ca29ee81c --- /dev/null +++ 
b/doc/vectordipole/vectorpotentialdipole_fluxfunctions.py @@ -0,0 +1,212 @@ +#!/usr/bin/env python +import matplotlib.pyplot as plt + +# /* +# * This file is part of Vlasiator. +# * Copyright 2010-2016 Finnish Meteorological Institute +# * Copyright 2017-2019 University of Helsinki +# * +# * For details of usage, see the COPYING file and read the "Rules of the Road" +# * at http://www.physics.helsinki.fi/vlasiator/ +# * +# * This program is free software; you can redistribute it and/or modify +# * it under the terms of the GNU General Public License as published by +# * the Free Software Foundation; either version 2 of the License, or +# * (at your option) any later version. +# * +# * This program is distributed in the hope that it will be useful, +# * but WITHOUT ANY WARRANTY; without even the implied warranty of +# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# * GNU General Public License for more details. +# * +# * You should have received a copy of the GNU General Public License along +# * with this program; if not, write to the Free Software Foundation, Inc., +# * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# */ +import numpy as np +import math +import sys,os +import pytools as pt +import matplotlib.pyplot as plt +import matplotlib as mpl +import scipy +import fluxfunction as ff +import fieldmodels + +''' Testing routine for different dipole formulations + + Plots flux function contours of magnetic field in the meridional x-z-plane for four different models. + Also evaluates in-plane divergence, as the flux function doesn't work properly for the 3D dipole models. + +''' + +if len(sys.argv)!=1: + testset = int(sys.argv[1]) +else: + testset = 0 + +plt.switch_backend('Agg') +print(mpl.__version__) +outfilename = "./vecpotdip_verify_fluxfunctions_"+str(testset)+".png" + + +# Select flux function method +ffc = ff.calculate # 3-way calculation +#ffc = ff.calculate4 # 4-way calculation, returns mean of middle two values +#ffc = ff.calculate4mean # 4-way calculation, returns mean of values + +#flux_levels = np.linspace(-5.e-5,5e-4,100) +#flux_levels = np.linspace(-5.e-5,0,10000) + +#flux_levels = np.linspace(5.e-7,5e-4,1000) +#flux_levels = np.linspace(5.e-12,5e-7,1000) +#flux_levels = np.reshape([np.linspace(-1.e-5,-1e-14,200),np.linspace(1.e-14,1e-5,200)],400) +#flux_levels = np.reshape([np.logspace(-14,-5,200)*-1,np.logspace(-14,-5,200)],400) + +#flux_levels = np.reshape([np.linspace(-1.e-6,-1e-14,300),np.linspace(1.e-14,1e-6,300)],600) +flux_levels = np.reshape([np.linspace(-5.e-6,-1e-14,1000),np.linspace(1.e-14,5e-6,1000)],2000) + +RE=6371000. +epsilon=1.e-15 + +if testset==0: + tilt_angle_phi = 0. + tilt_angle_theta = 0. + BGB=[0,0,0] +elif testset==1: + tilt_angle_phi = 10. + tilt_angle_theta = 0. + BGB=[0,0,0] +elif testset==2: + tilt_angle_phi = 0. + tilt_angle_theta = 0. + BGB=[0,0,-5.e-9] +elif testset==3: # Warning: this might not be a valid check, using flux functions with an out-of-plane tilted dipole? + tilt_angle_phi = 10. + tilt_angle_theta = 45. + BGB=[0,0,0] +else: # Same as 0 + print("Default") + tilt_angle_phi = 0. + tilt_angle_theta = 0.
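+# Background on the flux function (a sketch, up to sign convention; the
+# fluxfunction module is assumed to implement an equivalent line integral):
+# for a divergence-free field in the x-z plane one can write
+# B = grad(psi) x yhat, i.e. Bx = -dpsi/dz and Bz = dpsi/dx, so contours of
+# psi trace the in-plane field lines. A crude cumulative-sum version on a
+# uniform grid (dx, dz are hypothetical spacings, for illustration only):
+#   psi0 = np.cumsum(BZ[:,0,0])*dx                        # Bz along x at z[0]
+#   psi = psi0[:,None] - np.cumsum(BX[:,0,:],axis=1)*dz   # then -Bx along z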
+ BGB=[0,0,0] + +#fieldmodels.dipole.set_dipole(centerx, centery, centerz, tilt_phi, tilt_theta, mult=1.0, radius_f=None, radius_z=None): +dip = fieldmodels.dipole(0,0,0,tilt_angle_phi,tilt_angle_theta) +mdip = fieldmodels.dipole(80*RE,0,0,tilt_angle_phi,180.-tilt_angle_theta) + + +fontsize=20 +# Create figure +fig = plt.figure() +fig.set_size_inches(20,20) + +gs = mpl.gridspec.GridSpec(2, 2, wspace=0.25, hspace=0.25) +fig.add_subplot(gs[0, 0]) +fig.add_subplot(gs[0, 1]) +fig.add_subplot(gs[1, 0]) +fig.add_subplot(gs[1, 1]) +axes = fig.get_axes() + +fig.suptitle(r"Flux function contours of meridional plane magnetic field with dipole tilt $\Phi="+str(int(tilt_angle_phi))+"$, $\Theta="+str(int(tilt_angle_theta))+"$ with IMF=("+str(BGB[0])+","+str(BGB[1])+","+str(BGB[2])+")", fontsize=fontsize) + + +nx = 200 +nz = 200 +xmin, xmax = (-59,41) +zmin, zmax = (-50,50) + +x = np.linspace(xmin,xmax,num=nx) +z = np.linspace(zmin,zmax,num=nz) +BX = np.zeros([nx,1,nz]) +BZ = np.zeros([nx,1,nz]) + +divB = np.zeros([nx,1,nz]) + +[Xmesh,Zmesh] = scipy.meshgrid(x,z) + +dxdydz=[x[1]-x[0],0,z[1]-z[0]] + + +ax = axes[0] +print("0") +for i in range(len(x)): + for j in range(len(z)): + BX[i,0,j] = dip.get_old(x[i]*RE,0,z[j]*RE,0,0,0) +BGB[0] + BZ[i,0,j] = dip.get_old(x[i]*RE,0,z[j]*RE,0,2,0) +BGB[2] + divB[i,0,j] = dip.get_old(x[i]*RE,0,z[j]*RE,1,0,0) + divB[i,0,j] += dip.get_old(x[i]*RE,0,z[j]*RE,1,2,2) +print(np.sum(divB),np.amin(divB),np.amax(divB)) +ax.pcolormesh(Xmesh,Zmesh,divB[:,0,:]) +flux_function = ffc(BX,None,BZ,dxdydz) +fluxcont = ax.contour(Xmesh,Zmesh,flux_function[:,0,:].T,flux_levels,colors='k',linestyles='solid',linewidths=0.5) +ax.text(0.2,0.08,"Regular dipole",transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.7), fontsize=fontsize) + +ax = axes[1] +print("1") +for i in range(len(x)): + for j in range(len(z)): + BX[i,0,j] = dip.getX(x[i]*RE,0,z[j]*RE,0,0,0) +BGB[0] + BZ[i,0,j] = dip.getX(x[i]*RE,0,z[j]*RE,0,2,0) +BGB[2] + divB[i,0,j] = dip.getX(x[i]*RE,0,z[j]*RE,1,0,0) + divB[i,0,j] += dip.getX(x[i]*RE,0,z[j]*RE,1,2,2) +print(np.sum(divB),np.amin(divB),np.amax(divB)) +ax.pcolormesh(Xmesh,Zmesh,divB[:,0,:]) +flux_function = ffc(BX,None,BZ,dxdydz) +fluxcont = ax.contour(Xmesh,Zmesh,flux_function[:,0,:].T,flux_levels,colors='k',linestyles='solid',linewidths=0.5) +ax.text(0.2,0.08,"Vector potential (X)",transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.7), fontsize=fontsize) + +# ax = axes[1] +# print("1") +# for i in range(len(x)): +# for j in range(len(z)): +# BX[i,0,j] = dip.getX(x[i]*RE,0,z[j]*RE,0,0,0) +# BZ[i,0,j] = dip.getX(x[i]*RE,0,z[j]*RE,0,2,0) +# BX[i,0,j] += mdip.getX(x[i]*RE,0,z[j]*RE,0,0,0) +BGB[0] +# BZ[i,0,j] += mdip.getX(x[i]*RE,0,z[j]*RE,0,2,0) +BGB[2] +# print(np.sum(divB),np.amin(divB),np.amax(divB)) +# ax.pcolormesh(Xmesh,Zmesh,divB[:,0,:]) +# flux_function = ffc(BX,None,BZ,dxdydz) +# fluxcont = ax.contour(Xmesh,Zmesh,flux_function[:,0,:].T,flux_levels,colors='k',linestyles='solid',linewidths=0.5) +# ax.text(0.2,0.08,"Vector potential + mirror (X)",transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.7), fontsize=fontsize) + +ax = axes[2] +print("2") +for i in range(len(x)): + for j in range(len(z)): + BX[i,0,j] = dip.get_old(x[i]*RE,0,z[j]*RE,0,0,0) + BZ[i,0,j] = dip.get_old(x[i]*RE,0,z[j]*RE,0,2,0) + BX[i,0,j] += mdip.get_old(x[i]*RE,0,z[j]*RE,0,0,0) +BGB[0] + BZ[i,0,j] += mdip.get_old(x[i]*RE,0,z[j]*RE,0,2,0) +BGB[2] + + divB[i,0,j] = dip.get_old(x[i]*RE,0,z[j]*RE,1,0,0) + divB[i,0,j] += dip.get_old(x[i]*RE,0,z[j]*RE,1,2,2) + divB[i,0,j] += 
mdip.get_old(x[i]*RE,0,z[j]*RE,1,0,0) + divB[i,0,j] += mdip.get_old(x[i]*RE,0,z[j]*RE,1,2,2) +print(np.sum(divB),np.amin(divB),np.amax(divB)) +ax.pcolormesh(Xmesh,Zmesh,divB[:,0,:]) +flux_function = ffc(BX,None,BZ,dxdydz) +fluxcont = ax.contour(Xmesh,Zmesh,flux_function[:,0,:].T,flux_levels,colors='k',linestyles='solid',linewidths=0.5) +ax.text(0.2,0.08,"Regular dipole + mirror",transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.7), fontsize=fontsize) + [Extraction damage: a conditional on tilt_angle_phi, the fourth panel of this script, the end of this file, and the header of the next patch are lost here. The surviving fragments of that lost patch update the derivative-verification script so that each finite-difference derivative is normalized only where the corresponding central difference is significant, in the repeating pattern + if abs(cdbx) > epsilon*B1[j,k]: dB1[j,k,0] = dB1[j,k,0]/cdbx and likewise for cdby and cdbz and for B2, B3 and B4. The text resumes mid-hunk in a background-field initialization:] ...*this->dipoleScalingFactor, this->dipoleMirrorLocationX, 0.0, 0.0, 0.0 );//mirror setBackgroundField(bgFieldDipole, BgBGrid, true); break; - case 4: // Vector potential dipole, vanishes after a given radius + case 4: // Vector potential dipole, vanishes after a given x-coordinate bgVectorDipole.initialize(126.2e6 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, this->dipoleTiltPhi, this->dipoleTiltTheta, this->dipoleXFull, this->dipoleXZero ); setBackgroundField(bgVectorDipole, BgBGrid); break; From 5c7051bc559a9761d636ff7fca1f477c8176ceac Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Thu, 9 May 2019 14:40:25 +0300 Subject: [PATCH 397/602] Fix two misnamed datareducers.
--- datareduction/datareducer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 495383761..61163e6f8 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -259,7 +259,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti continue; } if(*it == "fg_V") { // Overall effective bulk density defining the center-of-mass frame from all populations - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_background_B",[]( + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_V",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -573,7 +573,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addOperator(new DRO::VariableBVol); continue; } - if(*it == "fg_volB") { // Static (typically dipole) magnetic field part + if(*it == "fg_VolB") { // Static (typically dipole) magnetic field part outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_volB",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, From a2ddeec913c1cf6f968e1289fff61591930f0655 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Thu, 9 May 2019 15:05:28 +0300 Subject: [PATCH 398/602] Remove DCCRG E- and B-Reducers, since they are not coupled anymore. --- datareduction/datareducer.cpp | 8 ------ datareduction/datareductionoperator.cpp | 37 ------------------------- datareduction/datareductionoperator.h | 15 ---------- parameters.cpp | 2 +- 4 files changed, 1 insertion(+), 61 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 61163e6f8..7aa7f67ba 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -70,10 +70,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); continue; } - if(*it == "vg_B") { // Bulk magnetic field at Yee-Lattice locations - outputReducer->addOperator(new DRO::VariableB); - continue; - } if(*it == "fg_BackgroundB" || *it == "BackgroundB") { // Static (typically dipole) magnetic field part outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_background_B",[]( FsGrid< std::array, 2>& perBGrid, @@ -175,10 +171,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); continue; } - if(*it == "vg_E") { // Bulk electric field at Yee-lattice locations - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("E",CellParams::EX,3)); - continue; - } if(*it == "vg_Rhom" || *it == "Rhom") { // Overall mass density (summed over all populations) outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("rhom",CellParams::RHOM,1)); continue; diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index d1ff5c274..ca5710877 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -216,43 +216,6 @@ namespace DRO { return true; } - - - - //------------------ total B --------------------------------------- - VariableB::VariableB(): DataReductionOperator() { } - VariableB::~VariableB() { } - - bool VariableB::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { - dataType = "float"; - dataSize = sizeof(Real); - vectorSize = 3; - return true; - } - - std::string VariableB::getName() const {return "B";} - - bool 
VariableB::reduceData(const SpatialCell* cell,char* buffer) { - const char* ptr = reinterpret_cast(B); - for (uint i = 0; i < 3*sizeof(Real); ++i) buffer[i] = ptr[i]; - return true; - } - - bool VariableB::setSpatialCell(const SpatialCell* cell) { - B[0] = cell->parameters[CellParams::PERBX] + cell->parameters[CellParams::BGBX]; - B[1] = cell->parameters[CellParams::PERBY] + cell->parameters[CellParams::BGBY]; - B[2] = cell->parameters[CellParams::PERBZ] + cell->parameters[CellParams::BGBZ]; - if(std::isinf(B[0]) || std::isnan(B[0]) || - std::isinf(B[1]) || std::isnan(B[1]) || - std::isinf(B[2]) || std::isnan(B[2]) - ) { - string message = "The DataReductionOperator " + this->getName() + " returned a nan or an inf."; - bailout(true, message, __FILE__, __LINE__); - } - return true; - } - - //MPI rank MPIrank::MPIrank(): DataReductionOperator() { } MPIrank::~MPIrank() { } diff --git a/datareduction/datareductionoperator.h b/datareduction/datareductionoperator.h index 37f0c9639..9b495de24 100644 --- a/datareduction/datareductionoperator.h +++ b/datareduction/datareductionoperator.h @@ -202,21 +202,6 @@ namespace DRO { std::string popName; }; - class VariableB: public DataReductionOperator { - public: - VariableB(); - virtual ~VariableB(); - - virtual bool getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const; - virtual std::string getName() const; - virtual bool reduceData(const SpatialCell* cell,char* buffer); - virtual bool setSpatialCell(const SpatialCell* cell); - - protected: - Real B[3]; - }; - - class VariableBVol: public DataReductionOperator { public: VariableBVol(); diff --git a/parameters.cpp b/parameters.cpp index eee03c0a3..1222e6485 100644 --- a/parameters.cpp +++ b/parameters.cpp @@ -218,7 +218,7 @@ bool Parameters::addParameters(){ // Output variable parameters // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh - Readparameters::addComposing("variables.output", "List of data reduction operators (DROs) to add to the grid file output. Each variable to be added has to be on a new line output = XXX. Available (20190508): B vg_B fg_B BackgroundB vg_BackgroundB fg_BackgroundB PerturbedB vg_PerturbedB fg_PerturbedB E vg_E fg_E Rhom vg_Rhom fg_Rhom Rhoq vg_Rhoq fg_Rhoq populations_Rho V vg_V fg_V populations_V populations_moments_Backstream populations_moments_NonBackstream populations_EffectiveSparsityThreshold populations_RhoLossAdjust LBweight MaxVdt MaxRdt populations_MaxVdt populations_MaxRdt MaxFieldsdt MPIrank vg_rank FsGridRank fg_rank FsGridBoundaryType BoundaryType vg_BoundaryType fg_BoundaryType BoundaryLayer vg_BoundaryLayer fg_BoundaryLayer populations_Blocks fSaved populations_accSubcycles VolE vg_VolE fg_VolE HallE GradPeE VolB vg_VolB fg_VolB BackgroundVolB PerturbedVolB Pressure vg_Pressure fg_Pressure populations_PTensor derivs BVOLderivs GridCoordinates Potential BackgroundVolE ChargeDensity PotentialError MeshData"); + Readparameters::addComposing("variables.output", "List of data reduction operators (DROs) to add to the grid file output. Each variable to be added has to be on a new line output = XXX. 
Available (20190508): B fg_B BackgroundB vg_BackgroundB fg_BackgroundB PerturbedB vg_PerturbedB fg_PerturbedB E fg_E Rhom vg_Rhom fg_Rhom Rhoq vg_Rhoq fg_Rhoq populations_Rho V vg_V fg_V populations_V populations_moments_Backstream populations_moments_NonBackstream populations_EffectiveSparsityThreshold populations_RhoLossAdjust LBweight MaxVdt MaxRdt populations_MaxVdt populations_MaxRdt MaxFieldsdt MPIrank vg_rank FsGridRank fg_rank FsGridBoundaryType BoundaryType vg_BoundaryType fg_BoundaryType BoundaryLayer vg_BoundaryLayer fg_BoundaryLayer populations_Blocks fSaved populations_accSubcycles VolE vg_VolE fg_VolE HallE GradPeE VolB vg_VolB fg_VolB BackgroundVolB PerturbedVolB Pressure vg_Pressure fg_Pressure populations_PTensor derivs BVOLderivs GridCoordinates Potential BackgroundVolE ChargeDensity PotentialError MeshData"); // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh Readparameters::addComposing("variables.diagnostic", "List of data reduction operators (DROs) to add to the diagnostic runtime output. Each variable to be added has to be on a new line diagnostic = XXX. Available (20190320): FluxB FluxE populations_Blocks Rhom populations_RhoLossAdjust LBweight populations_MaxVdt MaxVdt populations_MaxRdt MaxRdt MaxFieldsdt populations_MaxDistributionFunction populations_MinDistributionFunction"); From 35a9beeb52c507e456830082485a7a4f63b0e521 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Fri, 10 May 2019 12:03:59 +0300 Subject: [PATCH 399/602] Added new energy density data reducer and inherited class for data reducers to write parameters to vlsv file --- datareduction/datareducer.cpp | 28 +++++++++ datareduction/datareducer.h | 2 + datareduction/datareductionoperator.cpp | 83 +++++++++++++++++++++++++ datareduction/datareductionoperator.h | 27 ++++++++ iowrite.cpp | 5 ++ object_wrapper.cpp | 17 +++++ parameters.cpp | 2 +- particle_species.h | 5 ++ 8 files changed, 168 insertions(+), 1 deletion(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 99462de96..0d82c6400 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -148,6 +148,13 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } + if(*it == "populations_EnergyDensity") { + // Per-population energy density in three energy ranges + for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { + outputReducer->addOperator(new DRO::VariableEnergyDensity(i)); + } + continue; + } if(*it == "MaxFieldsdt") { // Maximum timestep constraint as calculated by the fieldsolver outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("max_fields_dt",CellParams::MAXFDT,1)); @@ -517,6 +524,14 @@ bool DataReducer::handlesWriting(const unsigned int& operatorID) const { return dynamic_cast(operators[operatorID]) != nullptr; } +/** Ask a DataReductionOperator if it wants to write parameters to the vlsv file header + * @param operatorID ID number of the DataReductionOperator. + * @return If true, then VLSVWriter should be passed to the DataReductionOperator.*/ +bool DataReducer::hasParameters(const unsigned int& operatorID) const { + if (operatorID >= operators.size()) return false; + return dynamic_cast<DRO::DataReductionOperatorHasParameters*>(operators[operatorID]) != nullptr; +} + /** Request a DataReductionOperator to calculate its output data and to write it to the given buffer. * @param cell Pointer to spatial cell whose data is to be reduced.
* @param operatorID ID number of the applied DataReductionOperator. @@ -570,3 +585,16 @@ bool DataReducer::writeData(const unsigned int& operatorID, } return writingOperator->writeData(mpiGrid,cells,meshName,vlsvWriter); } + +/** Write parameters related to given DataReductionOperator to the output file. + * @param operatorID ID number of the selected DataReductionOperator. + * @param vlsvWriter VLSV file writer that has output file open. + * @return If true, DataReductionOperator wrote its parameters successfully.*/ +bool DataReducer::writeParameters(const unsigned int& operatorID, vlsv::Writer& vlsvWriter) { + if (operatorID >= operators.size()) return false; + DRO::DataReductionOperatorHasParameters* parameterOperator = dynamic_cast(operators[operatorID]); + if(parameterOperator == nullptr) { + return false; + } + return parameterOperator->writeParameters(vlsvWriter); +} diff --git a/datareduction/datareducer.h b/datareduction/datareducer.h index 70aea990f..8cac52c7e 100644 --- a/datareduction/datareducer.h +++ b/datareduction/datareducer.h @@ -45,6 +45,7 @@ class DataReducer { unsigned int& dataSize,unsigned int& vectorSize) const; std::string getName(const unsigned int& operatorID) const; bool handlesWriting(const unsigned int& operatorID) const; + bool hasParameters(const unsigned int& operatorID) const; bool reduceData(const SpatialCell* cell,const unsigned int& operatorID,char* buffer); bool reduceDiagnostic(const SpatialCell* cell,const unsigned int& operatorID,Real * result); unsigned int size() const; @@ -52,6 +53,7 @@ class DataReducer { const dccrg::Dccrg& mpiGrid, const std::vector& cells,const std::string& meshName, vlsv::Writer& vlsvWriter); + bool writeParameters(const unsigned int& operatorID, vlsv::Writer& vlsvWriter); private: /** Private copy-constructor to prevent copying the class. 
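The VariableEnergyDensity reducer implemented in the next hunk integrates f*E over the population's velocity mesh into three values: the total energy density plus the tails above E1limit and E2limit (overlapping tails, not disjoint bins). A minimal numpy sketch of the same reduction, with hypothetical array names (f is the phase-space density sampled on a velocity grid, dv3 the velocity-space cell volume):

import numpy as np

def energy_density_bins(f, vx, vy, vz, dv3, mass, E1limit, E2limit):
    # kinetic energy per particle at each velocity-space point
    E = 0.5 * mass * (vx**2 + vy**2 + vz**2)
    contrib = f * E * dv3               # energy-density contribution per cell
    total = contrib.sum()               # all of velocity space
    over1 = contrib[E > E1limit].sum()  # tail above the first limit
    over2 = contrib[E > E2limit].sum()  # tail above the second limit
    return total, over1, over2

In the C++ implementation below, each OpenMP thread accumulates private sums (thread_E0_sum etc.) that are combined in a critical section.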
diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index d26d70b5e..fcc26fc84 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -1476,4 +1476,87 @@ namespace DRO { bool VariableEffectiveSparsityThreshold::setSpatialCell(const spatial_cell::SpatialCell* cell) { return true; } + + VariableEnergyDensity::VariableEnergyDensity(cuint _popID): DataReductionOperatorHasParameters(),popID(_popID) { + popName = getObjectWrapper().particleSpecies[popID].name; + } + VariableEnergyDensity::~VariableEnergyDensity() { } + + std::string VariableEnergyDensity::getName() const {return popName + "/EnergyDensity";} + + bool VariableEnergyDensity::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { + dataType = "float"; + dataSize = sizeof(Real); + vectorSize = 3; // This is not components, but rather total energy density, density over E1, and density over E2 + return true; + } + + bool VariableEnergyDensity::reduceData(const SpatialCell* cell,char* buffer) { + const Real HALF = 0.5; + # pragma omp parallel + { + Real thread_E0_sum = 0.0; + Real thread_E1_sum = 0.0; + Real thread_E2_sum = 0.0; + + const Real* parameters = cell->get_block_parameters(popID); + const Realf* block_data = cell->get_data(popID); + + # pragma omp for + for (vmesh::LocalID n=0; n<cell->get_number_of_velocity_blocks(popID); n++) { + const Real DV3 + = parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::DVX] + * parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::DVY] + * parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::DVZ]; + + for (uint k = 0; k < WID; ++k) for (uint j = 0; j < WID; ++j) for (uint i = 0; i < WID; ++i) { + const Real VX + = parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::VXCRD] + + (i + HALF)*parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::DVX]; + const Real VY + = parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::VYCRD] + + (j + HALF)*parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::DVY]; + const Real VZ + = parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::VZCRD] + + (k + HALF)*parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::DVZ]; + + const Real ENERGY = (VX*VX + VY*VY + VZ*VZ) * HALF * getObjectWrapper().particleSpecies[popID].mass; + thread_E0_sum += block_data[n * SIZE_VELBLOCK+cellIndex(i,j,k)] * ENERGY * DV3; + if (ENERGY > E1limit) thread_E1_sum += block_data[n * SIZE_VELBLOCK+cellIndex(i,j,k)] * ENERGY * DV3; + if (ENERGY > E2limit) thread_E2_sum += block_data[n * SIZE_VELBLOCK+cellIndex(i,j,k)] * ENERGY * DV3; + } + } + + // Accumulate contributions coming from this velocity block to the + // spatial cell energy density sums. If multithreading / OpenMP is used, + // these updates need to be atomic: + # pragma omp critical + { + EDensity[0] += thread_E0_sum; + EDensity[1] += thread_E1_sum; + EDensity[2] += thread_E2_sum; + } + } + const char* ptr = reinterpret_cast<const char*>(&EDensity); + for (uint i = 0; i < 3*sizeof(Real); ++i) buffer[i] = ptr[i]; + return true; + } + + bool VariableEnergyDensity::setSpatialCell(const SpatialCell* cell) { + for(int i = 0; i < 3; i++) { + EDensity[i] = 0.0; + } + solarwindenergy = getObjectWrapper().particleSpecies[popID].SolarWindEnergy; + E1limit = solarwindenergy * getObjectWrapper().particleSpecies[popID].EnergyDensityLimit1; + E2limit = solarwindenergy * getObjectWrapper().particleSpecies[popID].EnergyDensityLimit2; + return true; + } + + bool VariableEnergyDensity::writeParameters(vlsv::Writer& vlsvWriter) { + if( vlsvWriter.writeParameter("EnergyDensity0", &solarwindenergy) == false ) { return false; } + if( vlsvWriter.writeParameter("EnergyDensity1", &E1limit) == false ) { return false; } + if( vlsvWriter.writeParameter("EnergyDensity2", &E2limit) == false ) { return false; } + return true; + } + } // namespace DRO diff --git a/datareduction/datareductionoperator.h b/datareduction/datareductionoperator.h index c6162dde6..d5abcea66 100644 --- a/datareduction/datareductionoperator.h +++ b/datareduction/datareductionoperator.h @@ -73,6 +73,13 @@ namespace DRO { vlsv::Writer& vlsvWriter) = 0; }; + class DataReductionOperatorHasParameters: public DataReductionOperator { + public: + DataReductionOperatorHasParameters() : DataReductionOperator() {}; + virtual bool writeParameters(vlsv::Writer& vlsvWriter) = 0; + }; + + class DataReductionOperatorCellParams: public DataReductionOperator { public: DataReductionOperatorCellParams(const std::string& name,const unsigned int parameterIndex,const unsigned int vectorSize); @@ -506,6 +513,26 @@ namespace DRO { uint popID; std::string popName; }; + + class VariableEnergyDensity: public DataReductionOperatorHasParameters { + public: + VariableEnergyDensity(cuint popID); + virtual ~VariableEnergyDensity(); + + virtual bool getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const; + virtual std::string getName() const; + virtual bool reduceData(const SpatialCell* cell,char* buffer); + virtual bool setSpatialCell(const SpatialCell* cell); + virtual bool writeParameters(vlsv::Writer& vlsvWriter); + + protected: + uint popID; + std::string popName; + Real EDensity[3]; + Real solarwindenergy; + Real E1limit; + Real E2limit; + }; } // namespace DRO diff --git a/iowrite.cpp b/iowrite.cpp index a159ee164..e38103cde 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -408,6 +408,11 @@ bool writeDataReducer(const dccrg::Dccrg& } + // Check if the DataReducer wants to write parameters to the output file + if (dataReducer.hasParameters(dataReducerIndex) == true) { + success = dataReducer.writeParameters(dataReducerIndex,vlsvWriter); + } + delete[] varBuffer; varBuffer = NULL; phiprof::stop("DRO_"+variableName); diff --git a/object_wrapper.cpp b/object_wrapper.cpp index 822ce0762..df73a3cbb 100644 --- a/object_wrapper.cpp +++ b/object_wrapper.cpp @@ -70,6 +70,12 @@ bool ObjectWrapper::addPopulationParameters() { Readparameters::add(pop + "_backstream.vy", "Center coordinate for the maxwellian distribution. Used for calculating the backstream moments.", 0.0); Readparameters::add(pop + "_backstream.vz", "Center coordinate for the maxwellian distribution.
Used for calculating the backstream moments.", 0.0); Readparameters::add(pop + "_backstream.radius", "Radius of the maxwellian distribution. Used for calculating the backstream moments. If set to 0 (default), the backstream/non-backstream DROs are skipped.", 0.0); + + // Energy density parameters + Readparameters::add(pop + "_energydensity.limit1", "Lower limit of second bin for energy density, given in units of solar wind ram energy.", 5.0); + Readparameters::add(pop + "_energydensity.limit2", "Lower limit of third bin for energy density, given in units of solar wind ram energy.", 10.0); + Readparameters::add(pop + "_energydensity.solarwindspeed", "Incoming solar wind velocity magnitude. Used for calculating energy densities.", 0.0); + Readparameters::add(pop + "_energydensity.solarwindenergy", "Incoming solar wind ram energy. Used for calculating energy densities.", 0.0); } return true; @@ -154,6 +160,17 @@ bool ObjectWrapper::getParameters() { Readparameters::get(pop + "_backstream.vx", species.backstreamV[0]); Readparameters::get(pop + "_backstream.vy", species.backstreamV[1]); Readparameters::get(pop + "_backstream.vz", species.backstreamV[2]); + + //Get energy density parameters + Readparameters::get(pop + "_energydensity.limit1", species.EnergyDensityLimit1); + Readparameters::get(pop + "_energydensity.limit2", species.EnergyDensityLimit2); + Readparameters::get(pop + "_energydensity.solarwindenergy", species.SolarWindEnergy); + Readparameters::get(pop + "_energydensity.solarwindspeed", species.SolarWindSpeed); + + const Real EPSILON = 1.e-25; + if (species.SolarWindEnergy < EPSILON) { + species.SolarWindEnergy = 0.5 * species.mass * species.SolarWindEnergy * species.SolarWindEnergy; + } } return true; diff --git a/parameters.cpp b/parameters.cpp index 5e00e455e..85b41ff32 100644 --- a/parameters.cpp +++ b/parameters.cpp @@ -217,7 +217,7 @@ bool Parameters::addParameters(){ // Output variable parameters // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh - Readparameters::addComposing("variables.output", "List of data reduction operators (DROs) to add to the grid file output. Each variable to be added has to be on a new line output = XXX. Available (20190320): B BackgroundB PerturbedB E Rhom Rhoq populations_Rho V populations_V populations_moments_Backstream populations_moments_NonBackstream populations_EffectiveSparsityThreshold populations_RhoLossAdjust LBweight MaxVdt MaxRdt populations_MaxVdt populations_MaxRdt MaxFieldsdt MPIrank FsGridRank FsGridBoundaryType BoundaryType BoundaryLayer populations_Blocks fSaved populations_accSubcycles VolE HallE GradPeE VolB BackgroundVolB PerturbedVolB Pressure populations_PTensor derivs BVOLderivs GridCoordinates Potential BackgroundVolE ChargeDensity PotentialError MeshData"); + Readparameters::addComposing("variables.output", "List of data reduction operators (DROs) to add to the grid file output. Each variable to be added has to be on a new line output = XXX. 
Available (20190320): B BackgroundB PerturbedB E Rhom Rhoq populations_Rho V populations_V populations_moments_Backstream populations_moments_NonBackstream populations_EffectiveSparsityThreshold populations_RhoLossAdjust LBweight MaxVdt MaxRdt populations_MaxVdt populations_MaxRdt MaxFieldsdt MPIrank FsGridRank FsGridBoundaryType BoundaryType BoundaryLayer populations_Blocks fSaved populations_accSubcycles VolE HallE GradPeE VolB BackgroundVolB PerturbedVolB Pressure populations_PTensor populations_EnergyDensity derivs BVOLderivs GridCoordinates Potential BackgroundVolE ChargeDensity PotentialError MeshData"); // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh Readparameters::addComposing("variables.diagnostic", "List of data reduction operators (DROs) to add to the diagnostic runtime output. Each variable to be added has to be on a new line diagnostic = XXX. Available (20190320): FluxB FluxE populations_Blocks Rhom populations_RhoLossAdjust LBweight populations_MaxVdt MaxVdt populations_MaxRdt MaxRdt MaxFieldsdt populations_MaxDistributionFunction populations_MinDistributionFunction"); diff --git a/particle_species.h b/particle_species.h index c194eaa91..e72499c05 100644 --- a/particle_species.h +++ b/particle_species.h @@ -59,6 +59,11 @@ namespace species { Real backstreamRadius; /*!< Radius of sphere to split the distribution into backstreaming and non-backstreaming. 0 (default in cfg) disables the DRO. */ std::array backstreamV; /*!< Centre of sphere to split the distribution into backstreaming and non-backstreaming. 0 (default in cfg) disables the DRO. */ + + Real EnergyDensityLimit1; /*!< Lower bound for second Energy density bin in units of solar wind ram energy. Default 5. */ + Real EnergyDensityLimit2; /*!< Lower bound for third Energy density bin in units of solar wind ram energy. Default 10. */ + Real SolarWindEnergy; /*!< Solar wind ram energy, used for calculating energy density bins. Default value of 0 attempts to use SolarWindSpeed instead. */ + Real SolarWindSpeed; /*!< Solar wind speed, used for calculating energy density bins if solar wind ram energy wasn't given. Default 0.
*/ Species(); Species(const Species& other); From 45a1fe1cef9ee7986eba7a95ee0bfff9dcd540c2 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Fri, 10 May 2019 15:08:05 +0300 Subject: [PATCH 400/602] Fixed parameters for Energy Density, removed complaint from ionosphere --- datareduction/datareductionoperator.cpp | 11 ++++++++--- object_wrapper.cpp | 2 +- sysboundary/ionosphere.cpp | 4 ++-- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index 990d12534..e1a808d7e 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -1495,6 +1495,11 @@ namespace DRO { EDensity[1] += thread_E1_sum; EDensity[2] += thread_E2_sum; } + + // Store energy density in units eV/cm^3 instead of Joules per m^3 + EDensity[0] *= 1.0e-6/physicalconstants::CHARGE; + EDensity[1] *= 1.0e-6/physicalconstants::CHARGE; + EDensity[2] *= 1.0e-6/physicalconstants::CHARGE; } const char* ptr = reinterpret_cast(&EDensity); for (uint i = 0; i < 3*sizeof(Real); ++i) buffer[i] = ptr[i]; @@ -1512,9 +1517,9 @@ namespace DRO { } bool VariableEnergyDensity::writeParameters(vlsv::Writer& vlsvWriter) { - if( vlsvWriter.writeParameter("EnergyDensity0", &solarwindenergy) == false ) { return false; } - if( vlsvWriter.writeParameter("EnergyDensity1", &E1limit) == false ) { return false; } - if( vlsvWriter.writeParameter("EnergyDensity2", &E2limit) == false ) { return false; } + if( vlsvWriter.writeParameter("EnergyDensityESW", &solarwindenergy) == false ) { return false; } + if( vlsvWriter.writeParameter("EnergyDensityELimit1", &E1limit) == false ) { return false; } + if( vlsvWriter.writeParameter("EnergyDensityELimit2", &E2limit) == false ) { return false; } return true; } diff --git a/object_wrapper.cpp b/object_wrapper.cpp index df73a3cbb..38513d73b 100644 --- a/object_wrapper.cpp +++ b/object_wrapper.cpp @@ -169,7 +169,7 @@ bool ObjectWrapper::getParameters() { const Real EPSILON = 1.e-25; if (species.SolarWindEnergy < EPSILON) { - species.SolarWindEnergy = 0.5 * species.mass * species.SolarWindEnergy * species.SolarWindEnergy; + species.SolarWindEnergy = 0.5 * species.mass * species.SolarWindSpeed * species.SolarWindSpeed; } } diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index 337ab51fa..5647a920f 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -550,8 +550,8 @@ namespace SBC { ) { std::vector< std::array > closestCells = getAllClosestNonsysboundaryCells(technicalGrid, i,j,k); if (closestCells.size() == 1 && closestCells[0][0] == std::numeric_limits::min() ) { - std::cerr << __FILE__ << ":" << __LINE__ << ":" << "No closest cells found!" << std::endl; - // mismatch on fsgrid and mpigrid? + //mismatch on fsgrid and mpigrid? + //std::cerr << __FILE__ << ":" << __LINE__ << ":" << "No closest cells found!" 
<< std::endl; //abort(); return 0; } From 5da11c153d5e7a0713b77c1e0660fcb9ba8fe1cd Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Fri, 10 May 2019 16:23:03 +0300 Subject: [PATCH 401/602] Fixed output units and thread safety --- datareduction/datareductionoperator.cpp | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index e1a808d7e..7b1b665e0 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -1496,11 +1496,12 @@ namespace DRO { EDensity[1] += thread_E1_sum; EDensity[2] += thread_E2_sum; } - // Store energy density in units eV/cm^3 instead of Joules per m^3 - EDensity[0] *= 1.0e-6/physicalconstants::CHARGE; - EDensity[1] *= 1.0e-6/physicalconstants::CHARGE; - EDensity[2] *= 1.0e-6/physicalconstants::CHARGE; } + // Store energy density in units eV/cm^3 instead of Joules per m^3 + EDensity[0] *= (1.0e-6)/physicalconstants::CHARGE; + EDensity[1] *= (1.0e-6)/physicalconstants::CHARGE; + EDensity[2] *= (1.0e-6)/physicalconstants::CHARGE; + const char* ptr = reinterpret_cast(&EDensity); for (uint i = 0; i < 3*sizeof(Real); ++i) buffer[i] = ptr[i]; return true; @@ -1517,9 +1518,13 @@ } bool VariableEnergyDensity::writeParameters(vlsv::Writer& vlsvWriter) { - if( vlsvWriter.writeParameter("EnergyDensityESW", &solarwindenergy) == false ) { return false; } - if( vlsvWriter.writeParameter("EnergyDensityELimit1", &E1limit) == false ) { return false; } - if( vlsvWriter.writeParameter("EnergyDensityELimit2", &E2limit) == false ) { return false; } + // Output energies in eV + Real swe = solarwindenergy/physicalconstants::CHARGE; + Real e1l = E1limit/physicalconstants::CHARGE; + Real e2l = E2limit/physicalconstants::CHARGE; + if( vlsvWriter.writeParameter("EnergyDensityESW", &swe) == false ) { return false; } + if( vlsvWriter.writeParameter("EnergyDensityELimit1", &e1l) == false ) { return false; } + if( vlsvWriter.writeParameter("EnergyDensityELimit2", &e2l) == false ) { return false; } return true; } From c458e3737b83960c5f696a9ff560a59741db3d84 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Mon, 13 May 2019 16:11:37 +0300 Subject: [PATCH 402/602] Remove data reducer returning false error message. This is no longer an error, since fsgrid reducers return false there. --- iowrite.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/iowrite.cpp b/iowrite.cpp index b726fc873..b8dd249e7 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -366,8 +366,7 @@ bool writeDataReducer(const dccrg::Dccrg& //Reduce data ( return false if the operation fails ) if (dataReducer.reduceData(mpiGrid[cells[cell]],dataReducerIndex,varBuffer + cell*vectorSize*dataSize) == false){ success = false; - logFile << "(MAIN) writeGrid: ERROR datareductionoperator '" << dataReducer.getName(dataReducerIndex) << - "' returned false!" << endl << writeVerbose; + // Note that this is not an error (anymore), since fsgrid reducers will return false here.
} } if( success ) { From 7489ac9c9938b9bc1b0b8de09cc7e1636f10eadb Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Tue, 14 May 2019 10:59:53 +0300 Subject: [PATCH 403/602] Added (to python test) vector potential model for solar wind IMF scaling at the inflow boundary --- doc/vectordipole/fieldmodels.py | 126 ++++++++++++++++++++++++++++ 1 file changed, 126 insertions(+) diff --git a/doc/vectordipole/fieldmodels.py b/doc/vectordipole/fieldmodels.py index e58a3fafb..46fb9bebd 100644 --- a/doc/vectordipole/fieldmodels.py +++ b/doc/vectordipole/fieldmodels.py @@ -527,3 +527,129 @@ def getX(self, x,y,z,derivative,fComponent,dComponent): return 0 # dummy, but prevents gcc from yelling + + + + + + + + + + + +class IMFpotential(object): + ''' Class generating a scaling vector potential for the inflow IMF + ''' + # The vector potential for a constant field is defined as + # A = 0.5 * B cross r + + def __init__(self, radius_z=10, radius_f=40, IMF=[0.,0.,-5.e-9]): + self.radius = np.zeros(2) # X-extents of zero and full field + self.radius[0]=radius_z*RE + self.radius[1]=radius_f*RE + self.IMF = IMF + + def set_IMF(self, radius_z=10, radius_f=40, IMF=[0.,0.,-5.e-9]): + self.radius[0]=radius_z*RE + self.radius[1]=radius_f*RE + self.IMF = IMF + + def get(self, x,y,z,derivative,fComponent,dComponent): + r = np.zeros(3) + r[0]= x + r[1]= y + r[2]= z + + # Simple constant fields outside variation zone + if(x < self.radius[0]): + return 0.0 + elif(x > self.radius[1]): + if derivative==0: + return self.IMF[fComponent] + else: + return 0.0 + + A = np.zeros(3) + A[0] = 0.5*(self.IMF[1]*r[2] - self.IMF[2]*r[1]) + A[1] = 0.5*(self.IMF[2]*r[0] - self.IMF[0]*r[2]) + A[2] = 0.5*(self.IMF[0]*r[1] - self.IMF[1]*r[0]) + + B = self.IMF[fComponent] + + # Coordinate within smootherstep function + Sx = (x-self.radius[0])/(self.radius[1]-self.radius[0]) + Sx2 = Sx*Sx + # Smootherstep and its x-derivative + S2 = 6.*Sx2*Sx2*Sx - 15.*Sx2*Sx2 + 10.*Sx2*Sx + dS2dx = (30.*Sx2*Sx2 - 60.*Sx2*Sx + 30.*Sx2)/(self.radius[1]-self.radius[0]) + + # Cartesian derivatives of S2 + dS2cart=np.zeros(3) + dS2cart[0] = dS2dx + dS2cart[1] = 0. + dS2cart[2] = 0. + + + if(derivative == 0): + # The scaled magnetic field is + # B'(r) = del cross A'(r) + # =(NRL)= S2(Sx) del cross A(r) + del S2(Sx) cross A(r) + # = S2(Sx) B(r) + del S2(Sx) cross A(r) + + delS2crossA=np.zeros(3) + delS2crossA[0] = 0.#dS2cart[1]*A[2] - dS2cart[2]*A[1] + delS2crossA[1] = - dS2cart[0]*A[2] #dS2cart[2]*A[0] - dS2cart[0]*A[2] + delS2crossA[2] = dS2cart[0]*A[1] #- dS2cart[1]*A[0] + + return S2*B + delS2crossA[fComponent] + + elif(derivative == 1): + # Regular derivative of B + delB = 0. + + # Calculate del Ax, del Ay, del Az + delAx=np.zeros(3) + delAy=np.zeros(3) + delAz=np.zeros(3) + delAx[0] = 0. + delAx[1] = -0.5*self.IMF[2] + delAx[2] = 0.5*self.IMF[1] + delAy[0] = 0.5*self.IMF[2] + delAy[1] = 0.0 + delAy[2] = -0.5*self.IMF[0] + delAz[0] = -0.5*self.IMF[1] + delAz[1] = 0.5*self.IMF[0] + delAz[2] = 0.0 + + #ddidS2dr = 60.*(2.*Sx2*Sx - 3.*Sx2 + Sx)/(r2*(radius[1]-radius[0])*(radius[1]-radius[0])) + ddxdS2dx = 60.*(2.*Sx2*Sx - 3.*Sx2 + Sx)/((self.radius[1]-self.radius[0])*(self.radius[1]-self.radius[0])) + + # Calculate del (dS2/dx), del (dS2/dy), del (dS2/dz) + deldS2dx=np.zeros(3) + deldS2dy=np.zeros(3) + deldS2dz=np.zeros(3) + deldS2dx[0] = ddxdS2dx + deldS2dx[1] = 0. + deldS2dx[2] = 0.
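+            # Note: since B' = curl(S2*A) by construction, div B' = 0 holds for
+            # any scaling profile S2. The smootherstep S2 = 6s^5 - 15s^4 + 10s^3
+            # satisfies S2(0)=0 and S2(1)=1 with S2' = S2'' = 0 at both ends, so
+            # the field blends from zero to the full IMF with continuous first
+            # derivatives; and curl(0.5*IMF x r) = IMF for constant IMF, so the
+            # vector potential A above reproduces the uniform field where S2=1.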
+ + # Calculate del(del S2(Sx) cross A)@i=x, del(del S2(Sx) cross A)@i=y, del(del S2(Sx) cross A)@i=z + ddS2crossA=np.zeros([3,3]) + + # derivatives of X-directional field + ddS2crossA[0][0] = deldS2dy[0]*A[2] + dS2cart[1]*delAz[0] - deldS2dz[0]*A[1] - dS2cart[2]*delAy[0] + ddS2crossA[0][1] = deldS2dy[1]*A[2] + dS2cart[1]*delAz[1] - deldS2dz[1]*A[1] - dS2cart[2]*delAy[1] + ddS2crossA[0][2] = deldS2dy[2]*A[2] + dS2cart[1]*delAz[2] - deldS2dz[2]*A[1] - dS2cart[2]*delAy[2] + # derivatives of Y-directional field + ddS2crossA[1][0] = deldS2dz[0]*A[0] + dS2cart[2]*delAx[0] - deldS2dx[0]*A[2] - dS2cart[0]*delAz[0] + ddS2crossA[1][1] = deldS2dz[1]*A[0] + dS2cart[2]*delAx[1] - deldS2dx[1]*A[2] - dS2cart[0]*delAz[1] + ddS2crossA[1][2] = deldS2dz[2]*A[0] + dS2cart[2]*delAx[2] - deldS2dx[2]*A[2] - dS2cart[0]*delAz[2] + # derivatives of Z-directional field + ddS2crossA[2][0] = deldS2dx[0]*A[1] + dS2cart[0]*delAy[0] - deldS2dy[0]*A[0] - dS2cart[1]*delAx[0] + ddS2crossA[2][1] = deldS2dx[1]*A[1] + dS2cart[0]*delAy[1] - deldS2dy[1]*A[0] - dS2cart[1]*delAx[1] + ddS2crossA[2][2] = deldS2dx[2]*A[1] + dS2cart[0]*delAy[2] - deldS2dy[2]*A[0] - dS2cart[1]*delAx[2] + + return S2*delB + dS2cart[dComponent]*B + ddS2crossA[fComponent][dComponent] + + print("ERROR") + return 0 # dummy, but prevents gcc from yelling From ed4598d55805906ad3e779f4f1c34483214e201f Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 14 May 2019 11:15:29 +0300 Subject: [PATCH 404/602] Removed unused fsgrid variables from spatial_cell and the associated coupling function calls and datareducer calls. Compiles, pushing to test on sisu. List of removed variables: - fieldsolver::derivatives entirely - CellParams::BGBX,BGBY,BGBZ - CellParams::EXHALL_* --- common.h | 146 ++++---- datareduction/datareducer.cpp | 204 +++---- datareduction/datareductionoperator.cpp | 17 +- fieldsolver/gridGlue.cpp | 428 ++++++++++++------------ grid.cpp | 32 +- iowrite.cpp | 4 +- spatial_cell.cpp | 30 +- spatial_cell.hpp | 2 +- vlasiator.cpp | 98 +++--- 9 files changed, 481 insertions(+), 480 deletions(-) diff --git a/common.h b/common.h index 8ca859c52..ef3798ef9 100644 --- a/common.h +++ b/common.h @@ -133,9 +133,9 @@ namespace CellParams { EX, /*!< Total electric field x-component, averaged over cell edge. Used to propagate BX,BY,BZ.*/ EY, /*!< Total electric field y-component, averaged over cell edge. Used to propagate BX,BY,BZ.*/ EZ, /*!< Total electric field z-component, averaged over cell edge. Used to propagate BX,BY,BZ.*/ - BGBX, /*!< Background magnetic field x-component, averaged over cell x-face.*/ - BGBY, /*!< Background magnetic field x-component, averaged over cell x-face.*/ - BGBZ, /*!< Background magnetic field x-component, averaged over cell x-face.*/ + /* BGBX, /\*!< Background magnetic field x-component, averaged over cell x-face.*\/ */ + /* BGBY, /\*!< Background magnetic field x-component, averaged over cell x-face.*\/ */ + /* BGBZ, /\*!< Background magnetic field x-component, averaged over cell x-face.*\/ */ PERBX, /*!< Perturbed Magnetic field x-component, averaged over cell x-face. Propagated by field solver.*/ PERBY, /*!< Perturbed Magnetic field y-component, averaged over cell y-face. Propagated by field solver.*/ PERBZ, /*!< Perturbed Magnetic field z-component, averaged over cell z-face.
Propagated by field solver.*/ @@ -164,18 +164,18 @@ namespace CellParams { EXVOL, /*!< Ex averaged over spatial cell.*/ EYVOL, /*!< Ey averaged over spatial cell.*/ EZVOL, /*!< Ez averaged over spatial cell.*/ - EXHALL_000_100, /*!< Hall term x averaged along x on -y/-z edge of spatial cell.*/ - EYHALL_000_010, /*!< Hall term y averaged along y on -x/-z edge of spatial cell.*/ - EZHALL_000_001, /*!< Hall term z averaged along z on -x/-y edge of spatial cell.*/ - EYHALL_100_110, /*!< Hall term y averaged along y on +x/-z edge of spatial cell.*/ - EZHALL_100_101, /*!< Hall term z averaged along z on +x/-y edge of spatial cell.*/ - EXHALL_010_110, /*!< Hall term x averaged along x on +y/-z edge of spatial cell.*/ - EZHALL_010_011, /*!< Hall term z averaged along z on +y/-x edge of spatial cell.*/ - EZHALL_110_111, /*!< Hall term z averaged along z on +x/+y edge of spatial cell.*/ - EXHALL_001_101, /*!< Hall term x averaged along x on -y/+z edge of spatial cell.*/ - EYHALL_001_011, /*!< Hall term y averaged along y on -x/+z edge of spatial cell.*/ - EYHALL_101_111, /*!< Hall term y averaged along y on +x/+z edge of spatial cell.*/ - EXHALL_011_111, /*!< Hall term x averaged along x on +y/+z edge of spatial cell.*/ + /* EXHALL_000_100, /\*!< Hall term x averaged along x on -y/-z edge of spatial cell.*\/ */ + /* EYHALL_000_010, /\*!< Hall term y averaged along y on -x/-z edge of spatial cell.*\/ */ + /* EZHALL_000_001, /\*!< Hall term z averaged along z on -x/-y edge of spatial cell.*\/ */ + /* EYHALL_100_110, /\*!< Hall term y averaged along y on +x/-z edge of spatial cell.*\/ */ + /* EZHALL_100_101, /\*!< Hall term z averaged along z on +x/-y edge of spatial cell.*\/ */ + /* EXHALL_010_110, /\*!< Hall term x averaged along x on +y/-z edge of spatial cell.*\/ */ + /* EZHALL_010_011, /\*!< Hall term z averaged along z on +y/-x edge of spatial cell.*\/ */ + /* EZHALL_110_111, /\*!< Hall term z averaged along z on +x/+y edge of spatial cell.*\/ */ + /* EXHALL_001_101, /\*!< Hall term x averaged along x on -y/+z edge of spatial cell.*\/ */ + /* EYHALL_001_011, /\*!< Hall term y averaged along y on -x/+z edge of spatial cell.*\/ */ + /* EYHALL_101_111, /\*!< Hall term y averaged along y on +x/+z edge of spatial cell.*\/ */ + /* EXHALL_011_111, /\*!< Hall term x averaged along x on +y/+z edge of spatial cell.*\/ */ EXGRADPE, /*!< Electron pressure gradient term x.*/ EYGRADPE, /*!< Electron pressure gradient term y.*/ EZGRADPE, /*!< Electron pressure gradient term z.*/ @@ -232,64 +232,64 @@ namespace CellParams { */ namespace fieldsolver { enum { - drhomdx, /*!< Derivative of volume-averaged mass density to x-direction. */ - drhomdy, /*!< Derivative of volume-averaged mass density to y-direction. */ - drhomdz, /*!< Derivative of volume-averaged mass density to z-direction. */ - drhoqdx, /*!< Derivative of volume-averaged charge density to x-direction. */ - drhoqdy, /*!< Derivative of volume-averaged charge density to y-direction. */ - drhoqdz, /*!< Derivative of volume-averaged charge density to z-direction. */ - dBGBxdy, /*!< Derivative of face-averaged Bx to y-direction. */ - dBGBxdz, /*!< Derivative of face-averaged Bx to z-direction. */ - dBGBydx, /*!< Derivative of face-averaged By to x-direction. */ - dBGBydz, /*!< Derivative of face-averaged By to z-direction. */ - dBGBzdx, /*!< Derivative of face-averaged Bz to x-direction. */ - dBGBzdy, /*!< Derivative of face-averaged Bz to y-direction. */ - dPERBxdy, /*!< Derivative of face-averaged Bx to y-direction. 
*/ - dPERBxdz, /*!< Derivative of face-averaged Bx to z-direction. */ - dPERBydx, /*!< Derivative of face-averaged By to x-direction. */ - dPERBydz, /*!< Derivative of face-averaged By to z-direction. */ - dPERBzdx, /*!< Derivative of face-averaged Bz to x-direction. */ - dPERBzdy, /*!< Derivative of face-averaged Bz to y-direction. */ - // Insert for Hall term - // NOTE 2nd derivatives of BGBn are not needed as curl(dipole) = 0.0 - // will change if BGB is not curl-free -// dBGBxdyy, /*!< Second derivative of face-averaged Bx to yy-direction. */ -// dBGBxdzz, /*!< Second derivative of face-averaged Bx to zz-direction. */ -// dBGBxdyz, /*!< Second derivative of face-averaged Bx to yz-direction. */ -// dBGBydxx, /*!< Second derivative of face-averaged By to xx-direction. */ -// dBGBydzz, /*!< Second derivative of face-averaged By to zz-direction. */ -// dBGBydxz, /*!< Second derivative of face-averaged By to xz-direction. */ -// dBGBzdxx, /*!< Second derivative of face-averaged Bz to xx-direction. */ -// dBGBzdyy, /*!< Second derivative of face-averaged Bz to yy-direction. */ -// dBGBzdxy, /*!< Second derivative of face-averaged Bz to xy-direction. */ - dPERBxdyy, /*!< Second derivative of face-averaged Bx to yy-direction. */ - dPERBxdzz, /*!< Second derivative of face-averaged Bx to zz-direction. */ - dPERBxdyz, /*!< Second derivative of face-averaged Bx to yz-direction. */ - dPERBydxx, /*!< Second derivative of face-averaged By to xx-direction. */ - dPERBydzz, /*!< Second derivative of face-averaged By to zz-direction. */ - dPERBydxz, /*!< Second derivative of face-averaged By to xz-direction. */ - dPERBzdxx, /*!< Second derivative of face-averaged Bz to xx-direction. */ - dPERBzdyy, /*!< Second derivative of face-averaged Bz to yy-direction. */ - dPERBzdxy, /*!< Second derivative of face-averaged Bz to xy-direction. */ - dp11dx, /*!< Derivative of P_11 to x direction. */ - dp11dy, /*!< Derivative of P_11 to x direction. */ - dp11dz, /*!< Derivative of P_11 to x direction. */ - dp22dx, /*!< Derivative of P_22 to y direction. */ - dp22dy, /*!< Derivative of P_22 to y direction. */ - dp22dz, /*!< Derivative of P_22 to y direction. */ - dp33dx, /*!< Derivative of P_33 to z direction. */ - dp33dy, /*!< Derivative of P_33 to z direction. */ - dp33dz, /*!< Derivative of P_33 to z direction. */ - // End of insert for Hall term - dVxdx, /*!< Derivative of volume-averaged Vx to x-direction. */ - dVxdy, /*!< Derivative of volume-averaged Vx to y-direction. */ - dVxdz, /*!< Derivative of volume-averaged Vx to z-direction. */ - dVydx, /*!< Derivative of volume-averaged Vy to x-direction. */ - dVydy, /*!< Derivative of volume-averaged Vy to y-direction. */ - dVydz, /*!< Derivative of volume-averaged Vy to z-direction. */ - dVzdx, /*!< Derivative of volume-averaged Vz to x-direction. */ - dVzdy, /*!< Derivative of volume-averaged Vz to y-direction. */ - dVzdz, /*!< Derivative of volume-averaged Vz to z-direction. */ +/* drhomdx, /\*!< Derivative of volume-averaged mass density to x-direction. *\/ */ +/* drhomdy, /\*!< Derivative of volume-averaged mass density to y-direction. *\/ */ +/* drhomdz, /\*!< Derivative of volume-averaged mass density to z-direction. *\/ */ +/* drhoqdx, /\*!< Derivative of volume-averaged charge density to x-direction. *\/ */ +/* drhoqdy, /\*!< Derivative of volume-averaged charge density to y-direction. *\/ */ +/* drhoqdz, /\*!< Derivative of volume-averaged charge density to z-direction. *\/ */ +/* dBGBxdy, /\*!< Derivative of face-averaged Bx to y-direction. 
*\/ */ +/* dBGBxdz, /\*!< Derivative of face-averaged Bx to z-direction. *\/ */ +/* dBGBydx, /\*!< Derivative of face-averaged By to x-direction. *\/ */ +/* dBGBydz, /\*!< Derivative of face-averaged By to z-direction. *\/ */ +/* dBGBzdx, /\*!< Derivative of face-averaged Bz to x-direction. *\/ */ +/* dBGBzdy, /\*!< Derivative of face-averaged Bz to y-direction. *\/ */ +/* dPERBxdy, /\*!< Derivative of face-averaged Bx to y-direction. *\/ */ +/* dPERBxdz, /\*!< Derivative of face-averaged Bx to z-direction. *\/ */ +/* dPERBydx, /\*!< Derivative of face-averaged By to x-direction. *\/ */ +/* dPERBydz, /\*!< Derivative of face-averaged By to z-direction. *\/ */ +/* dPERBzdx, /\*!< Derivative of face-averaged Bz to x-direction. *\/ */ +/* dPERBzdy, /\*!< Derivative of face-averaged Bz to y-direction. *\/ */ +/* // Insert for Hall term */ +/* // NOTE 2nd derivatives of BGBn are not needed as curl(dipole) = 0.0 */ +/* // will change if BGB is not curl-free */ +/* // dBGBxdyy, /\*!< Second derivative of face-averaged Bx to yy-direction. *\/ */ +/* // dBGBxdzz, /\*!< Second derivative of face-averaged Bx to zz-direction. *\/ */ +/* // dBGBxdyz, /\*!< Second derivative of face-averaged Bx to yz-direction. *\/ */ +/* // dBGBydxx, /\*!< Second derivative of face-averaged By to xx-direction. *\/ */ +/* // dBGBydzz, /\*!< Second derivative of face-averaged By to zz-direction. *\/ */ +/* // dBGBydxz, /\*!< Second derivative of face-averaged By to xz-direction. *\/ */ +/* // dBGBzdxx, /\*!< Second derivative of face-averaged Bz to xx-direction. *\/ */ +/* // dBGBzdyy, /\*!< Second derivative of face-averaged Bz to yy-direction. *\/ */ +/* // dBGBzdxy, /\*!< Second derivative of face-averaged Bz to xy-direction. *\/ */ +/* dPERBxdyy, /\*!< Second derivative of face-averaged Bx to yy-direction. *\/ */ +/* dPERBxdzz, /\*!< Second derivative of face-averaged Bx to zz-direction. *\/ */ +/* dPERBxdyz, /\*!< Second derivative of face-averaged Bx to yz-direction. *\/ */ +/* dPERBydxx, /\*!< Second derivative of face-averaged By to xx-direction. *\/ */ +/* dPERBydzz, /\*!< Second derivative of face-averaged By to zz-direction. *\/ */ +/* dPERBydxz, /\*!< Second derivative of face-averaged By to xz-direction. *\/ */ +/* dPERBzdxx, /\*!< Second derivative of face-averaged Bz to xx-direction. *\/ */ +/* dPERBzdyy, /\*!< Second derivative of face-averaged Bz to yy-direction. *\/ */ +/* dPERBzdxy, /\*!< Second derivative of face-averaged Bz to xy-direction. *\/ */ +/* dp11dx, /\*!< Derivative of P_11 to x direction. *\/ */ +/* dp11dy, /\*!< Derivative of P_11 to x direction. *\/ */ +/* dp11dz, /\*!< Derivative of P_11 to x direction. *\/ */ +/* dp22dx, /\*!< Derivative of P_22 to y direction. *\/ */ +/* dp22dy, /\*!< Derivative of P_22 to y direction. *\/ */ +/* dp22dz, /\*!< Derivative of P_22 to y direction. *\/ */ +/* dp33dx, /\*!< Derivative of P_33 to z direction. *\/ */ +/* dp33dy, /\*!< Derivative of P_33 to z direction. *\/ */ +/* dp33dz, /\*!< Derivative of P_33 to z direction. *\/ */ +/* // End of insert for Hall term */ +/* dVxdx, /\*!< Derivative of volume-averaged Vx to x-direction. *\/ */ +/* dVxdy, /\*!< Derivative of volume-averaged Vx to y-direction. *\/ */ +/* dVxdz, /\*!< Derivative of volume-averaged Vx to z-direction. *\/ */ +/* dVydx, /\*!< Derivative of volume-averaged Vy to x-direction. *\/ */ +/* dVydy, /\*!< Derivative of volume-averaged Vy to y-direction. *\/ */ +/* dVydz, /\*!< Derivative of volume-averaged Vy to z-direction. 
*\/ */ +/* dVzdx, /\*!< Derivative of volume-averaged Vz to x-direction. *\/ */ +/* dVzdy, /\*!< Derivative of volume-averaged Vz to y-direction. *\/ */ +/* dVzdz, /\*!< Derivative of volume-averaged Vz to z-direction. *\/ */ N_SPATIAL_CELL_DERIVATIVES }; } diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 7aa7f67ba..488868830 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -101,10 +101,10 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); continue; } - if(*it == "vg_BackgroundB") { // Static (typically dipole) magnetic field part - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("background_B",CellParams::BGBX,3)); - continue; - } + // if(*it == "vg_BackgroundB") { // Static (typically dipole) magnetic field part + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("background_B",CellParams::BGBX,3)); + // continue; + // } if(*it == "fg_PerturbedB" || *it == "PerturbedB") { // Fluctuating magnetic field part outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_perturbed_B",[]( FsGrid< std::array, 2>& perBGrid, @@ -539,27 +539,27 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); continue; } - if(*it == "HallE") { - // 12 corner components of the hall-effect contribution to the electric field - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_000_100",CellParams::EXHALL_000_100,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_001_101",CellParams::EXHALL_001_101,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_010_110",CellParams::EXHALL_010_110,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_011_111",CellParams::EXHALL_011_111,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_000_010",CellParams::EYHALL_000_010,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_001_011",CellParams::EYHALL_001_011,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_100_110",CellParams::EYHALL_100_110,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_101_111",CellParams::EYHALL_101_111,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_000_001",CellParams::EZHALL_000_001,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_010_011",CellParams::EZHALL_010_011,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_100_101",CellParams::EZHALL_100_101,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_110_111",CellParams::EZHALL_110_111,1)); - continue; - } - if(*it =="GradPeE") { - // Electron pressure gradient contribution to the generalized ohm's law - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EGRADPE",CellParams::EXGRADPE,3)); - continue; - } + // if(*it == "HallE") { + // // 12 corner components of the hall-effect contribution to the electric field + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_000_100",CellParams::EXHALL_000_100,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_001_101",CellParams::EXHALL_001_101,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_010_110",CellParams::EXHALL_010_110,1)); + // 
outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_011_111",CellParams::EXHALL_011_111,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_000_010",CellParams::EYHALL_000_010,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_001_011",CellParams::EYHALL_001_011,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_100_110",CellParams::EYHALL_100_110,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_101_111",CellParams::EYHALL_101_111,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_000_001",CellParams::EZHALL_000_001,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_010_011",CellParams::EZHALL_010_011,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_100_101",CellParams::EZHALL_100_101,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_110_111",CellParams::EZHALL_110_111,1)); + // continue; + // } + // if(*it =="GradPeE") { + // // Electron pressure gradient contribution to the generalized ohm's law + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EGRADPE",CellParams::EXGRADPE,3)); + // continue; + // } if(*it == "VolB" || *it == "vg_VolB") { // Volume-averaged magnetic field outputReducer->addOperator(new DRO::VariableBVol); @@ -651,73 +651,73 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - if(*it == "derivs") { - // Derivatives of all quantities that might be of interest - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdx",fieldsolver::drhomdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdy",fieldsolver::drhomdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdz",fieldsolver::drhomdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdx",fieldsolver::drhoqdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdy",fieldsolver::drhoqdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdz",fieldsolver::drhoqdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dx",fieldsolver::dp11dx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dx",fieldsolver::dp22dx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dx",fieldsolver::dp33dx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dy",fieldsolver::dp11dy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dy",fieldsolver::dp22dy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dy",fieldsolver::dp33dy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dz",fieldsolver::dp11dz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dz",fieldsolver::dp22dz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dz",fieldsolver::dp33dz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdy",fieldsolver::dPERBxdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBxdy",fieldsolver::dBGBxdy,1)); - outputReducer->addOperator(new 
DRO::DataReductionOperatorDerivatives("dPERBxdz",fieldsolver::dPERBxdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBxdz",fieldsolver::dBGBxdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydx",fieldsolver::dPERBydx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBydx",fieldsolver::dBGBydx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydz",fieldsolver::dPERBydz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBydz",fieldsolver::dBGBydz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdx",fieldsolver::dPERBzdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBzdx",fieldsolver::dBGBzdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdy",fieldsolver::dPERBzdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBzdy",fieldsolver::dBGBzdy,1)); - if(Parameters::ohmHallTerm == 2) { - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdyy",fieldsolver::dPERBxdyy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdzz",fieldsolver::dPERBxdzz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydxx",fieldsolver::dPERBydxx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydzz",fieldsolver::dPERBydzz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdxx",fieldsolver::dPERBzdxx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdyy",fieldsolver::dPERBzdyy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdyz",fieldsolver::dPERBxdyz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydxz",fieldsolver::dPERBydxz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdxy",fieldsolver::dPERBzdxy,1)); - } - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdx",fieldsolver::dVxdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdy",fieldsolver::dVxdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdz",fieldsolver::dVxdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydx",fieldsolver::dVydx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydy",fieldsolver::dVydy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydz",fieldsolver::dVydz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdx",fieldsolver::dVzdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdy",fieldsolver::dVzdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdz",fieldsolver::dVzdz,1)); - continue; - } - if(*it == "BVOLderivs") { - // Volume-averaged derivatives - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdy",bvolderivatives::dPERBXVOLdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBXVOLdy",bvolderivatives::dBGBXVOLdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdz",bvolderivatives::dPERBXVOLdz,1)); - outputReducer->addOperator(new 
DRO::DataReductionOperatorBVOLDerivatives("dBGBXVOLdz",bvolderivatives::dBGBXVOLdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdx",bvolderivatives::dPERBYVOLdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBYVOLdx",bvolderivatives::dBGBYVOLdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdz",bvolderivatives::dPERBYVOLdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBYVOLdz",bvolderivatives::dBGBYVOLdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdx",bvolderivatives::dPERBZVOLdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBZVOLdx",bvolderivatives::dBGBZVOLdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdy",bvolderivatives::dPERBZVOLdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBZVOLdy",bvolderivatives::dBGBZVOLdy,1)); - continue; - } + // if(*it == "derivs") { + // // Derivatives of all quantities that might be of interest + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdx",fieldsolver::drhomdx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdy",fieldsolver::drhomdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdz",fieldsolver::drhomdz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdx",fieldsolver::drhoqdx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdy",fieldsolver::drhoqdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdz",fieldsolver::drhoqdz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dx",fieldsolver::dp11dx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dx",fieldsolver::dp22dx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dx",fieldsolver::dp33dx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dy",fieldsolver::dp11dy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dy",fieldsolver::dp22dy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dy",fieldsolver::dp33dy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dz",fieldsolver::dp11dz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dz",fieldsolver::dp22dz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dz",fieldsolver::dp33dz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdy",fieldsolver::dPERBxdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBxdy",fieldsolver::dBGBxdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdz",fieldsolver::dPERBxdz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBxdz",fieldsolver::dBGBxdz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydx",fieldsolver::dPERBydx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBydx",fieldsolver::dBGBydx,1)); + // outputReducer->addOperator(new 
DRO::DataReductionOperatorDerivatives("dPERBydz",fieldsolver::dPERBydz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBydz",fieldsolver::dBGBydz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdx",fieldsolver::dPERBzdx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBzdx",fieldsolver::dBGBzdx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdy",fieldsolver::dPERBzdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBzdy",fieldsolver::dBGBzdy,1)); + // if(Parameters::ohmHallTerm == 2) { + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdyy",fieldsolver::dPERBxdyy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdzz",fieldsolver::dPERBxdzz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydxx",fieldsolver::dPERBydxx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydzz",fieldsolver::dPERBydzz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdxx",fieldsolver::dPERBzdxx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdyy",fieldsolver::dPERBzdyy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdyz",fieldsolver::dPERBxdyz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydxz",fieldsolver::dPERBydxz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdxy",fieldsolver::dPERBzdxy,1)); + // } + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdx",fieldsolver::dVxdx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdy",fieldsolver::dVxdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdz",fieldsolver::dVxdz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydx",fieldsolver::dVydx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydy",fieldsolver::dVydy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydz",fieldsolver::dVydz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdx",fieldsolver::dVzdx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdy",fieldsolver::dVzdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdz",fieldsolver::dVzdz,1)); + // continue; + // } + // if(*it == "BVOLderivs") { + // // Volume-averaged derivatives + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdy",bvolderivatives::dPERBXVOLdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBXVOLdy",bvolderivatives::dBGBXVOLdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdz",bvolderivatives::dPERBXVOLdz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBXVOLdz",bvolderivatives::dBGBXVOLdz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdx",bvolderivatives::dPERBYVOLdx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBYVOLdx",bvolderivatives::dBGBYVOLdx,1)); + // outputReducer->addOperator(new 
DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdz",bvolderivatives::dPERBYVOLdz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBYVOLdz",bvolderivatives::dBGBYVOLdz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdx",bvolderivatives::dPERBZVOLdx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBZVOLdx",bvolderivatives::dBGBZVOLdx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdy",bvolderivatives::dPERBZVOLdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBZVOLdy",bvolderivatives::dBGBZVOLdy,1)); + // continue; + // } if(*it == "GridCoordinates") { // Spatial coordinates for each cell outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("X",CellParams::XCRD,1)); @@ -766,16 +766,16 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti for (it = P::diagnosticVariableList.begin(); it != P::diagnosticVariableList.end(); it++) { - if(*it == "FluxB") { - // Overall magnetic flux through the simulation plane - diagnosticReducer->addOperator(new DRO::DiagnosticFluxB); - continue; - } - if(*it == "FluxE") { - // Overall electric flux through the simulation plane - diagnosticReducer->addOperator(new DRO::DiagnosticFluxE); - continue; - } + // if(*it == "FluxB") { + // // Overall magnetic flux through the simulation plane + // diagnosticReducer->addOperator(new DRO::DiagnosticFluxB); + // continue; + // } + // if(*it == "FluxE") { + // // Overall electric flux through the simulation plane + // diagnosticReducer->addOperator(new DRO::DiagnosticFluxE); + // continue; + // } if (*it == "populations_Blocks") { // Per-population total block counts for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index ca5710877..074ec17e4 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -160,15 +160,14 @@ namespace DRO { return true; } - DataReductionOperatorDerivatives::DataReductionOperatorDerivatives(const std::string& name,const unsigned int parameterIndex,const unsigned int vectorSize): - DataReductionOperatorCellParams(name,parameterIndex,vectorSize) { - - } - //a version with derivatives, this is the only function that is different - bool DataReductionOperatorDerivatives::setSpatialCell(const SpatialCell* cell) { - data = &(cell->derivatives[_parameterIndex]); - return true; - } + // DataReductionOperatorDerivatives::DataReductionOperatorDerivatives(const std::string& name,const unsigned int parameterIndex,const unsigned int vectorSize): + // DataReductionOperatorCellParams(name,parameterIndex,vectorSize) { + // } + // //a version with derivatives, this is the only function that is different + // bool DataReductionOperatorDerivatives::setSpatialCell(const SpatialCell* cell) { + // data = &(cell->derivatives[_parameterIndex]); + // return true; + // } DataReductionOperatorBVOLDerivatives::DataReductionOperatorBVOLDerivatives(const std::string& name,const unsigned int parameterIndex,const unsigned int vectorSize): diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 41b91a78a..4c59f022a 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -370,247 +370,247 @@ void getFieldsFromFsGrid( } -void getBgFieldsAndDerivativesFromFsGrid( - FsGrid< std::array, 2>& 
BgBGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid, - dccrg::Dccrg& mpiGrid, - const std::vector& cells -) { - // Setup transfer buffers - cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); - std::vector< std::array > transferBufferBGB(nCellsOnMaxRefLvl); - std::vector< std::array*> transferBufferPointerBGB; - std::vector< fsgrids::technical > transferBufferTechnical(nCellsOnMaxRefLvl); - std::vector< fsgrids::technical*> transferBufferPointerTechnical; +// void getBgFieldsAndDerivativesFromFsGrid( +// FsGrid< std::array, 2>& BgBGrid, +// FsGrid< fsgrids::technical, 2>& technicalGrid, +// dccrg::Dccrg& mpiGrid, +// const std::vector& cells +// ) { +// // Setup transfer buffers +// cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); +// std::vector< std::array > transferBufferBGB(nCellsOnMaxRefLvl); +// std::vector< std::array*> transferBufferPointerBGB; +// std::vector< fsgrids::technical > transferBufferTechnical(nCellsOnMaxRefLvl); +// std::vector< fsgrids::technical*> transferBufferPointerTechnical; - // Setup transfer pointers - BgBGrid.setupForTransferOut(nCellsOnMaxRefLvl); - technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); - int k = 0; - for(auto dccrgId : cells) { - const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); - // Store a pointer to the first fsgrid cell that maps to each dccrg Id - transferBufferPointerBGB.push_back(&transferBufferBGB[k]); - transferBufferPointerTechnical.push_back(&transferBufferTechnical[k]); - for (auto fsgridId : fsgridIds) { - std::array* thisCellData = &transferBufferBGB[k]; - BgBGrid.transferDataOut(fsgridId, thisCellData); - fsgrids::technical* thisCellDataTechnical = &transferBufferTechnical[k]; - technicalGrid.transferDataOut(fsgridId, thisCellDataTechnical); - k++; - } - } - // Do the transfer - BgBGrid.finishTransfersOut(); - technicalGrid.finishTransfersOut(); +// // Setup transfer pointers +// BgBGrid.setupForTransferOut(nCellsOnMaxRefLvl); +// technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); +// int k = 0; +// for(auto dccrgId : cells) { +// const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); +// // Store a pointer to the first fsgrid cell that maps to each dccrg Id +// transferBufferPointerBGB.push_back(&transferBufferBGB[k]); +// transferBufferPointerTechnical.push_back(&transferBufferTechnical[k]); +// for (auto fsgridId : fsgridIds) { +// std::array* thisCellData = &transferBufferBGB[k]; +// BgBGrid.transferDataOut(fsgridId, thisCellData); +// fsgrids::technical* thisCellDataTechnical = &transferBufferTechnical[k]; +// technicalGrid.transferDataOut(fsgridId, thisCellDataTechnical); +// k++; +// } +// } +// // Do the transfer +// BgBGrid.finishTransfersOut(); +// technicalGrid.finishTransfersOut(); - // Build lists of index pairs to dccrg and fsgrid - std::vector> iCellParams; - iCellParams.reserve(6); - iCellParams.push_back(std::make_pair(CellParams::BGBX, fsgrids::bgbfield::BGBX)); - iCellParams.push_back(std::make_pair(CellParams::BGBY, fsgrids::bgbfield::BGBY)); - iCellParams.push_back(std::make_pair(CellParams::BGBZ, fsgrids::bgbfield::BGBZ)); - iCellParams.push_back(std::make_pair(CellParams::BGBXVOL, fsgrids::bgbfield::BGBXVOL)); - iCellParams.push_back(std::make_pair(CellParams::BGBYVOL, fsgrids::bgbfield::BGBYVOL)); - iCellParams.push_back(std::make_pair(CellParams::BGBZVOL, fsgrids::bgbfield::BGBZVOL)); - std::vector> iDerivatives; - iDerivatives.reserve(6); - iDerivatives.push_back(std::make_pair(fieldsolver::dBGBxdy, 
fsgrids::bgbfield::dBGBxdy)); - iDerivatives.push_back(std::make_pair(fieldsolver::dBGBxdz, fsgrids::bgbfield::dBGBxdz)); - iDerivatives.push_back(std::make_pair(fieldsolver::dBGBydx, fsgrids::bgbfield::dBGBydx)); - iDerivatives.push_back(std::make_pair(fieldsolver::dBGBydz, fsgrids::bgbfield::dBGBydz)); - iDerivatives.push_back(std::make_pair(fieldsolver::dBGBzdx, fsgrids::bgbfield::dBGBzdx)); - iDerivatives.push_back(std::make_pair(fieldsolver::dBGBzdy, fsgrids::bgbfield::dBGBzdy)); - std::vector> iDerivativesBVOL; - iDerivativesBVOL.reserve(6); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBXVOLdy, fsgrids::bgbfield::dBGBXVOLdy)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBXVOLdz, fsgrids::bgbfield::dBGBXVOLdz)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBYVOLdx, fsgrids::bgbfield::dBGBYVOLdx)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBYVOLdz, fsgrids::bgbfield::dBGBYVOLdz)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBZVOLdx, fsgrids::bgbfield::dBGBZVOLdx)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBZVOLdy, fsgrids::bgbfield::dBGBZVOLdy)); +// // Build lists of index pairs to dccrg and fsgrid +// std::vector> iCellParams; +// iCellParams.reserve(6); +// iCellParams.push_back(std::make_pair(CellParams::BGBX, fsgrids::bgbfield::BGBX)); +// iCellParams.push_back(std::make_pair(CellParams::BGBY, fsgrids::bgbfield::BGBY)); +// iCellParams.push_back(std::make_pair(CellParams::BGBZ, fsgrids::bgbfield::BGBZ)); +// iCellParams.push_back(std::make_pair(CellParams::BGBXVOL, fsgrids::bgbfield::BGBXVOL)); +// iCellParams.push_back(std::make_pair(CellParams::BGBYVOL, fsgrids::bgbfield::BGBYVOL)); +// iCellParams.push_back(std::make_pair(CellParams::BGBZVOL, fsgrids::bgbfield::BGBZVOL)); +// std::vector> iDerivatives; +// iDerivatives.reserve(6); +// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBxdy, fsgrids::bgbfield::dBGBxdy)); +// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBxdz, fsgrids::bgbfield::dBGBxdz)); +// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBydx, fsgrids::bgbfield::dBGBydx)); +// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBydz, fsgrids::bgbfield::dBGBydz)); +// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBzdx, fsgrids::bgbfield::dBGBzdx)); +// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBzdy, fsgrids::bgbfield::dBGBzdy)); +// std::vector> iDerivativesBVOL; +// iDerivativesBVOL.reserve(6); +// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBXVOLdy, fsgrids::bgbfield::dBGBXVOLdy)); +// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBXVOLdz, fsgrids::bgbfield::dBGBXVOLdz)); +// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBYVOLdx, fsgrids::bgbfield::dBGBYVOLdx)); +// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBYVOLdz, fsgrids::bgbfield::dBGBYVOLdz)); +// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBZVOLdx, fsgrids::bgbfield::dBGBZVOLdx)); +// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBZVOLdy, fsgrids::bgbfield::dBGBZVOLdy)); - // Distribute data from the transfer buffer back into the appropriate mpiGrid places - // Disregard DO_NOT_COMPUTE cells - #pragma omp parallel for - for(uint i = 0; i < cells.size(); ++i) { +// // Distribute data from the transfer buffer back into the appropriate mpiGrid places +// // Disregard DO_NOT_COMPUTE cells +// #pragma omp parallel for +// for(uint i = 0; i < 
cells.size(); ++i) { - const CellID dccrgId = cells[i]; - auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); +// const CellID dccrgId = cells[i]; +// auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); - // Calculate the number of fsgrid cells we loop through - cint nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); - // Count the number of fsgrid cells we need to average into the current dccrg cell - int nCellsToSum = 0; +// // Calculate the number of fsgrid cells we loop through +// cint nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); +// // Count the number of fsgrid cells we need to average into the current dccrg cell +// int nCellsToSum = 0; - // TODO: Could optimize here by adding a separate branch for nCells == 1 with direct assignment of the value - // Could also do the average in a temporary value and only access grid structure once. +// // TODO: Could optimize here by adding a separate branch for nCells == 1 with direct assignment of the value +// // Could also do the average in a temporary value and only access grid structure once. - // Initialize values to 0 - for (auto j : iCellParams) cellParams[j.first] = 0.0; - for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; - for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] = 0.0; +// // Initialize values to 0 +// for (auto j : iCellParams) cellParams[j.first] = 0.0; +// for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; +// for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] = 0.0; - for(int iCell = 0; iCell < nCells; ++iCell) { - // The fsgrid cells that cover the i'th dccrg cell are pointed at by - // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. - // We want to average over those who are not DO_NOT_COMPUTE to get the value for the dccrg cell - if ((transferBufferPointerTechnical[i] + iCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { - continue; - } else { - nCellsToSum++; +// for(int iCell = 0; iCell < nCells; ++iCell) { +// // The fsgrid cells that cover the i'th dccrg cell are pointed at by +// // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. 
+// // We want to average over those who are not DO_NOT_COMPUTE to get the value for the dccrg cell +// if ((transferBufferPointerTechnical[i] + iCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { +// continue; +// } else { +// nCellsToSum++; - std::array* thisCellData = transferBufferPointerBGB[i] + iCell; +// std::array* thisCellData = transferBufferPointerBGB[i] + iCell; - for (auto j : iCellParams) cellParams[j.first] += thisCellData->at(j.second); - for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] += thisCellData->at(j.second); - for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] += thisCellData->at(j.second); - } - } +// for (auto j : iCellParams) cellParams[j.first] += thisCellData->at(j.second); +// for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] += thisCellData->at(j.second); +// for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] += thisCellData->at(j.second); +// } +// } - if (nCellsToSum > 0) { - // Divide by the number of cells to get the average - for (auto j : iCellParams) cellParams[j.first] /= nCellsToSum; - for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; - for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] /= nCellsToSum; - } - } -} - - -void getDerivativesFromFsGrid( - FsGrid< std::array, 2>& dperbGrid, - FsGrid< std::array, 2>& dmomentsGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid, - dccrg::Dccrg& mpiGrid, - const std::vector& cells -) { - - // Setup transfer buffers - cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); - std::vector< std::array > dperbTransferBuffer(nCellsOnMaxRefLvl); - std::vector< std::array > dmomentsTransferBuffer(nCellsOnMaxRefLvl); +// if (nCellsToSum > 0) { +// // Divide by the number of cells to get the average +// for (auto j : iCellParams) cellParams[j.first] /= nCellsToSum; +// for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; +// for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] /= nCellsToSum; +// } +// } +// } + + +// void getDerivativesFromFsGrid( +// FsGrid< std::array, 2>& dperbGrid, +// FsGrid< std::array, 2>& dmomentsGrid, +// FsGrid< fsgrids::technical, 2>& technicalGrid, +// dccrg::Dccrg& mpiGrid, +// const std::vector& cells +// ) { + +// // Setup transfer buffers +// cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); +// std::vector< std::array > dperbTransferBuffer(nCellsOnMaxRefLvl); +// std::vector< std::array > dmomentsTransferBuffer(nCellsOnMaxRefLvl); - std::vector< std::array*> dperbTransferBufferPointer; - std::vector< std::array*> dmomentsTransferBufferPointer; +// std::vector< std::array*> dperbTransferBufferPointer; +// std::vector< std::array*> dmomentsTransferBufferPointer; - std::vector< fsgrids::technical > transferBufferTechnical(nCellsOnMaxRefLvl); - std::vector< fsgrids::technical*> transferBufferPointerTechnical; +// std::vector< fsgrids::technical > transferBufferTechnical(nCellsOnMaxRefLvl); +// std::vector< fsgrids::technical*> transferBufferPointerTechnical; - dperbGrid.setupForTransferOut(nCellsOnMaxRefLvl); - dmomentsGrid.setupForTransferOut(nCellsOnMaxRefLvl); - technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); +// dperbGrid.setupForTransferOut(nCellsOnMaxRefLvl); +// dmomentsGrid.setupForTransferOut(nCellsOnMaxRefLvl); +// technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); - int k = 0; - for (auto dccrgId : cells) { +// int k = 0; +// for (auto 
dccrgId : cells) { - // Assuming same local size in all fsgrids - const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); - // Store a pointer to the first fsgrid cell that maps to each dccrg Id - dperbTransferBufferPointer.push_back(&dperbTransferBuffer[k]); - dmomentsTransferBufferPointer.push_back(&dmomentsTransferBuffer[k]); - transferBufferPointerTechnical.push_back(&transferBufferTechnical[k]); +// // Assuming same local size in all fsgrids +// const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); +// // Store a pointer to the first fsgrid cell that maps to each dccrg Id +// dperbTransferBufferPointer.push_back(&dperbTransferBuffer[k]); +// dmomentsTransferBufferPointer.push_back(&dmomentsTransferBuffer[k]); +// transferBufferPointerTechnical.push_back(&transferBufferTechnical[k]); - for (auto fsgridId : fsgridIds) { +// for (auto fsgridId : fsgridIds) { - std::array* dperbCellData = &dperbTransferBuffer[k]; - dperbGrid.transferDataOut(fsgridId, dperbCellData); - std::array* dmomentsCellData = &dmomentsTransferBuffer[k]; - dmomentsGrid.transferDataOut(fsgridId, dmomentsCellData); - fsgrids::technical* thisCellDataTechnical = &transferBufferTechnical[k]; - technicalGrid.transferDataOut(fsgridId, thisCellDataTechnical); - k++; - } - } +// std::array* dperbCellData = &dperbTransferBuffer[k]; +// dperbGrid.transferDataOut(fsgridId, dperbCellData); +// std::array* dmomentsCellData = &dmomentsTransferBuffer[k]; +// dmomentsGrid.transferDataOut(fsgridId, dmomentsCellData); +// fsgrids::technical* thisCellDataTechnical = &transferBufferTechnical[k]; +// technicalGrid.transferDataOut(fsgridId, thisCellDataTechnical); +// k++; +// } +// } - // Do the transfer - dperbGrid.finishTransfersOut(); - dmomentsGrid.finishTransfersOut(); - technicalGrid.finishTransfersOut(); - - std::vector> iDmoments; - std::vector> iDperb; - iDmoments.reserve(24); - iDmoments.push_back(std::make_pair(fieldsolver::drhomdx, fsgrids::dmoments::drhomdx)); - iDmoments.push_back(std::make_pair(fieldsolver::drhomdy, fsgrids::dmoments::drhomdy)); - iDmoments.push_back(std::make_pair(fieldsolver::drhomdz, fsgrids::dmoments::drhomdz)); - iDmoments.push_back(std::make_pair(fieldsolver::drhoqdx, fsgrids::dmoments::drhoqdx)); - iDmoments.push_back(std::make_pair(fieldsolver::drhoqdy, fsgrids::dmoments::drhoqdy)); - iDmoments.push_back(std::make_pair(fieldsolver::drhoqdz, fsgrids::dmoments::drhoqdz)); - iDmoments.push_back(std::make_pair(fieldsolver::dp11dx , fsgrids::dmoments::dp11dx )); - iDmoments.push_back(std::make_pair(fieldsolver::dp11dy , fsgrids::dmoments::dp11dy )); - iDmoments.push_back(std::make_pair(fieldsolver::dp11dz , fsgrids::dmoments::dp11dz )); - iDmoments.push_back(std::make_pair(fieldsolver::dp22dx , fsgrids::dmoments::dp22dx )); - iDmoments.push_back(std::make_pair(fieldsolver::dp22dy , fsgrids::dmoments::dp22dy )); - iDmoments.push_back(std::make_pair(fieldsolver::dp22dz , fsgrids::dmoments::dp22dz )); - iDmoments.push_back(std::make_pair(fieldsolver::dp33dx , fsgrids::dmoments::dp33dx )); - iDmoments.push_back(std::make_pair(fieldsolver::dp33dy , fsgrids::dmoments::dp33dy )); - iDmoments.push_back(std::make_pair(fieldsolver::dp33dz , fsgrids::dmoments::dp33dz )); - iDmoments.push_back(std::make_pair(fieldsolver::dVxdx , fsgrids::dmoments::dVxdx )); - iDmoments.push_back(std::make_pair(fieldsolver::dVxdy , fsgrids::dmoments::dVxdy )); - iDmoments.push_back(std::make_pair(fieldsolver::dVxdz , fsgrids::dmoments::dVxdz )); - iDmoments.push_back(std::make_pair(fieldsolver::dVydx , 
fsgrids::dmoments::dVydx )); - iDmoments.push_back(std::make_pair(fieldsolver::dVydy , fsgrids::dmoments::dVydy )); - iDmoments.push_back(std::make_pair(fieldsolver::dVydz , fsgrids::dmoments::dVydz )); - iDmoments.push_back(std::make_pair(fieldsolver::dVzdx , fsgrids::dmoments::dVzdx )); - iDmoments.push_back(std::make_pair(fieldsolver::dVzdy , fsgrids::dmoments::dVzdy )); - iDmoments.push_back(std::make_pair(fieldsolver::dVzdz , fsgrids::dmoments::dVzdz )); - - iDperb.reserve(15); - iDperb.push_back(std::make_pair(fieldsolver::dPERBxdy , fsgrids::dperb::dPERBxdy )); - iDperb.push_back(std::make_pair(fieldsolver::dPERBxdz , fsgrids::dperb::dPERBxdz )); - iDperb.push_back(std::make_pair(fieldsolver::dPERBydx , fsgrids::dperb::dPERBydx )); - iDperb.push_back(std::make_pair(fieldsolver::dPERBydz , fsgrids::dperb::dPERBydz )); - iDperb.push_back(std::make_pair(fieldsolver::dPERBzdx , fsgrids::dperb::dPERBzdx )); - iDperb.push_back(std::make_pair(fieldsolver::dPERBzdy , fsgrids::dperb::dPERBzdy )); - iDperb.push_back(std::make_pair(fieldsolver::dPERBxdyy, fsgrids::dperb::dPERBxdyy)); - iDperb.push_back(std::make_pair(fieldsolver::dPERBxdzz, fsgrids::dperb::dPERBxdzz)); - iDperb.push_back(std::make_pair(fieldsolver::dPERBydxx, fsgrids::dperb::dPERBydxx)); - iDperb.push_back(std::make_pair(fieldsolver::dPERBydzz, fsgrids::dperb::dPERBydzz)); - iDperb.push_back(std::make_pair(fieldsolver::dPERBzdxx, fsgrids::dperb::dPERBzdxx)); - iDperb.push_back(std::make_pair(fieldsolver::dPERBzdyy, fsgrids::dperb::dPERBzdyy)); - iDperb.push_back(std::make_pair(fieldsolver::dPERBxdyz, fsgrids::dperb::dPERBxdyz)); - iDperb.push_back(std::make_pair(fieldsolver::dPERBydxz, fsgrids::dperb::dPERBydxz)); - iDperb.push_back(std::make_pair(fieldsolver::dPERBzdxy, fsgrids::dperb::dPERBzdxy)); +// // Do the transfer +// dperbGrid.finishTransfersOut(); +// dmomentsGrid.finishTransfersOut(); +// technicalGrid.finishTransfersOut(); + +// std::vector> iDmoments; +// std::vector> iDperb; +// iDmoments.reserve(24); +// iDmoments.push_back(std::make_pair(fieldsolver::drhomdx, fsgrids::dmoments::drhomdx)); +// iDmoments.push_back(std::make_pair(fieldsolver::drhomdy, fsgrids::dmoments::drhomdy)); +// iDmoments.push_back(std::make_pair(fieldsolver::drhomdz, fsgrids::dmoments::drhomdz)); +// iDmoments.push_back(std::make_pair(fieldsolver::drhoqdx, fsgrids::dmoments::drhoqdx)); +// iDmoments.push_back(std::make_pair(fieldsolver::drhoqdy, fsgrids::dmoments::drhoqdy)); +// iDmoments.push_back(std::make_pair(fieldsolver::drhoqdz, fsgrids::dmoments::drhoqdz)); +// iDmoments.push_back(std::make_pair(fieldsolver::dp11dx , fsgrids::dmoments::dp11dx )); +// iDmoments.push_back(std::make_pair(fieldsolver::dp11dy , fsgrids::dmoments::dp11dy )); +// iDmoments.push_back(std::make_pair(fieldsolver::dp11dz , fsgrids::dmoments::dp11dz )); +// iDmoments.push_back(std::make_pair(fieldsolver::dp22dx , fsgrids::dmoments::dp22dx )); +// iDmoments.push_back(std::make_pair(fieldsolver::dp22dy , fsgrids::dmoments::dp22dy )); +// iDmoments.push_back(std::make_pair(fieldsolver::dp22dz , fsgrids::dmoments::dp22dz )); +// iDmoments.push_back(std::make_pair(fieldsolver::dp33dx , fsgrids::dmoments::dp33dx )); +// iDmoments.push_back(std::make_pair(fieldsolver::dp33dy , fsgrids::dmoments::dp33dy )); +// iDmoments.push_back(std::make_pair(fieldsolver::dp33dz , fsgrids::dmoments::dp33dz )); +// iDmoments.push_back(std::make_pair(fieldsolver::dVxdx , fsgrids::dmoments::dVxdx )); +// iDmoments.push_back(std::make_pair(fieldsolver::dVxdy , fsgrids::dmoments::dVxdy 
)); +// iDmoments.push_back(std::make_pair(fieldsolver::dVxdz , fsgrids::dmoments::dVxdz )); +// iDmoments.push_back(std::make_pair(fieldsolver::dVydx , fsgrids::dmoments::dVydx )); +// iDmoments.push_back(std::make_pair(fieldsolver::dVydy , fsgrids::dmoments::dVydy )); +// iDmoments.push_back(std::make_pair(fieldsolver::dVydz , fsgrids::dmoments::dVydz )); +// iDmoments.push_back(std::make_pair(fieldsolver::dVzdx , fsgrids::dmoments::dVzdx )); +// iDmoments.push_back(std::make_pair(fieldsolver::dVzdy , fsgrids::dmoments::dVzdy )); +// iDmoments.push_back(std::make_pair(fieldsolver::dVzdz , fsgrids::dmoments::dVzdz )); + +// iDperb.reserve(15); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdy , fsgrids::dperb::dPERBxdy )); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdz , fsgrids::dperb::dPERBxdz )); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBydx , fsgrids::dperb::dPERBydx )); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBydz , fsgrids::dperb::dPERBydz )); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdx , fsgrids::dperb::dPERBzdx )); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdy , fsgrids::dperb::dPERBzdy )); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdyy, fsgrids::dperb::dPERBxdyy)); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdzz, fsgrids::dperb::dPERBxdzz)); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBydxx, fsgrids::dperb::dPERBydxx)); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBydzz, fsgrids::dperb::dPERBydzz)); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdxx, fsgrids::dperb::dPERBzdxx)); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdyy, fsgrids::dperb::dPERBzdyy)); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdyz, fsgrids::dperb::dPERBxdyz)); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBydxz, fsgrids::dperb::dPERBydxz)); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdxy, fsgrids::dperb::dPERBzdxy)); - // Distribute data from the transfer buffers back into the appropriate mpiGrid places - // Disregard DO_NOT_COMPUTE cells - #pragma omp parallel for - for(uint i = 0; i < cells.size(); ++i) { +// // Distribute data from the transfer buffers back into the appropriate mpiGrid places +// // Disregard DO_NOT_COMPUTE cells +// #pragma omp parallel for +// for(uint i = 0; i < cells.size(); ++i) { - const CellID dccrgId = cells[i]; +// const CellID dccrgId = cells[i]; - // Calculate the number of fsgrid cells we loop through - cint nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); - // Count the number of fsgrid cells we need to average into the current dccrg cell - int nCellsToSum = 0; +// // Calculate the number of fsgrid cells we loop through +// cint nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); +// // Count the number of fsgrid cells we need to average into the current dccrg cell +// int nCellsToSum = 0; - for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; - for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; +// for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; +// for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; - for(int iCell = 0; iCell < nCells; ++iCell) { - // The fsgrid cells that cover the i'th dccrg cell are pointed at by - // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. 
- // We want to average over those who are not DO_NOT_COMPUTE to get the value for the dccrg cell - if ((transferBufferPointerTechnical[i] + iCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { - continue; - } else { - nCellsToSum++; +// for(int iCell = 0; iCell < nCells; ++iCell) { +// // The fsgrid cells that cover the i'th dccrg cell are pointed at by +// // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. +// // We want to average over those who are not DO_NOT_COMPUTE to get the value for the dccrg cell +// if ((transferBufferPointerTechnical[i] + iCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { +// continue; +// } else { +// nCellsToSum++; - std::array* dperb = dperbTransferBufferPointer[i] + iCell; - std::array* dmoments = dmomentsTransferBufferPointer[i] + iCell; +// std::array* dperb = dperbTransferBufferPointer[i] + iCell; +// std::array* dmoments = dmomentsTransferBufferPointer[i] + iCell; - for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] += dmoments->at(j.second); - for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] += dperb ->at(j.second); - } - } +// for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] += dmoments->at(j.second); +// for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] += dperb ->at(j.second); +// } +// } - if (nCellsToSum > 0) { - for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; - for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; - } - } -} +// if (nCellsToSum > 0) { +// for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; +// for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; +// } +// } +// } /* diff --git a/grid.cpp b/grid.cpp index ef16201c7..f3ac02a8f 100644 --- a/grid.cpp +++ b/grid.cpp @@ -288,24 +288,24 @@ void initializeGrids( phiprof::stop("Init moments"); } - phiprof::start("Initial fsgrid coupling"); - // Couple FSGrids to mpiGrid. Note that the coupling information is shared - // between them. - technicalGrid.setupForGridCoupling(cells.size()); + // phiprof::start("Initial fsgrid coupling"); + // // Couple FSGrids to mpiGrid. Note that the coupling information is shared + // // between them. + // technicalGrid.setupForGridCoupling(cells.size()); - // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. - // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. - for(auto& dccrgId : cells) { - const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); + // // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. + // // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. 
+ // for(auto& dccrgId : cells) { + // const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); - for (auto fsgridId : fsgridIds) { + // for (auto fsgridId : fsgridIds) { - technicalGrid.setGridCoupling(fsgridId, myRank); - } - } + // technicalGrid.setGridCoupling(fsgridId, myRank); + // } + // } - technicalGrid.finishGridCoupling(); - phiprof::stop("Initial fsgrid coupling"); + // technicalGrid.finishGridCoupling(); + // phiprof::stop("Initial fsgrid coupling"); phiprof::start("setProjectBField"); project.setProjectBField(perBGrid, BgBGrid, technicalGrid); @@ -314,8 +314,8 @@ void initializeGrids( phiprof::stop("setProjectBField"); phiprof::start("Finish fsgrid setup"); - getFieldDataFromFsGrid(perBGrid, technicalGrid, mpiGrid, cells, CellParams::PERBX); - getBgFieldsAndDerivativesFromFsGrid(BgBGrid, technicalGrid, mpiGrid, cells); + // getFieldDataFromFsGrid(perBGrid, technicalGrid, mpiGrid, cells, CellParams::PERBX); + // getBgFieldsAndDerivativesFromFsGrid(BgBGrid, technicalGrid, mpiGrid, cells); // WARNING this means moments and dt2 moments are the same here. feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,false); diff --git a/iowrite.cpp b/iowrite.cpp index b8dd249e7..9bfd95a8c 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -1270,7 +1270,7 @@ bool writeRestart(dccrg::Dccrg& mpiGrid, phiprof::start("reduceddataIO"); //write out DROs we need for restarts DataReducer restartReducer; - restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("background_B",CellParams::BGBX,3)); + //restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("background_B",CellParams::BGBX,3)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("perturbed_B",CellParams::PERBX,3)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("moments",CellParams::RHOM,5)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("moments_dt2",CellParams::RHOM_DT2,5)); @@ -1284,7 +1284,7 @@ bool writeRestart(dccrg::Dccrg& mpiGrid, restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("max_v_dt",CellParams::MAXVDT,1)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("max_r_dt",CellParams::MAXRDT,1)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("max_fields_dt",CellParams::MAXFDT,1)); - restartReducer.addOperator(new DRO::DataReductionOperatorDerivatives("derivatives",0,fieldsolver::N_SPATIAL_CELL_DERIVATIVES)); + // restartReducer.addOperator(new DRO::DataReductionOperatorDerivatives("derivatives",0,fieldsolver::N_SPATIAL_CELL_DERIVATIVES)); restartReducer.addOperator(new DRO::DataReductionOperatorBVOLDerivatives("Bvolume_derivatives",0,bvolderivatives::N_BVOL_DERIVATIVES)); restartReducer.addOperator(new DRO::MPIrank); restartReducer.addOperator(new DRO::BoundaryType); diff --git a/spatial_cell.cpp b/spatial_cell.cpp index 12d59952a..49eff572e 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -48,10 +48,10 @@ namespace spatial_cell { this->parameters[i]=0.0; } - // reset spatial cell derivatives - for (unsigned int i = 0; i < fieldsolver::N_SPATIAL_CELL_DERIVATIVES; i++) { - this->derivatives[i]=0; - } + // // reset spatial cell derivatives + // for (unsigned int i = 0; i < fieldsolver::N_SPATIAL_CELL_DERIVATIVES; i++) { + // this->derivatives[i]=0; + // } // reset BVOL derivatives for (unsigned int i = 0; i < bvolderivatives::N_BVOL_DERIVATIVES; i++) { @@ -87,7 +87,7 @@ namespace spatial_cell { mpiTransferEnabled(other.mpiTransferEnabled), populations(other.populations), 
parameters(other.parameters), - derivatives(other.derivatives), + // derivatives(other.derivatives), derivativesBVOL(other.derivativesBVOL), null_block_data(std::array {}) { } @@ -735,11 +735,11 @@ namespace spatial_cell { block_lengths.push_back(sizeof(Real)); } - // send spatial cell derivatives - if ((SpatialCell::mpi_transfer_type & Transfer::CELL_DERIVATIVES)!=0){ - displacements.push_back((uint8_t*) &(this->derivatives[0]) - (uint8_t*) this); - block_lengths.push_back(sizeof(Real) * fieldsolver::N_SPATIAL_CELL_DERIVATIVES); - } + // // send spatial cell derivatives + // if ((SpatialCell::mpi_transfer_type & Transfer::CELL_DERIVATIVES)!=0){ + // displacements.push_back((uint8_t*) &(this->derivatives[0]) - (uint8_t*) this); + // block_lengths.push_back(sizeof(Real) * fieldsolver::N_SPATIAL_CELL_DERIVATIVES); + // } // send spatial cell BVOL derivatives if ((SpatialCell::mpi_transfer_type & Transfer::CELL_BVOL_DERIVATIVES)!=0){ @@ -752,11 +752,11 @@ namespace spatial_cell { block_lengths.push_back(sizeof(uint64_t)); } - // send Hall term components - if ((SpatialCell::mpi_transfer_type & Transfer::CELL_HALL_TERM)!=0){ - displacements.push_back((uint8_t*) &(this->parameters[CellParams::EXHALL_000_100]) - (uint8_t*) this); - block_lengths.push_back(sizeof(Real) * 12); - } + // // send Hall term components + // if ((SpatialCell::mpi_transfer_type & Transfer::CELL_HALL_TERM)!=0){ + // displacements.push_back((uint8_t*) &(this->parameters[CellParams::EXHALL_000_100]) - (uint8_t*) this); + // block_lengths.push_back(sizeof(Real) * 12); + // } // send electron pressure gradient term components if ((SpatialCell::mpi_transfer_type & Transfer::CELL_GRADPE_TERM)!=0){ displacements.push_back((uint8_t*) &(this->parameters[CellParams::EXGRADPE]) - (uint8_t*) this); diff --git a/spatial_cell.hpp b/spatial_cell.hpp index 6bc7e00f0..388055601 100644 --- a/spatial_cell.hpp +++ b/spatial_cell.hpp @@ -317,7 +317,7 @@ namespace spatial_cell { // Member variables // //Real derivatives[fieldsolver::N_SPATIAL_CELL_DERIVATIVES]; /**< Derivatives of bulk variables in this spatial cell.*/ - std::array derivatives; /**< Derivatives of bulk variables in this spatial cell.*/ + //std::array derivatives; /**< Derivatives of bulk variables in this spatial cell.*/ //Real derivativesBVOL[bvolderivatives::N_BVOL_DERIVATIVES]; /**< Derivatives of BVOL needed by the acceleration. // * Separate array because it does not need to be communicated.*/ std::array derivativesBVOL; /**< Derivatives of BVOL needed by the acceleration. 
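For reference, the reduction that the commented-out gridGlue functions above implemented works as follows: each dccrg cell is covered by (2^(maxRefLvl - refLvl))^3 fsgrid cells (for example 64 cells for a refinement-level-0 cell when the maximum refinement level is 2), and every transferred quantity is averaged over the covering cells whose sysBoundaryFlag is not DO_NOT_COMPUTE. A minimal standalone sketch of that averaging step, using illustrative stand-in types rather than the real Vlasiator/FsGrid interfaces:

#include <vector>

enum SysBoundaryFlag { DO_NOT_COMPUTE, COMPUTE };

// Average one quantity over the fine fsgrid cells covering a coarse dccrg
// cell, skipping DO_NOT_COMPUTE cells, as in the commented-out
// getBgFieldsAndDerivativesFromFsGrid / getDerivativesFromFsGrid loops.
// fineValues and fineFlags are assumed to hold the covering cells in order.
double averageCoveringCells(const std::vector<double>& fineValues,
                            const std::vector<SysBoundaryFlag>& fineFlags,
                            int refLvlDifference) {
   const int nCells = 1 << (3 * refLvlDifference); // == (2^(maxRefLvl-refLvl))^3
   int nCellsToSum = 0;
   double sum = 0.0;
   for (int iCell = 0; iCell < nCells; ++iCell) {
      if (fineFlags[iCell] == DO_NOT_COMPUTE) continue; // disregard boundary cells
      sum += fineValues[iCell];
      ++nCellsToSum;
   }
   return nCellsToSum > 0 ? sum / nCellsToSum : 0.0;
}

With refLvlDifference == 0 the loop degenerates to copying the single covering cell, which is the nCells == 1 fast path that the TODO comment in the original loop suggests adding.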
diff --git a/vlasiator.cpp b/vlasiator.cpp
index 4a3c80cec..035df5fd2 100644
--- a/vlasiator.cpp
+++ b/vlasiator.cpp
@@ -547,11 +547,11 @@ int main(int argn,char* args[]) {
    if (P::writeInitialState) {
       phiprof::start("write-initial-state");
       phiprof::start("fsgrid-coupling-out");
-      getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX);
-      getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX);
-      getFieldDataFromFsGrid(EHallGrid,technicalGrid,mpiGrid,cells,CellParams::EXHALL_000_100);
-      getFieldDataFromFsGrid(EGradPeGrid,technicalGrid,mpiGrid,cells,CellParams::EXGRADPE);
-      getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, technicalGrid, mpiGrid, cells);
+      // getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX);
+      // getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX);
+      // getFieldDataFromFsGrid(EHallGrid,technicalGrid,mpiGrid,cells,CellParams::EXHALL_000_100);
+      // getFieldDataFromFsGrid(EGradPeGrid,technicalGrid,mpiGrid,cells,CellParams::EXGRADPE);
+      // getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, technicalGrid, mpiGrid, cells);
       phiprof::stop("fsgrid-coupling-out");
       if (myRank == MASTER_RANK)
@@ -735,14 +735,16 @@ int main(int argn,char* args[]) {
          it != P::diagnosticVariableList.end();
          it++) {
         if (*it == "FluxB") {
-           phiprof::start("fsgrid-coupling-out");
-           getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX);
-           phiprof::stop("fsgrid-coupling-out");
-        }
+           if(myRank == MASTER_RANK) cerr << __FILE__ << " " << __LINE__ << " ERROR: Diagnostic output from FsGrid is no longer supported!" << endl;
+           // phiprof::start("fsgrid-coupling-out");
+           // getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX);
+           // phiprof::stop("fsgrid-coupling-out");
+        }
        if (*it == "FluxE") {
-           phiprof::start("fsgrid-coupling-out");
-           getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX);
-           phiprof::stop("fsgrid-coupling-out");
+           if(myRank == MASTER_RANK) cerr << __FILE__ << " " << __LINE__ << " ERROR: Diagnostic output from FsGrid is no longer supported!"
<< endl; + // phiprof::start("fsgrid-coupling-out"); + // getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); + // phiprof::stop("fsgrid-coupling-out"); } } @@ -758,42 +760,42 @@ int main(int argn,char* args[]) { // write system, loop through write classes for (uint i = 0; i < P::systemWriteTimeInterval.size(); i++) { if (P::systemWriteTimeInterval[i] >= 0.0 && - P::t >= P::systemWrites[i] * P::systemWriteTimeInterval[i] - DT_EPSILON) { - if (extractFsGridFields) { - vector::const_iterator it; - for (it = P::outputVariableList.begin(); - it != P::outputVariableList.end(); - it++) { - if (*it == "B" || - *it == "PerturbedB" - ) { - phiprof::start("fsgrid-coupling-out"); - getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX); - phiprof::stop("fsgrid-coupling-out"); - } - if (*it == "E") { - phiprof::start("fsgrid-coupling-out"); - getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); - phiprof::stop("fsgrid-coupling-out"); - } - if (*it == "HallE") { - phiprof::start("fsgrid-coupling-out"); - getFieldDataFromFsGrid(EHallGrid,technicalGrid,mpiGrid,cells,CellParams::EXHALL_000_100); - phiprof::stop("fsgrid-coupling-out"); - } - if (*it == "GradPeE") { - phiprof::start("fsgrid-coupling-out"); - getFieldDataFromFsGrid(EGradPeGrid,technicalGrid,mpiGrid,cells,CellParams::EXGRADPE); - phiprof::stop("fsgrid-coupling-out"); - } - if (*it == "derivs") { - phiprof::start("fsgrid-coupling-out"); - getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, technicalGrid, mpiGrid, cells); - phiprof::stop("fsgrid-coupling-out"); - } - } - extractFsGridFields = false; - } + P::t >= P::systemWrites[i] * P::systemWriteTimeInterval[i] - DT_EPSILON) { + // if (extractFsGridFields) { + // vector::const_iterator it; + // for (it = P::outputVariableList.begin(); + // it != P::outputVariableList.end(); + // it++) { + // if (*it == "B" || + // *it == "PerturbedB" + // ) { + // phiprof::start("fsgrid-coupling-out"); + // getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX); + // phiprof::stop("fsgrid-coupling-out"); + // } + // if (*it == "E") { + // phiprof::start("fsgrid-coupling-out"); + // getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); + // phiprof::stop("fsgrid-coupling-out"); + // } + // if (*it == "HallE") { + // phiprof::start("fsgrid-coupling-out"); + // getFieldDataFromFsGrid(EHallGrid,technicalGrid,mpiGrid,cells,CellParams::EXHALL_000_100); + // phiprof::stop("fsgrid-coupling-out"); + // } + // if (*it == "GradPeE") { + // phiprof::start("fsgrid-coupling-out"); + // getFieldDataFromFsGrid(EGradPeGrid,technicalGrid,mpiGrid,cells,CellParams::EXGRADPE); + // phiprof::stop("fsgrid-coupling-out"); + // } + // if (*it == "derivs") { + // phiprof::start("fsgrid-coupling-out"); + // getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, technicalGrid, mpiGrid, cells); + // phiprof::stop("fsgrid-coupling-out"); + // } + // } + // extractFsGridFields = false; + // } phiprof::start("write-system"); logFile << "(IO): Writing spatial cell and reduced system data to disk, tstep = " << P::tstep << " t = " << P::t << endl << writeVerbose; From c352a7c811d0e3ba93ee2e2ab509cdf5802a8348 Mon Sep 17 00:00:00 2001 From: ykempf Date: Tue, 14 May 2019 11:25:05 +0300 Subject: [PATCH 405/602] Removed Poisson solver, electric sail and antisymmetric from Makefile. 
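The Poisson objects were grouped in OBJS_POISSON and linked separately into the vlasiator target, so the group, its compile rules and its mention in the link rule are dropped together with the electric_sail and antisymmetric objects listed in OBJS.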
--- Makefile | 33 ++++----------------------------- 1 file changed, 4 insertions(+), 29 deletions(-) diff --git a/Makefile b/Makefile index 4f092b8ea..49d618dfc 100644 --- a/Makefile +++ b/Makefile @@ -134,7 +134,6 @@ DEPS_PROJECTS = projects/project.h projects/project.cpp \ projects/Diffusion/Diffusion.h projects/Diffusion/Diffusion.cpp \ projects/Dispersion/Dispersion.h projects/Dispersion/Dispersion.cpp \ projects/Distributions/Distributions.h projects/Distributions/Distributions.cpp \ - projects/ElectricSail/electric_sail.h projects/ElectricSail/electric_sail.cpp \ projects/Firehose/Firehose.h projects/Firehose/Firehose.cpp \ projects/Flowthrough/Flowthrough.h projects/Flowthrough/Flowthrough.cpp \ projects/Fluctuations/Fluctuations.h projects/Fluctuations/Fluctuations.cpp \ @@ -187,10 +186,10 @@ DEPS_VLSVMOVER_AMR = ${DEPS_CELL} vlasovsolver_amr/vlasovmover.cpp vlasovsolver_ OBJS = version.o memoryallocation.o backgroundfield.o quadr.o dipole.o linedipole.o constantfield.o integratefunction.o \ datareducer.o datareductionoperator.o dro_populations.o amr_refinement_criteria.o\ - donotcompute.o ionosphere.o outflow.o setbyuser.o setmaxwellian.o antisymmetric.o\ + donotcompute.o ionosphere.o outflow.o setbyuser.o setmaxwellian.o\ sysboundary.o sysboundarycondition.o project_boundary.o particle_species.o\ project.o projectTriAxisSearch.o read_gaussian_population.o\ - Alfven.o Diffusion.o Dispersion.o Distributions.o electric_sail.o Firehose.o\ + Alfven.o Diffusion.o Dispersion.o Distributions.o Firehose.o\ Flowthrough.o Fluctuations.o Harris.o KHB.o Larmor.o Magnetosphere.o MultiPeak.o\ VelocityBox.o Riemann1.o Shock.o Template.o test_fp.o testAmr.o testHall.o test_trans.o\ IPShock.o object_wrapper.o\ @@ -209,9 +208,6 @@ endif # Add field solver objects OBJS_FSOLVER = ldz_magnetic_field.o ldz_volume.o derivatives.o ldz_electric_field.o ldz_hall.o ldz_gradpe.o -# Add Poisson solver objects -OBJS_POISSON = poisson_solver.o poisson_test.o poisson_solver_jacobi.o poisson_solver_sor.o poisson_solver_cg.o - help: @echo '' @echo 'make c(lean) delete all generated files' @@ -272,9 +268,6 @@ datareductionoperator.o: ${DEPS_COMMON} ${DEPS_CELL} parameters.h datareduction/ dro_populations.o: ${DEPS_COMMON} ${DEPS_CELL} parameters.h datareduction/datareductionoperator.h datareduction/datareductionoperator.cpp datareduction/dro_populations.h datareduction/dro_populations.cpp ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c datareduction/dro_populations.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_MPI} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} -antisymmetric.o: ${DEPS_SYSBOUND} sysboundary/antisymmetric.h sysboundary/antisymmetric.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/antisymmetric.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} - donotcompute.o: ${DEPS_SYSBOUND} sysboundary/donotcompute.h sysboundary/donotcompute.cpp ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/donotcompute.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} @@ -319,9 +312,6 @@ Dispersion.o: ${DEPS_COMMON} projects/Dispersion/Dispersion.h projects/Dispersio Distributions.o: ${DEPS_COMMON} projects/Distributions/Distributions.h projects/Distributions/Distributions.cpp ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Distributions/Distributions.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} -electric_sail.o: ${DEPS_COMMON} projects/read_gaussian_population.h projects/ElectricSail/electric_sail.h projects/ElectricSail/electric_sail.cpp - ${CMP} 
${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/ElectricSail/electric_sail.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} - Firehose.o: ${DEPS_COMMON} projects/Firehose/Firehose.h projects/Firehose/Firehose.cpp ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/Firehose/Firehose.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} @@ -385,21 +375,6 @@ project.o: ${DEPS_COMMON} $(DEPS_PROJECTS) projectTriAxisSearch.o: ${DEPS_COMMON} $(DEPS_PROJECTS) projects/projectTriAxisSearch.h projects/projectTriAxisSearch.cpp ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c projects/projectTriAxisSearch.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_FSGRID} -poisson_solver.o: ${DEPS_COMMON} ${DEPS_CELL} poisson_solver/poisson_solver.h poisson_solver/poisson_solver.cpp - $(CMP) $(CXXFLAGS) $(FLAGS) ${MATHFLAGS} -c poisson_solver/poisson_solver.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} - -poisson_solver_cg.o: ${DEPS_COMMON} ${DEPS_CELL} poisson_solver/poisson_solver.h poisson_solver/poisson_solver_cg.h poisson_solver/poisson_solver_cg.cpp - $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c poisson_solver/poisson_solver_cg.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} - -poisson_solver_jacobi.o: ${DEPS_COMMON} ${DEPS_CELL} poisson_solver/poisson_solver.h poisson_solver/poisson_solver_jacobi.h poisson_solver/poisson_solver_jacobi.cpp - $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c poisson_solver/poisson_solver_jacobi.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} - -poisson_solver_sor.o: ${DEPS_COMMON} ${DEPS_CELL} poisson_solver/poisson_solver.h poisson_solver/poisson_solver_sor.h poisson_solver/poisson_solver_sor.cpp - $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c poisson_solver/poisson_solver_sor.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_BOOST} ${INC_ZOLTAN} - -poisson_test.o: ${DEPS_COMMON} ${DEPS_CELL} projects/project.h projects/project.cpp projects/Poisson/poisson_test.h projects/Poisson/poisson_test.cpp - $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c projects/Poisson/poisson_test.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} - spatial_cell.o: ${DEPS_CELL} spatial_cell.cpp $(CMP) $(CXXFLAGS) ${MATHFLAGS} $(FLAGS) -c spatial_cell.cpp $(INC_BOOST) ${INC_DCCRG} ${INC_EIGEN} ${INC_ZOLTAN} ${INC_VECTORCLASS} ${INC_FSGRID} @@ -504,8 +479,8 @@ object_wrapper.o: $(DEPS_COMMON) object_wrapper.h object_wrapper.cpp ${CMP} ${CXXFLAGS} ${FLAGS} -c object_wrapper.cpp ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_FSGRID} # Make executable -vlasiator: $(OBJS) $(OBJS_POISSON) $(OBJS_FSOLVER) - $(LNK) ${LDFLAGS} -o ${EXE} $(OBJS) $(LIBS) $(OBJS_POISSON) $(OBJS_FSOLVER) +vlasiator: $(OBJS) $(OBJS_FSOLVER) + $(LNK) ${LDFLAGS} -o ${EXE} $(OBJS) $(LIBS) $(OBJS_FSOLVER) #/// TOOLS section///// From b94d52c6bdb475414a0f18a9a188ff3d48152342 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Tue, 14 May 2019 11:26:23 +0300 Subject: [PATCH 406/602] Correction: Compiles now --- spatial_cell.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/spatial_cell.cpp b/spatial_cell.cpp index 49eff572e..b54652cbe 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -675,11 +675,11 @@ namespace spatial_cell { block_lengths.push_back(sizeof(Real) * 3); } - // send BGBX BGBY BGBZ - if ((SpatialCell::mpi_transfer_type & Transfer::CELL_BGB)!=0){ - displacements.push_back((uint8_t*) &(this->parameters[CellParams::BGBX]) - (uint8_t*) this); - block_lengths.push_back(sizeof(Real) * 3); - } +// // send BGBX BGBY 
BGBZ +// if ((SpatialCell::mpi_transfer_type & Transfer::CELL_BGB)!=0){ +// displacements.push_back((uint8_t*) &(this->parameters[CellParams::BGBX]) - (uint8_t*) this); +// block_lengths.push_back(sizeof(Real) * 3); +// } // send BGBXVOL BGBYVOL BGBZVOL PERBXVOL PERBYVOL PERBZVOL if ((SpatialCell::mpi_transfer_type & Transfer::CELL_BVOL)!=0){ From faf0d19950f3ed1df898fb31bc7251a231627f5e Mon Sep 17 00:00:00 2001 From: ykempf Date: Tue, 14 May 2019 11:56:14 +0300 Subject: [PATCH 407/602] Ripped out Poisson solver, electric sail, antisymmetric and project_boundary boundaries. Compiles. --- Makefile | 5 +- common.h | 10 - datareduction/datareducer.cpp | 21 - definitions.h | 1 - fieldsolver/ldz_volume.cpp | 91 --- grid.cpp | 10 - parameters.cpp | 7 +- parameters.h | 1 - poisson_solver/poisson_solver.cpp | 514 ------------- poisson_solver/poisson_solver.h | 248 ------- poisson_solver/poisson_solver_cg.cpp | 702 ------------------ poisson_solver/poisson_solver_cg.h | 98 --- poisson_solver/poisson_solver_jacobi.cpp | 165 ---- poisson_solver/poisson_solver_jacobi.h | 58 -- poisson_solver/poisson_solver_sor.cpp | 609 --------------- poisson_solver/poisson_solver_sor.h | 78 -- projects/ElectricSail/ElectricSail.cfg | 160 ---- projects/ElectricSail/electric_sail.cpp | 403 ---------- projects/ElectricSail/electric_sail.h | 100 --- projects/ElectricSail/esail_expressions.xml | 183 ----- projects/Poisson/PoissonTest.cfg | 88 --- projects/Poisson/poisson_test.cpp | 117 --- projects/Poisson/poisson_test.h | 67 -- projects/project.cpp | 10 - spatial_cell.cpp | 10 - spatial_cell.hpp | 8 +- sysboundary/antisymmetric.cpp | 411 ---------- sysboundary/antisymmetric.h | 132 ---- sysboundary/project_boundary.cpp | 331 --------- sysboundary/project_boundary.h | 140 ---- sysboundary/sysboundary.cpp | 50 -- .../acctest_1_maxw_500k_30kms_1deg.cfg | 4 - tools/esail_intpol.cpp | 153 ---- vlasiator.cpp | 18 - vlasovsolver/cpu_acc_transform.cpp | 40 +- 35 files changed, 16 insertions(+), 5027 deletions(-) delete mode 100644 poisson_solver/poisson_solver.cpp delete mode 100644 poisson_solver/poisson_solver.h delete mode 100644 poisson_solver/poisson_solver_cg.cpp delete mode 100644 poisson_solver/poisson_solver_cg.h delete mode 100644 poisson_solver/poisson_solver_jacobi.cpp delete mode 100644 poisson_solver/poisson_solver_jacobi.h delete mode 100644 poisson_solver/poisson_solver_sor.cpp delete mode 100644 poisson_solver/poisson_solver_sor.h delete mode 100644 projects/ElectricSail/ElectricSail.cfg delete mode 100644 projects/ElectricSail/electric_sail.cpp delete mode 100644 projects/ElectricSail/electric_sail.h delete mode 100644 projects/ElectricSail/esail_expressions.xml delete mode 100644 projects/Poisson/PoissonTest.cfg delete mode 100644 projects/Poisson/poisson_test.cpp delete mode 100644 projects/Poisson/poisson_test.h delete mode 100644 sysboundary/antisymmetric.cpp delete mode 100644 sysboundary/antisymmetric.h delete mode 100644 sysboundary/project_boundary.cpp delete mode 100644 sysboundary/project_boundary.h delete mode 100644 tools/esail_intpol.cpp diff --git a/Makefile b/Makefile index 49d618dfc..44a55cf35 100644 --- a/Makefile +++ b/Makefile @@ -187,7 +187,7 @@ DEPS_VLSVMOVER_AMR = ${DEPS_CELL} vlasovsolver_amr/vlasovmover.cpp vlasovsolver_ OBJS = version.o memoryallocation.o backgroundfield.o quadr.o dipole.o linedipole.o constantfield.o integratefunction.o \ datareducer.o datareductionoperator.o dro_populations.o amr_refinement_criteria.o\ donotcompute.o ionosphere.o outflow.o setbyuser.o 
setmaxwellian.o\ - sysboundary.o sysboundarycondition.o project_boundary.o particle_species.o\ + sysboundary.o sysboundarycondition.o particle_species.o\ project.o projectTriAxisSearch.o read_gaussian_population.o\ Alfven.o Diffusion.o Dispersion.o Distributions.o Firehose.o\ Flowthrough.o Fluctuations.o Harris.o KHB.o Larmor.o Magnetosphere.o MultiPeak.o\ @@ -278,9 +278,6 @@ ionosphere.o: ${DEPS_SYSBOUND} sysboundary/ionosphere.h sysboundary/ionosphere.c mesh_data_container.o: ${DEPS_COMMON} mesh_data_container.h mesh_data.h ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c mesh_data_container.cpp ${INC_VLSV} ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_FSGRID} -project_boundary.o: ${DEPS_SYSBOUND} sysboundary/project_boundary.h sysboundary/project_boundary.cpp - ${CMP} ${CXXFLAGS} ${FLAGS} ${MATHFLAGS} -c sysboundary/project_boundary.cpp ${INC_DCCRG} ${INC_FSGRID} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} ${INC_VLSV} - outflow.o: ${DEPS_COMMON} sysboundary/outflow.h sysboundary/outflow.cpp projects/project.h projects/project.cpp fieldsolver/ldz_magnetic_field.cpp ${CMP} ${CXXFLAGS} ${FLAGS} -c sysboundary/outflow.cpp ${INC_FSGRID} ${INC_DCCRG} ${INC_ZOLTAN} ${INC_BOOST} ${INC_EIGEN} diff --git a/common.h b/common.h index 8ca859c52..b74e36012 100644 --- a/common.h +++ b/common.h @@ -208,13 +208,6 @@ namespace CellParams { MAXFDT, /*!< maximum timestep allowed in ordinary space by fieldsolver for this cell**/ LBWEIGHTCOUNTER, /*!< Counter for storing compute time weights needed by the load balancing**/ ISCELLSAVINGF, /*!< Value telling whether a cell is saving its distribution function when partial f data is written out. */ - PHI, /*!< Electrostatic potential.*/ - PHI_TMP, /*!< Temporary electrostatic potential.*/ - RHOQ_TOT, /*!< Total charge density, summed over all particle populations.*/ - RHOQ_EXT, /*addOperator(new DRO::DataReductionOperatorCellParams("DZ",CellParams::DZ,1)); continue; } - - if (*it == "Potential") { - // Poisson soler potential - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("poisson/potential",CellParams::PHI,1)); - continue; - } - if (*it == "BackgroundVolE") { - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("poisson/BGE_vol",CellParams::BGEXVOL,3)); - continue; - } - if (*it == "ChargeDensity") { - // Poisson-solver charge density - // TODO: This is redundant with Rhoq - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("poisson/rho_q",CellParams::RHOQ_TOT,1)); - continue; - } - if (*it == "PotentialError") { - // Poisson solver convergence measure - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("poisson/pot_error",CellParams::PHI_TMP,1)); - continue; - } if (*it == "MeshData") { outputReducer->addOperator(new DRO::VariableMeshData); continue; diff --git a/definitions.h b/definitions.h index abd033c6b..c8d20c114 100644 --- a/definitions.h +++ b/definitions.h @@ -112,7 +112,6 @@ typedef Realf (*AmrVelRefinement)(const Realf* velBlock); #define SHIFT_M_X_NEIGHBORHOOD_ID 17 //Shift in -x direction #define SHIFT_M_Y_NEIGHBORHOOD_ID 18 //Shift in -y direction #define SHIFT_M_Z_NEIGHBORHOOD_ID 19 //Shift in -z direction -#define POISSON_NEIGHBORHOOD_ID 20 // Nearest face neighbors //fieldsolver stencil. 
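// [Editor's note, added: the POISSON_NEIGHBORHOOD_ID removed above was the
// solver's six-entry nearest-face-neighbor stencil, registered in the
// initializeStencils() hunk of grid.cpp below; the field solver keeps its
// own, wider stencil defined next.]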
#define FS_STENCIL_WIDTH 2 diff --git a/fieldsolver/ldz_volume.cpp b/fieldsolver/ldz_volume.cpp index adcbae30e..f385e17bb 100644 --- a/fieldsolver/ldz_volume.cpp +++ b/fieldsolver/ldz_volume.cpp @@ -67,97 +67,6 @@ void calculateVolumeAveragedFields( volGrid0->at(fsgrids::volfields::PERBXVOL) = perturbedCoefficients[Rec::a_0]; volGrid0->at(fsgrids::volfields::PERBYVOL) = perturbedCoefficients[Rec::b_0]; volGrid0->at(fsgrids::volfields::PERBZVOL) = perturbedCoefficients[Rec::c_0]; - - if(P::propagatePotential) { - // Calculate volume average of E (FIXME NEEDS IMPROVEMENT): - std::array * EGrid_i1j1k1 = EGrid.get(i,j,k); - if ( technicalGrid.get(i,j,k)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || - (technicalGrid.get(i,j,k)->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && technicalGrid.get(i,j,k)->sysBoundaryLayer == 1) - ) { - #ifdef DEBUG_FSOLVER - bool ok = true; - if (technicalGrid.get(i ,j+1,k ) == NULL) ok = false; - if (technicalGrid.get(i ,j ,k+1) == NULL) ok = false; - if (technicalGrid.get(i ,j+1,k+1) == NULL) ok = false; - if (ok == false) { - stringstream ss; - ss << "ERROR, got NULL neighbor in " << __FILE__ << ":" << __LINE__ << endl; - cerr << ss.str(); exit(1); - } - #endif - - std::array * EGrid_i1j2k1 = EGrid.get(i ,j+1,k ); - std::array * EGrid_i1j1k2 = EGrid.get(i ,j ,k+1); - std::array * EGrid_i1j2k2 = EGrid.get(i ,j+1,k+1); - - CHECK_FLOAT(EGrid_i1j1k1->at(fsgrids::efield::EX)) - CHECK_FLOAT(EGrid_i1j2k1->at(fsgrids::efield::EX)) - CHECK_FLOAT(EGrid_i1j1k2->at(fsgrids::efield::EX)) - CHECK_FLOAT(EGrid_i1j2k2->at(fsgrids::efield::EX)) - volGrid0->at(fsgrids::volfields::EXVOL) = FOURTH*(EGrid_i1j1k1->at(fsgrids::efield::EX) + EGrid_i1j2k1->at(fsgrids::efield::EX) + EGrid_i1j1k2->at(fsgrids::efield::EX) + EGrid_i1j2k2->at(fsgrids::efield::EX)); - CHECK_FLOAT(volGrid0->at(fsgrids::volfields::EXVOL)) - } else { - volGrid0->at(fsgrids::volfields::EXVOL) = 0.0; - } - - if ( technicalGrid.get(i,j,k)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || - (technicalGrid.get(i,j,k)->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && technicalGrid.get(i,j,k)->sysBoundaryLayer == 1) - ) { - #ifdef DEBUG_FSOLVER - bool ok = true; - if (technicalGrid.get(i+1,j ,k ) == NULL) ok = false; - if (technicalGrid.get(i ,j ,k+1) == NULL) ok = false; - if (technicalGrid.get(i+1,j ,k+1) == NULL) ok = false; - if (ok == false) { - stringstream ss; - ss << "ERROR, got NULL neighbor in " << __FILE__ << ":" << __LINE__ << endl; - cerr << ss.str(); exit(1); - } - #endif - - std::array * EGrid_i2j1k1 = EGrid.get(i+1,j ,k ); - std::array * EGrid_i1j1k2 = EGrid.get(i ,j ,k+1); - std::array * EGrid_i2j1k2 = EGrid.get(i+1,j ,k+1); - - CHECK_FLOAT(EGrid_i1j1k1->at(fsgrids::efield::EY)) - CHECK_FLOAT(EGrid_i2j1k1->at(fsgrids::efield::EY)) - CHECK_FLOAT(EGrid_i1j1k2->at(fsgrids::efield::EY)) - CHECK_FLOAT(EGrid_i2j1k2->at(fsgrids::efield::EY)) - volGrid0->at(fsgrids::volfields::EYVOL) = FOURTH*(EGrid_i1j1k1->at(fsgrids::efield::EY) + EGrid_i2j1k1->at(fsgrids::efield::EY) + EGrid_i1j1k2->at(fsgrids::efield::EY) + EGrid_i2j1k2->at(fsgrids::efield::EY)); - CHECK_FLOAT(volGrid0->at(fsgrids::volfields::EYVOL)) - } else { - volGrid0->at(fsgrids::volfields::EYVOL) = 0.0; - } - - if ( technicalGrid.get(i,j,k)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || - (technicalGrid.get(i,j,k)->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && technicalGrid.get(i,j,k)->sysBoundaryLayer == 1) - ) { - #ifdef DEBUG_FSOLVER - bool ok = true; - if (technicalGrid.get(i+1,j ,k ) == NULL) ok = 
false; - if (technicalGrid.get(i ,j+1,k ) == NULL) ok = false; - if (technicalGrid.get(i+1,j+1,k ) == NULL) ok = false; - if (ok == false) { - stringstream ss; - ss << "ERROR, got NULL neighbor in " << __FILE__ << ":" << __LINE__ << endl; - cerr << ss.str(); exit(1); - } - #endif - - std::array * EGrid_i2j1k1 = EGrid.get(i+1,j ,k ); - std::array * EGrid_i1j2k1 = EGrid.get(i ,j+1,k ); - std::array * EGrid_i2j2k1 = EGrid.get(i+1,j+1,k ); - - CHECK_FLOAT(EGrid_i1j1k1->at(fsgrids::efield::EZ)) - CHECK_FLOAT(EGrid_i2j1k1->at(fsgrids::efield::EZ)) - CHECK_FLOAT(EGrid_i1j2k1->at(fsgrids::efield::EZ)) - CHECK_FLOAT(EGrid_i2j2k1->at(fsgrids::efield::EZ)) - volGrid0->at(fsgrids::volfields::EZVOL) = FOURTH*(EGrid_i1j1k1->at(fsgrids::efield::EZ) + EGrid_i2j1k1->at(fsgrids::efield::EZ) + EGrid_i1j2k1->at(fsgrids::efield::EZ) + EGrid_i2j2k1->at(fsgrids::efield::EZ)); - CHECK_FLOAT(volGrid0->at(fsgrids::volfields::EZVOL)) - } else { - volGrid0->at(fsgrids::volfields::EZVOL) = 0.0; - } - } } } } diff --git a/grid.cpp b/grid.cpp index ef16201c7..b1e93b75a 100644 --- a/grid.cpp +++ b/grid.cpp @@ -993,16 +993,6 @@ void initializeStencils(dccrg::Dccrg& mpi neighborhood.clear(); neighborhood.push_back({{0, 0, -1}}); mpiGrid.add_neighborhood(SHIFT_P_Z_NEIGHBORHOOD_ID, neighborhood); - - // Add face neighbors, needed for Poisson solver - neighborhood.clear(); - neighborhood.push_back({{-1, 0, 0}}); - neighborhood.push_back({{+1, 0, 0}}); - neighborhood.push_back({{ 0,-1, 0}}); - neighborhood.push_back({{ 0,+1, 0}}); - neighborhood.push_back({{ 0, 0,-1}}); - neighborhood.push_back({{ 0, 0,+1}}); - mpiGrid.add_neighborhood(POISSON_NEIGHBORHOOD_ID, neighborhood); } bool validateMesh(dccrg::Dccrg& mpiGrid,const uint popID) { diff --git a/parameters.cpp b/parameters.cpp index 1222e6485..9d41f222a 100644 --- a/parameters.cpp +++ b/parameters.cpp @@ -99,7 +99,6 @@ bool P::recalculateStencils = true; bool P::propagateVlasovAcceleration = true; bool P::propagateVlasovTranslation = true; bool P::propagateField = true; -bool P::propagatePotential = false; bool P::dynamicTimestep = true; @@ -167,13 +166,12 @@ bool Parameters::addParameters(){ Readparameters::add("io.write_as_float","If true, write in floats instead of doubles", false); Readparameters::add("io.restart_write_path", "Path to the location where restart files should be written. Defaults to the local directory, also if the specified destination is not writeable.", string("./")); - Readparameters::add("propagate_potential","Propagate electrostatic potential during the simulation",false); Readparameters::add("propagate_field","Propagate magnetic field during the simulation",true); Readparameters::add("propagate_vlasov_acceleration","Propagate distribution functions during the simulation in velocity space. If false, it is propagated with zero length timesteps.",true); Readparameters::add("propagate_vlasov_translation","Propagate distribution functions during the simulation in ordinary space. If false, it is propagated with zero length timesteps.",true); Readparameters::add("dynamic_timestep","If true, timestep is set based on CFL limits (default on)",true); Readparameters::add("hallMinimumRho", "Minimum rho value used for the Hall and electron pressure gradient terms in the Lorentz force and in the field solver. Default is very low and has no effect in practice.", 1.0); - Readparameters::add("project", "Specify the name of the project to use. 
Supported to date (20150610): Alfven Diffusion Dispersion Distributions Firehose Flowthrough Fluctuations Harris KHB Larmor Magnetosphere Multipeak PoissonTest Riemann1 Shock Shocktest Template test_fp testHall test_trans VelocityBox verificationLarmor", string("")); + Readparameters::add("project", "Specify the name of the project to use. Supported to date (20150610): Alfven Diffusion Dispersion Distributions Firehose Flowthrough Fluctuations Harris KHB Larmor Magnetosphere Multipeak Riemann1 Shock Shocktest Template test_fp testHall test_trans VelocityBox verificationLarmor", string("")); Readparameters::add("restart.filename","Restart from this vlsv file. No restart if empty file.",string("")); @@ -218,7 +216,7 @@ bool Parameters::addParameters(){ // Output variable parameters // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh - Readparameters::addComposing("variables.output", "List of data reduction operators (DROs) to add to the grid file output. Each variable to be added has to be on a new line output = XXX. Available (20190508): B fg_B BackgroundB vg_BackgroundB fg_BackgroundB PerturbedB vg_PerturbedB fg_PerturbedB E fg_E Rhom vg_Rhom fg_Rhom Rhoq vg_Rhoq fg_Rhoq populations_Rho V vg_V fg_V populations_V populations_moments_Backstream populations_moments_NonBackstream populations_EffectiveSparsityThreshold populations_RhoLossAdjust LBweight MaxVdt MaxRdt populations_MaxVdt populations_MaxRdt MaxFieldsdt MPIrank vg_rank FsGridRank fg_rank FsGridBoundaryType BoundaryType vg_BoundaryType fg_BoundaryType BoundaryLayer vg_BoundaryLayer fg_BoundaryLayer populations_Blocks fSaved populations_accSubcycles VolE vg_VolE fg_VolE HallE GradPeE VolB vg_VolB fg_VolB BackgroundVolB PerturbedVolB Pressure vg_Pressure fg_Pressure populations_PTensor derivs BVOLderivs GridCoordinates Potential BackgroundVolE ChargeDensity PotentialError MeshData"); + Readparameters::addComposing("variables.output", "List of data reduction operators (DROs) to add to the grid file output. Each variable to be added has to be on a new line output = XXX. Available (20190514): B fg_B BackgroundB vg_BackgroundB fg_BackgroundB PerturbedB vg_PerturbedB fg_PerturbedB E fg_E Rhom vg_Rhom fg_Rhom Rhoq vg_Rhoq fg_Rhoq populations_Rho V vg_V fg_V populations_V populations_moments_Backstream populations_moments_NonBackstream populations_EffectiveSparsityThreshold populations_RhoLossAdjust LBweight MaxVdt MaxRdt populations_MaxVdt populations_MaxRdt MaxFieldsdt MPIrank vg_rank FsGridRank fg_rank FsGridBoundaryType BoundaryType vg_BoundaryType fg_BoundaryType BoundaryLayer vg_BoundaryLayer fg_BoundaryLayer populations_Blocks fSaved populations_accSubcycles VolE vg_VolE fg_VolE HallE GradPeE VolB vg_VolB fg_VolB BackgroundVolB PerturbedVolB Pressure vg_Pressure fg_Pressure populations_PTensor derivs BVOLderivs GridCoordinates BackgroundVolE MeshData"); // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh Readparameters::addComposing("variables.diagnostic", "List of data reduction operators (DROs) to add to the diagnostic runtime output. Each variable to be added has to be on a new line diagnostic = XXX. 
Available (20190320): FluxB FluxE populations_Blocks Rhom populations_RhoLossAdjust LBweight populations_MaxVdt MaxVdt populations_MaxRdt MaxRdt MaxFieldsdt populations_MaxDistributionFunction populations_MinDistributionFunction"); @@ -352,7 +350,6 @@ bool Parameters::getParameters(){ } Readparameters::get("propagate_field",P::propagateField); - Readparameters::get("propagate_potential",P::propagatePotential); Readparameters::get("propagate_vlasov_acceleration",P::propagateVlasovAcceleration); Readparameters::get("propagate_vlasov_translation",P::propagateVlasovTranslation); Readparameters::get("dynamic_timestep",P::dynamicTimestep); diff --git a/parameters.h b/parameters.h index 3c9e3d80a..867eb37f9 100644 --- a/parameters.h +++ b/parameters.h @@ -92,7 +92,6 @@ struct Parameters { static bool recalculateStencils; /*!< If true, MPI stencils should be recalculated because of load balancing.*/ static bool propagateField; /*!< If true, magnetic field is propagated during the simulation.*/ - static bool propagatePotential; /*!< If true, electrostatic potential is solved during the simulation.*/ static bool propagateVlasovAcceleration; /*!< If true, distribution function is propagated in velocity space during the simulation.*/ static bool propagateVlasovTranslation; /*!< If true, distribution function is propagated in ordinary space during the simulation.*/ diff --git a/poisson_solver/poisson_solver.cpp b/poisson_solver/poisson_solver.cpp deleted file mode 100644 index 3527fe20f..000000000 --- a/poisson_solver/poisson_solver.cpp +++ /dev/null @@ -1,514 +0,0 @@ -/* - * This file is part of Vlasiator. - * Copyright 2010-2016 Finnish Meteorological Institute - * - * For details of usage, see the COPYING file and read the "Rules of the Road" - * at http://www.physics.helsinki.fi/vlasiator/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
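 * [Editor's note, added: this deleted file held the static Poisson
 *  configuration and solver registry (Poisson::solvers), the PoissonSolver
 *  base-class implementation, and the high-level initialize(), solve() and
 *  finalize() drivers in namespace poisson.]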
- * - * File: poisson_solver.cpp - * Author: sandroos - * - * Created on January 14, 2015, 1:42 PM - */ - -#include -#include -#include -#include - -#include "../common.h" -#include "../logger.h" -#include "../mpiconversion.h" -#include "../grid.h" -#include "../spatial_cell.hpp" -#include "../object_wrapper.h" - -#include "poisson_solver.h" -#include "poisson_solver_jacobi.h" -#include "poisson_solver_sor.h" -#include "poisson_solver_cg.h" -//#include "poisson_solver_cg2.h" - -#ifndef NDEBUG - #define DEBUG_POISSON -#endif - -using namespace std; - -extern Logger logFile; - -namespace poisson { - - // ***** INITIALIZE STATIC VARIABLES ***** // - int Poisson::RHOQ_TOT = CellParams::RHOQ_TOT; - int Poisson::PHI = CellParams::PHI; - ObjectFactory Poisson::solvers; - PoissonSolver* Poisson::solver = NULL; - bool Poisson::clearPotential = true; - bool Poisson::is2D = false; - string Poisson::solverName; - Real Poisson::maxAbsoluteError = 1e-4; - uint Poisson::maxIterations; - Real Poisson::minRelativePotentialChange; - vector Poisson::localCellParams; - bool Poisson::timeDependentBackground = false; - - void Poisson::cacheCellParameters(dccrg::Dccrg& mpiGrid, - const std::vector& cells) { - // NOTE: This is surprisingly slow as compared to the - // similar cache-function in poisson_solver_sor.cpp - - // Clear old cache - Poisson::localCellParams.clear(); - Poisson::localCellParams.resize(cells.size()); - - // Fetch pointers - for (size_t c=0; cparameters.data(); - } - } - - // ***** DEFINITION OF POISSON SOLVER BASE CLASS ***** // - - PoissonSolver::PoissonSolver() { } - - PoissonSolver::~PoissonSolver() { } - - bool PoissonSolver::initialize() {return true;} - - bool PoissonSolver::finalize() {return true;} - - bool PoissonSolver::calculateBackgroundField( - dccrg::Dccrg& mpiGrid, - const std::vector& cells) { - - phiprof::start("Background Field"); - - if (Poisson::clearPotential == true || Parameters::tstep == 0 || Parameters::meshRepartitioned == true) { - #pragma omp parallel for - for (size_t c=0; csetCellBackgroundField(cell); - } - - cell->parameters[CellParams::PHI] = 0; - cell->parameters[CellParams::PHI_TMP] = 0; - cell->parameters[CellParams::EXVOL] = cell->parameters[CellParams::BGEXVOL]; - cell->parameters[CellParams::EYVOL] = cell->parameters[CellParams::BGEYVOL]; - cell->parameters[CellParams::EZVOL] = cell->parameters[CellParams::BGEZVOL]; - } - } else { - #pragma omp parallel for - for (size_t c=0; csetCellBackgroundField(cell); - } - - cell->parameters[CellParams::EXVOL] = cell->parameters[CellParams::BGEXVOL]; - cell->parameters[CellParams::EYVOL] = cell->parameters[CellParams::BGEYVOL]; - cell->parameters[CellParams::EZVOL] = cell->parameters[CellParams::BGEZVOL]; - } - } - phiprof::stop("Background Field",cells.size(),"Spatial Cells"); - return true; - } - - void PoissonSolver::calculateChargeDensitySingle(spatial_cell::SpatialCell* cell) { - Real rho_q = 0.0; - // Iterate all particle species - for (uint popID=0; popID& blockContainer = cell->get_velocity_blocks(popID); - if (blockContainer.size() == 0) continue; - - const Real charge = getObjectWrapper().particleSpecies[popID].charge; - const Realf* data = blockContainer.getData(); - const Real* blockParams = blockContainer.getParameters(); - - // Sum charge density over all phase-space cells - for (vmesh::LocalID blockLID=0; blockLIDparameters[CellParams::RHOQ_TOT] = cell->parameters[CellParams::RHOQ_EXT] + rho_q/physicalconstants::EPS_0; - } - - /** Calculate total charge density on given spatial cells. 
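 * [Editor's note, added for clarity: the loop below sums, per particle
 *  species, the species charge times the phase-space density integrated
 *  over all velocity blocks, and the final assignment stores
 *  RHOQ_TOT = RHOQ_EXT + rho_q/EPS_0. That is, the total charge density is
 *  kept pre-divided by epsilon_0, so it can be used directly as the
 *  right-hand side of the discrete Poisson equation.]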
- * @param mpiGrid Parallel grid library. - * @param cells List of spatial cells. - * @return If true, charge densities were successfully calculated.*/ - bool PoissonSolver::calculateChargeDensity(spatial_cell::SpatialCell* cell) { - phiprof::start("Charge Density"); - bool success = true; - - Real rho_q = 0.0; - #pragma omp parallel reduction (+:rho_q) - { - // Iterate all particle species - for (uint popID=0; popID& blockContainer = cell->get_velocity_blocks(popID); - if (blockContainer.size() == 0) continue; - - const Real charge = getObjectWrapper().particleSpecies[popID].charge; - const Realf* data = blockContainer.getData(); - const Real* blockParams = blockContainer.getParameters(); - - // Sum charge density over all phase-space cells - #pragma omp for - for (vmesh::LocalID blockLID=0; blockLIDparameters[CellParams::RHOQ_TOT] = cell->parameters[CellParams::RHOQ_EXT] + rho_q/physicalconstants::EPS_0; - - #ifdef DEBUG_POISSON - bool ok = true; - if (rho_q != rho_q) ok = false; - if (ok == false) { - stringstream ss; - ss << "(POISSON SOLVER) NAN detected, rho_q " << rho_q << '\t'; - ss << endl; - cerr << ss.str(); exit(1); - } - #endif - - size_t phaseSpaceCells=0; - for (uint popID=0; popIDget_velocity_blocks(popID).size()*WID3; - - phiprof::stop("Charge Density",phaseSpaceCells,"Phase-space cells"); - return success; - } - - /*bool PoissonSolver::checkGaussLaw(dccrg::Dccrg& mpiGrid, - const std::vector& cells, - Real& efieldFlux,Real& totalCharge) { - bool success = true; - Real chargeSum = 0; - Real eFluxSum = 0; - - #pragma omp parallel for reduction(+:chargeSum,eFluxSum) - for (size_t c=0; csysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) continue; - - const Real D3 - = cells[c][0][CellParams::DX] - * cells[c][0][CellParams::DY] - * cells[c][0][CellParams::DZ]; - chargeSum += cells[c][0][CellParams::RHOQ_TOT]*D3; - - spatial_cell::SpatialCell* nbr; - dccrg::Types<3>::indices_t indices = mpiGrid.mapping.get_indices(cells[c].cellID); - - // -x neighbor - indices[0] -= 1; - nbr = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - if (nbr != NULL) if (nbr->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { - const Real area = cells[c][0][CellParams::DY]*cells[c][0][CellParams::DZ]; - const Real Ex = cells[c][0][CellParams::EXVOL] - cells[c][0][CellParams::BGEXVOL]; - const Real Ex_nbr = nbr->parameters[CellParams::EXVOL] - nbr->parameters[CellParams::BGEXVOL]; - - eFluxSum -= 0.5*(Ex+Ex_nbr)*area; - } - // +x neighbor - indices[0] += 2; - nbr = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - if (nbr != NULL) if (nbr->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { - const Real area = cells[c][0][CellParams::DY]*cells[c][0][CellParams::DZ]; - const Real Ex = cells[c][0][CellParams::EXVOL] - cells[c][0][CellParams::BGEXVOL]; - const Real Ex_nbr = nbr->parameters[CellParams::EXVOL] - nbr->parameters[CellParams::BGEXVOL]; - - eFluxSum += 0.5*(Ex+Ex_nbr)*area; - } - indices[0] -= 1; - - // -y neighbor - indices[1] -= 1; - nbr = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - if (nbr != NULL) if (nbr->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { - const Real area = cells[c][0][CellParams::DX]*cells[c][0][CellParams::DZ]; - const Real Ey = cells[c][0][CellParams::EYVOL] - cells[c][0][CellParams::BGEYVOL]; - const Real Ey_nbr = nbr->parameters[CellParams::EYVOL] - nbr->parameters[CellParams::BGEYVOL]; - - eFluxSum -= 0.5*(Ey+Ey_nbr)*area; - } - // +y neighbor - indices[1] += 2; - nbr = mpiGrid[ 
mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - if (nbr != NULL) if (nbr->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { - const Real area = cells[c][0][CellParams::DX]*cells[c][0][CellParams::DZ]; - const Real Ey = cells[c][0][CellParams::EYVOL] - cells[c][0][CellParams::BGEYVOL]; - const Real Ey_nbr = nbr->parameters[CellParams::EYVOL] - nbr->parameters[CellParams::BGEYVOL]; - - eFluxSum += 0.5*(Ey+Ey_nbr)*area; - } - indices[1] -= 1; - - // -z neighbor - indices[2] -= 1; - nbr = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - if (nbr != NULL) if (nbr->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { - const Real area = cells[c][0][CellParams::DX]*cells[c][0][CellParams::DY]; - const Real Ez = cells[c][0][CellParams::EZVOL] - cells[c][0][CellParams::BGEZVOL]; - eFluxSum -= Ez*area; - } - // +z neighbor - indices[2] += 2; - nbr = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - if (nbr != NULL) if (nbr->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { - const Real area = cells[c][0][CellParams::DX]*cells[c][0][CellParams::DY]; - const Real Ez = cells[c][0][CellParams::EZVOL] - cells[c][0][CellParams::BGEZVOL]; - eFluxSum += Ez*area; - } - indices[2] -= 1; - } - - efieldFlux += eFluxSum; - totalCharge += chargeSum; - - return success; - }*/ - - Real PoissonSolver::maxError2D(dccrg::Dccrg& mpiGrid) { - phiprof::start("Evaluate Error"); - - // DEBUG: Make sure values are up to date - SpatialCell::set_mpi_transfer_type(spatial_cell::Transfer::CELL_RHOQ_TOT,false); - mpiGrid.update_copies_of_remote_neighbors(POISSON_NEIGHBORHOOD_ID); - SpatialCell::set_mpi_transfer_type(spatial_cell::Transfer::CELL_PHI,false); - mpiGrid.update_copies_of_remote_neighbors(POISSON_NEIGHBORHOOD_ID); - - //Real localError = 0; - Real* maxError = new Real[omp_get_max_threads()]; - const vector& cells = getLocalCells(); - - #pragma omp parallel - { - const int tid = omp_get_thread_num(); - maxError[tid] = 0; - - #pragma omp for - for (size_t c=0; csysBoundaryFlag != 1) { - mpiGrid[cellID]->parameters[CellParams::PHI_TMP] = 0; - continue; - } - - // Fetch data - const Real rho_q = mpiGrid[cellID]->parameters[CellParams::RHOQ_TOT]; - Real phi_111 = mpiGrid[cellID]->parameters[CellParams::PHI]; - - // Calculate cell i/j/k indices - dccrg::Types<3>::indices_t indices = mpiGrid.mapping.get_indices(cellID); - CellID nbrID; - - // +/- x face neighbor potential - indices[0] -= 1; nbrID = mpiGrid.mapping.get_cell_from_indices(indices,0); - Real phi_011 = mpiGrid[nbrID]->parameters[CellParams::PHI]; - indices[0] += 2; nbrID = mpiGrid.mapping.get_cell_from_indices(indices,0); - Real phi_211 = mpiGrid[nbrID]->parameters[CellParams::PHI]; - indices[0] -= 1; - - // +/- y face neighbor potential - indices[1] -= 1; nbrID = mpiGrid.mapping.get_cell_from_indices(indices,0); - Real phi_101 = mpiGrid[nbrID]->parameters[CellParams::PHI]; - indices[1] += 2; nbrID = mpiGrid.mapping.get_cell_from_indices(indices,0); - Real phi_121 = mpiGrid[nbrID]->parameters[CellParams::PHI]; - indices[1] -= 1; - - // Evaluate error - Real DX2 = mpiGrid[cellID]->parameters[CellParams::DX]*mpiGrid[cellID]->parameters[CellParams::DX]; - - Real RHS = phi_011+phi_211+phi_101+phi_121-4*phi_111; - Real cellError = fabs(-rho_q*DX2 - RHS); - mpiGrid[cellID]->parameters[CellParams::PHI_TMP] = cellError; - if (fabs(cellError) > maxError[tid]) maxError[tid] = fabs(cellError); - } // for-loop over cells - - } // #pragma omp parallel - - // Compute max error (over threads) - for (int i=1; i maxError[0]) maxError[0] 
= maxError[i]; - } - - Real globalMaxError; - MPI_Allreduce(maxError,&globalMaxError,1,MPI_Type(),MPI_MAX,MPI_COMM_WORLD); - - delete [] maxError; maxError = NULL; - - phiprof::stop("Evaluate Error",cells.size(),"Spatial Cells"); - - return globalMaxError; - } - - // ***** DEFINITIONS OF HIGH-LEVEL DRIVER FUNCTIONS ***** // - - bool initialize(dccrg::Dccrg& mpiGrid) { - bool success = true; - - Poisson::solvers.add("Jacobi",makeJacobi); - Poisson::solvers.add("SOR",makeSOR); - Poisson::solvers.add("CG",makeCG); - //Poisson::solvers.add("CG2",makeCG2); - - // Create and initialize the Poisson solver - Poisson::solver = Poisson::solvers.create(Poisson::solverName); - if (Poisson::solver == NULL) { - logFile << "(POISSON SOLVER) ERROR: Failed to create Poisson solver '" << Poisson::solverName << "'" << endl << write; - return false; - } else { - if (Poisson::solver->initialize() == false) success = false; - if (success == true) { - logFile << "(POISSON SOLVER) Successfully initialized Poisson solver '" << Poisson::solverName << "'" << endl; - logFile << "Parameters are:" << endl; - logFile << "\t max absolute error: " << Poisson::maxAbsoluteError << endl; - logFile << "\t max iterations : " << Poisson::maxIterations << endl; - logFile << "\t time dep bground : "; - if (Poisson::timeDependentBackground == true) logFile << "Yes" << endl; - else logFile << "No" << endl; - logFile << "\t clear potential? : "; - if (Poisson::clearPotential == true) logFile << "Yes" << endl; - else logFile << "No" << endl; - logFile << "\t is 2D? : "; - if (Poisson::is2D == true) logFile << "Yes" << endl; - else logFile << "No" << endl; - logFile << write; - } else { - logFile << "(POISSON SOLVER) ERROR: Failed to initialize Poisson solver '" << Poisson::solverName << "'" << endl << write; - return success; - } - } - - // Set up the initial state unless the simulation was restarted - //if (Parameters::isRestart == true) return success; - - for (size_t c=0; ccalculateChargeDensity(cell) == false) { - logFile << "(POISSON SOLVER) ERROR: Failed to calculate charge density in " << __FILE__ << ":" << __LINE__ << endl << write; - success = false; - } - } - - // Force calculateBackgroundField to reset potential arrays to zero values. - // This may not otherwise happen if the simulation was restarted. 
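 // [Editor's note, added: the save/restore of Poisson::clearPotential below
 //  temporarily forces the zeroing branch of calculateBackgroundField(), so
 //  PHI and PHI_TMP start from zero exactly once at initialization even when
 //  the user has configured the old potential to be kept between steps.]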
- const bool oldValue = Poisson::clearPotential; - Poisson::clearPotential = true; - if (Poisson::solver->calculateBackgroundField(mpiGrid,getLocalCells()) == false) { - logFile << "(POISSON SOLVER) ERROR: Failed to calculate background field in " << __FILE__ << ":" << __LINE__ << endl << write; - success = false; - } - Poisson::clearPotential = oldValue; - if (solve(mpiGrid) == false) { - logFile << "(POISSON SOLVER) ERROR: Failed to solve potential in " << __FILE__ << ":" << __LINE__ << endl << write; - success = false; - } - - return success; - } - - bool finalize() { - bool success = true; - if (Poisson::solver != NULL) { - if (Poisson::solver->finalize() == false) success = false; - delete Poisson::solver; - Poisson::solver = NULL; - } - return success; - } - - bool solve(dccrg::Dccrg& mpiGrid) { - phiprof::start("Poisson Solver (Total)"); - bool success = true; - - // If mesh partitioning has changed, recalculate spatial - // cell parameters pointer cache: - if (Parameters::meshRepartitioned == true) { - phiprof::start("Cache Cell Parameters"); - Poisson::cacheCellParameters(mpiGrid,getLocalCells()); - phiprof::stop("Cache Cell Parameters"); - } - - // Solve Poisson equation - if (success == true) if (Poisson::solver != NULL) { - if (Poisson::solver->calculateBackgroundField(mpiGrid,getLocalCells()) == false) success = false; - - SpatialCell::set_mpi_transfer_type(Transfer::CELL_PHI,false); - mpiGrid.update_copies_of_remote_neighbors(POISSON_NEIGHBORHOOD_ID); - - if (Poisson::solver->solve(mpiGrid) == false) success = false; - } - - // Add electrostatic electric field to volume-averaged E - //if (success == true) if (Poisson::solver != NULL) { - // if (Poisson::solver->calculateElectrostaticField(mpiGrid) == false) success = false; - //} - - phiprof::stop("Poisson Solver (Total)"); - return success; - } - -} // namespace poisson diff --git a/poisson_solver/poisson_solver.h b/poisson_solver/poisson_solver.h deleted file mode 100644 index 4d6f0d536..000000000 --- a/poisson_solver/poisson_solver.h +++ /dev/null @@ -1,248 +0,0 @@ -/* - * This file is part of Vlasiator. - * Copyright 2010-2016 Finnish Meteorological Institute - * - * For details of usage, see the COPYING file and read the "Rules of the Road" - * at http://www.physics.helsinki.fi/vlasiator/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
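 * [Editor's note, added: this deleted header declared the CellCache2D/3D
 *  pointer caches, the PoissonSolver interface, the static Poisson
 *  configuration struct, and the templated electrostatic-field and residual
 *  kernels shared by the Jacobi, SOR and CG back ends.]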
- * - * File: poisson_solver.h - * Author: sandroos - * - * Created on January 14, 2015, 1:42 PM - */ - -#ifndef POISSON_SOLVER_H -#define POISSON_SOLVER_H - -#ifdef _OPENMP - #include -#endif -#include -#include - -#include "../mpiconversion.h" -#include "../object_factory.h" -#include "../spatial_cell.hpp" - -namespace poisson { - - template - struct CellCache2D { - spatial_cell::SpatialCell* cell; - Real* parameters[5]; - Real*& operator[](const int& i) {return parameters[i];} - Real variables[VARS]; - }; - - template - struct CellCache3D { -#warning TEMP remove - CellID cellID; - spatial_cell::SpatialCell* cell; - Real* parameters[7]; - Real*& operator[](const int& i) {return parameters[i];} - Real* const& operator[](const int& i) const {return parameters[i];} - Real variables[VARS]; - }; - - class PoissonSolver { - public: - PoissonSolver(); - virtual ~PoissonSolver(); - - // ***** DECLARATIONS OF VIRTUAL MEMBER FUNCTIONS ***** // - - virtual bool calculateBackgroundField(dccrg::Dccrg& mpiGrid, - const std::vector& cells); - virtual bool calculateChargeDensity(spatial_cell::SpatialCell* cell); - virtual void calculateChargeDensitySingle(spatial_cell::SpatialCell* cell); - template - bool calculateElectrostaticField2D(const std::vector >& cells); - template - bool calculateElectrostaticField3D(const std::vector >& cells); - /*template - virtual bool checkGaussLaw(dccrg::Dccrg& mpiGrid, - const std::vector >& cells, - Real& efieldFlux,Real& totalCharge);*/ - template - Real error(std::vector >& cells); - virtual Real maxError2D(dccrg::Dccrg& mpiGrid); - virtual bool initialize(); - virtual bool finalize(); - - // ***** DECLARATIONS OF PURE VIRTUAL MEMBER FUNCTIONS ***** // - - /** Calculate electric field on all non-system boundary spatial cells. - * @param mpiGrid Parallel grid library. - * @return If true, electric field was calculated successfully.*/ - virtual bool calculateElectrostaticField(dccrg::Dccrg& mpiGrid) = 0; - - /** Solve Poisson equation on all non-system boundary spatial cells. - * @param mpiGrid Parallel grid library. - * @return If true, Poisson equation was successfully solved.*/ - virtual bool solve(dccrg::Dccrg& mpiGrid) = 0; - - protected: - - }; - - /** Wrapper for all variables needed by Poisson solvers.*/ - struct Poisson { - static int RHOQ_TOT; - static int PHI; - - static ObjectFactory solvers; /**< Container for all existing Poisson solvers.*/ - static PoissonSolver* solver; /**< Poisson solver used in the simulation.*/ - static std::string solverName; /**< Name of the Poisson solver in use.*/ - - static bool clearPotential; /**< If true, then potential is cleared each timestep - * before solving Poisson's equation. 
Otherwise the old - * potential is used as an initial guess.*/ - static bool is2D; /**< If true, then system is two-dimensional, i.e., - * electrostatic potential and electric field is solved - * in xy-plane.*/ - static Real maxAbsoluteError; /**< Potential iteration is stopped if maximum potential - * error drops below this value.*/ - static uint maxIterations; /**< Maximum number of iterations allowed, only - * has an effect on iterative solvers.*/ - static Real minRelativePotentialChange; /**< Iterative solvers keep on iterating the solution - * until the change in potential during successive - * iterations is less than this value.*/ - static std::vector localCellParams; /**< Pointers to spatial cell parameters, order - * is the same as in getLocalCells() vector.*/ - static bool timeDependentBackground; /**< If true, the background field / charge density is - * time-dependent and must be recalculated each time step.*/ - - static void cacheCellParameters(dccrg::Dccrg& mpiGrid, - const std::vector& cells); - }; - - bool initialize(dccrg::Dccrg& mpiGrid); - bool finalize(); - bool solve(dccrg::Dccrg& mpiGrid); - - - - - template inline - bool PoissonSolver::calculateElectrostaticField2D(const std::vector >& cells) { - phiprof::start("Electrostatic E"); - - #pragma omp parallel for - for (size_t c=0; c inline - bool PoissonSolver::calculateElectrostaticField3D(const std::vector >& cells) { - phiprof::start("Electrostatic E"); - - #pragma omp parallel for - for (size_t c=0; c inline - Real PoissonSolver::error(std::vector >& cells) { - phiprof::start("error evaluation"); - Real maxError = -std::numeric_limits::max(); - Real* threadMaxError = new Real[omp_get_max_threads()]; - - #pragma omp parallel - { - const int tid = omp_get_thread_num(); - Real myError = 0; - - #pragma omp for - for (size_t c=0; csysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { - cells[c].parameters[0][CellParams::PHI_TMP] = 0; - continue; - } - - const Real DX2 = cells[c].parameters[0][CellParams::DX]*cells[c].parameters[0][CellParams::DX]; - const Real rho_q = cells[c].parameters[0][CellParams::RHOQ_TOT]; - const Real phi_111 = cells[c].parameters[0][CellParams::PHI]; - const Real phi_011 = cells[c].parameters[1][CellParams::PHI]; // -x neighbor - const Real phi_211 = cells[c].parameters[2][CellParams::PHI]; // +x neighbor - const Real phi_101 = cells[c].parameters[3][CellParams::PHI]; // -y neighbor - const Real phi_121 = cells[c].parameters[4][CellParams::PHI]; // +y neighbor - - const Real RHS = phi_011+phi_211+phi_101+phi_121-4*phi_111; - const Real cellError = fabs(-rho_q*DX2-RHS); - cells[c].parameters[0][CellParams::PHI_TMP] = cellError; - if (cellError > myError) myError = cellError; - } - threadMaxError[tid] = myError; - } - - // Calculate the maximum error over all per-thread values to variable maxError - for (int i=1; i threadMaxError[0]) threadMaxError[0] = threadMaxError[i]; - } - maxError = threadMaxError[0]; - delete [] threadMaxError; threadMaxError = NULL; - - // Reduce the maximum error to all processes - Real globalMaxError; - MPI_Allreduce(&maxError,&globalMaxError,1,MPI_Type(),MPI_MAX,MPI_COMM_WORLD); - phiprof::stop("error evaluation",cells.size(),"Spatial Cells"); - - return globalMaxError; - } - -} // namespace poisson - -#endif // POISSON_SOLVER_H - diff --git a/poisson_solver/poisson_solver_cg.cpp b/poisson_solver/poisson_solver_cg.cpp deleted file mode 100644 index 843bae136..000000000 --- a/poisson_solver/poisson_solver_cg.cpp +++ /dev/null @@ -1,702 +0,0 @@ -/* - * This file is part 
of Vlasiator. - * Copyright 2010-2016 Finnish Meteorological Institute - * - * For details of usage, see the COPYING file and read the "Rules of the Road" - * at http://www.physics.helsinki.fi/vlasiator/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * File: poisson_solver_cg.cpp - * Author: sandroos - * - * Created on January 15, 2015, 12:45 PM - */ - -#include -#include -#include - -#include "../logger.h" -#include "../grid.h" -#include "../mpiconversion.h" - -#include "poisson_solver_cg.h" - -#ifndef NDEBUG - #define DEBUG_POISSON_CG -#endif - -using namespace std; - -extern Logger logFile; - -namespace poisson { - - static std::vector > innerCellPointers; - static std::vector > bndryCellPointers; - - PoissonSolver* makeCG() { - return new PoissonSolverCG(); - } - - PoissonSolverCG::PoissonSolverCG(): PoissonSolver() { - - } - - PoissonSolverCG::~PoissonSolverCG() { } - - bool PoissonSolverCG::initialize() { - bool success = true; - bndryCellParams[CellParams::PHI] = 0; - bndryCellParams[CellParams::PHI_TMP] = 0; - return success; - } - - bool PoissonSolverCG::finalize() { - bool success = true; - return success; - } - - inline void PoissonSolverCG::calculateAlpha(CellCache3D& cell,Real& mySum0,Real& mySum1) { - // Calculate r(transpose) * r - mySum0 += cell.variables[cgvar::R]*cell.variables[cgvar::R]; - - // Calculate p(transpose) * A * p - Real A_p = -4*cell.parameters[0][CellParams::PHI_TMP] - + cell.parameters[1][CellParams::PHI_TMP] - + cell.parameters[2][CellParams::PHI_TMP] - + cell.parameters[3][CellParams::PHI_TMP] - + cell.parameters[4][CellParams::PHI_TMP]; - cell.variables[cgvar::A_TIMES_P] = A_p; - mySum1 += cell.parameters[0][CellParams::PHI_TMP]*A_p; - } - - /** Calculate the value of alpha parameter. 
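 * [Editor's note, added: this is the standard conjugate-gradient step
 *  length, alpha = (r^T r) / (p^T A p). The per-cell helper above
 *  accumulates both dot products in a single pass, applying A as the 2D
 *  five-point Laplacian -4*p_111 + p_011 + p_211 + p_101 + p_121 to the
 *  search direction stored in CellParams::PHI_TMP; the small
 *  numeric_limits-based epsilon added to the denominator below only guards
 *  against division by zero.]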
- * @return If true, all processes have the same value of alpha in PoissonSolverCG::alphaGlobal.*/ - bool PoissonSolverCG::calculateAlpha() { - phiprof::start("calculate alpha"); - Real t_start = 0; - if (Parameters::prepareForRebalance == true) t_start = MPI_Wtime(); - - Real sums[2]; - Real mySum0 = 0; - Real mySum1 = 0; - - // Calculate P(transpose)*A*P and R(transpose)*R for all local cells: - const size_t offset = bndryCellPointers.size(); - const size_t N_cells = bndryCellPointers.size()+innerCellPointers.size(); - - #pragma omp parallel for reduction(+:mySum0,mySum1) - for (size_t c=0; c= offset) calculateAlpha(innerCellPointers[c-offset],mySum0,mySum1); - else calculateAlpha(bndryCellPointers[c ],mySum0,mySum1); - } - - sums[0] = mySum0; - sums[1] = mySum1; - - // Measure computation time (if needed) - if (Parameters::prepareForRebalance == true) { - const size_t N = max((size_t)1,innerCellPointers.size()+bndryCellPointers.size()); - Real t_average = (MPI_Wtime() - t_start) / N; - - #pragma omp parallel - { - #pragma omp for nowait - for (size_t c=0; c& cell = bndryCellPointers[c]; - cell.parameters[0][CellParams::LBWEIGHTCOUNTER] += t_average; - } - #pragma omp for nowait - for (size_t c=0; c& cell = innerCellPointers[c]; - cell.parameters[0][CellParams::LBWEIGHTCOUNTER] += t_average; - } - } - } - - phiprof::stop("calculate alpha",bndryCellPointers.size()+innerCellPointers.size(),"Spatial Cells"); - - // Reduce sums to master process: - #warning TEST if Allreduce is faster here - phiprof::start("MPI (Alpha)"); - MPI_Reduce(sums,&(globalVariables[cgglobal::R_T_R]),2,MPI_Type(),MPI_SUM,0,MPI_COMM_WORLD); - - // Calculate alpha and broadcast to all processes: - globalVariables[cgglobal::ALPHA] - = globalVariables[cgglobal::R_T_R] - /(globalVariables[cgglobal::P_T_A_P] + 100*numeric_limits::min()); - MPI_Bcast(globalVariables,3,MPI_Type(),0,MPI_COMM_WORLD); - phiprof::stop("MPI (Alpha)"); - return true; - } - - bool PoissonSolverCG::calculateElectrostaticField(dccrg::Dccrg& mpiGrid) { - bool success = true; - SpatialCell::set_mpi_transfer_type(Transfer::CELL_PHI,false); - - mpiGrid.start_remote_neighbor_copy_updates(POISSON_NEIGHBORHOOD_ID); - - // Calculate electric field on inner cells - if (Poisson::is2D == true) { - if (calculateElectrostaticField2D(innerCellPointers) == false) success = false; - } else { - if (calculateElectrostaticField3D(innerCellPointers) == false) success = false; - } - - mpiGrid.wait_remote_neighbor_copy_updates(POISSON_NEIGHBORHOOD_ID); - - // Calculate electric field on boundary cells - if (Poisson::is2D == true) { - if (calculateElectrostaticField2D(bndryCellPointers) == false) success = false; - } else { - if (calculateElectrostaticField3D(bndryCellPointers) == false) success = false; - } - - return success; - } - - void PoissonSolverCG::cachePointers2D( - dccrg::Dccrg& mpiGrid, - const std::vector& cells, - std::vector >& cellCache) { - cellCache.clear(); - - for (size_t c=0; csysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) continue; - //if (mpiGrid[cells[c]]->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) continue; - - // Calculate cell i/j/k indices - dccrg::Types<3>::indices_t indices = mpiGrid.mapping.get_indices(cells[c]); - - CellCache3D cache; - cache.cellID = cells[c]; - cache.cell = mpiGrid[cells[c]]; - cache[0] = mpiGrid[cells[c]]->parameters.data(); - - #ifdef DEBUG_POISSON_CG - if (cache.cell == NULL) { - stringstream s; - s << "ERROR, NULL pointer in " << __FILE__ << ":" << __LINE__ << endl; - s << "\t Cell ID " << cells[c] << 
endl; - cerr << s.str(); - exit(1); - } - #endif - - spatial_cell::SpatialCell* dummy = NULL; - switch (mpiGrid[cells[c]]->sysBoundaryFlag) { - case sysboundarytype::DO_NOT_COMPUTE: - break; - case sysboundarytype::NOT_SYSBOUNDARY: - // Fetch pointers to this cell's (cell) parameters array, - // and pointers to +/- xyz face neighbors' arrays - indices[0] -= 1; cache[1] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[0] += 2; cache[2] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[0] -= 1; - - if (indices[1] == 2) { - indices[1] -= 1; - dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - if (dummy->sysBoundaryFlag == sysboundarytype::ANTISYMMETRIC) { - cache[3] = cache[0]; - } else { - cache[3] = dummy->get_cell_parameters(); - } - } else { - indices[1] -= 1; cache[3] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - } - - indices[1] += 2; cache[4] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[1] -= 1; - break; - - case sysboundarytype::ANTISYMMETRIC: - continue; - - // Get +/- x-neighbor pointers - indices[0] -= 1; - dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - if (dummy == NULL) cache[1] = bndryCellParams; - else cache[1] = dummy->parameters.data(); - indices[0] += 2; - dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - if (dummy == NULL) cache[2] = bndryCellParams; - else cache[2] = dummy->parameters.data(); - indices[0] -= 1; - - // Set +/- y-neighbors both point to +y neighbor - // if we are at the lower y-boundary, otherwise set both - // y-neighbors point to -y neighbor. - if (indices[1] == 1) { - cache[3] = cache[0]; - indices[1] += 1; - dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - if (dummy == NULL) { - cache[4] = bndryCellParams; - } else { - cache[4] = dummy->parameters.data(); - } - indices[1] -= 1; - } else { - cache[4] = cache[0]; - indices[1] -= 1; - dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - if (dummy == NULL) { - cache[3] = bndryCellParams; - } else { - cache[3] = dummy->parameters.data(); - } - indices[1] += 1; - } - break; - - default: - indices[0] -= 1; - dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - //if (dummy == NULL) cache[1] = bndryCellParams; - if (dummy == NULL) continue; - else cache[1] = dummy->parameters.data(); - indices[0] += 2; - dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - //if (dummy == NULL) cache[2] = bndryCellParams; - if (dummy == NULL) continue; - else cache[2] = dummy->parameters.data(); - indices[0] -= 1; - - indices[1] -= 1; - dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - //if (dummy == NULL) cache[3] = bndryCellParams; - if (dummy == NULL) continue; - else cache[3] = dummy->parameters.data(); - indices[1] += 2; - dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - //if (dummy == NULL) cache[4] = bndryCellParams; - if (dummy == NULL) continue; - else cache[4] = dummy->parameters.data(); - indices[1] -= 1; - break; - } - - cellCache.push_back(cache); - } // for-loop over spatial cells - } -/* - void PoissonSolverCG::cachePointers3D( - dccrg::Dccrg& mpiGrid, - const std::vector& cells,std::vector& redCache, - std::vector& blackCache) { - redCache.clear(); - blackCache.clear(); - - for (size_t c=0; c::indices_t indices = mpiGrid.mapping.get_indices(cells[c]); - - if 
((indices[0] + indices[1]%2 + indices[2]%2) % 2 == RED) { - CellCache3D cache; - - // Cells on domain boundaries are not iterated - if (mpiGrid[cells[c]]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) continue; - - // Fetch pointers to this cell's (cell) parameters array, - // and pointers to +/- xyz face neighbors' arrays - cache.cell = mpiGrid[cells[c]]; - cache[0] = mpiGrid[cells[c]]->parameters.data(); - - indices[0] -= 1; cache[1] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[0] += 2; cache[2] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[0] -= 1; - - indices[1] -= 1; cache[3] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[1] += 2; cache[4] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[1] -= 1; - - indices[2] -= 1; cache[5] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[2] += 2; cache[6] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[2] -= 1; - - redCache.push_back(cache); - } else { - CellCache3D cache; - - // Cells on domain boundaries are not iterated - if (mpiGrid[cells[c]]->sysBoundaryFlag != 1) continue; - - // Fetch pointers to this cell's (cell) parameters array, - // and pointers to +/- xyz face neighbors' arrays - cache.cell = mpiGrid[cells[c]]; - cache[0] = mpiGrid[cells[c]]->parameters.data(); - - indices[0] -= 1; cache[1] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[0] += 2; cache[2] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[0] -= 1; - - indices[1] -= 1; cache[3] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[1] += 2; cache[4] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[1] -= 1; - - indices[2] -= 1; cache[5] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[2] += 2; cache[6] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[2] -= 1; - - blackCache.push_back(cache); - } - } - } - */ - - void PoissonSolverCG::bvalue(dccrg::Dccrg& mpiGrid, - std::vector >& cells) { - #pragma omp parallel for - for (size_t c=0; c::indices_t indices = mpiGrid.mapping.get_indices(cells[c].cellID); - CellCache3D& cell = cells[c]; - - if (indices[0] < 2 && (indices[1] > 1 && indices[1] < Parameters::ycells_ini-2)) { - Real RHS = -3*cell.parameters[0][CellParams::PHI] - + cell.parameters[3][CellParams::PHI] + cell.parameters[4][CellParams::PHI]; - cell.variables[cgvar::R] = cell.variables[cgvar::B] - RHS; - continue; - } - - if ((indices[1] < 2 || indices[1] > Parameters::ycells_ini-3) - && (indices[0] > 1 && indices[0] < Parameters::xcells_ini-2)) { - Real RHS = -3*cell.parameters[0][CellParams::PHI] - + cell.parameters[1][CellParams::PHI] + cell.parameters[2][CellParams::PHI]; - cell.variables[cgvar::R] = cell.variables[cgvar::B] - RHS; - continue; - } - - if (indices[0] == 1 && (indices[1] == 1 || indices[1] == Parameters::ycells_ini-2)) { - cell.variables[cgvar::R] = 0; - continue; - } - } - } - - bool PoissonSolverCG::solve(dccrg::Dccrg& mpiGrid) { - bool success = true; - - // If mesh partitioning has changed, recalculate pointer caches - if (Parameters::meshRepartitioned == true) { - phiprof::start("Pointer Caching"); - if (Poisson::is2D == true) { 
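 // [Editor's note, added: cells are cached in two groups so computation can
 //  overlap MPI communication. Process-boundary cells (bndryCellPointers)
 //  are updated first and their values sent while the inner cells
 //  (innerCellPointers) are processed; see the start/wait pairs around
 //  start_remote_neighbor_copy_updates() in startIteration() below.]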
- cachePointers2D(mpiGrid,mpiGrid.get_local_cells_on_process_boundary(POISSON_NEIGHBORHOOD_ID),bndryCellPointers); - cachePointers2D(mpiGrid,mpiGrid.get_local_cells_not_on_process_boundary(POISSON_NEIGHBORHOOD_ID),innerCellPointers); - } else { - //cachePointers3D(mpiGrid,mpiGrid.get_local_cells_on_process_boundary(POISSON_NEIGHBORHOOD_ID),bndryCellPointers); - //cachePointers3D(mpiGrid,mpiGrid.get_local_cells_not_on_process_boundary(POISSON_NEIGHBORHOOD_ID),innerCellPointers); - } - phiprof::stop("Pointer Caching"); - } - - // Calculate charge density and sync - SpatialCell::set_mpi_transfer_type(Transfer::CELL_RHOQ_TOT,false); - - //for (size_t c=0; c= Poisson::maxIterations) break; - if (globalVariables[cgglobal::R_MAX] < Poisson::maxAbsoluteError) break; - } while (true); - - if (calculateElectrostaticField(mpiGrid) == false) { - logFile << "(POISSON SOLVER CG) ERROR: Failed to calculate electrostatic field in "; - logFile << __FILE__ << ":" << __LINE__ << endl << write; - success = false; - } - - error(innerCellPointers); - error(bndryCellPointers); - - return success; - } - - /** - * Upon successful return, CellParams::PHI_TMP has the correct value of P0 - * on all cells (local and buffered). - * @param mpiGrid Parallel grid library. - * @return If true, CG solver is ready to iterate.*/ - bool PoissonSolverCG::startIteration(dccrg::Dccrg& mpiGrid) { - bool success = true; - if (startIteration(bndryCellPointers) == false) success = false; - SpatialCell::set_mpi_transfer_type(Transfer::CELL_PHI,false); - mpiGrid.start_remote_neighbor_copy_updates(POISSON_NEIGHBORHOOD_ID); - - if (startIteration(innerCellPointers) == false) success = false; - mpiGrid.wait_remote_neighbor_copy_updates(POISSON_NEIGHBORHOOD_ID); - - return success; - } - - /** Calculate the value of alpha for this iteration. 
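The solve() loop above is the textbook unpreconditioned conjugate gradient, split across startIteration, update_p and update_x_r with the scalar products reduced over MPI. A serial sketch of the same recurrence for the 5-point Laplacian with zero Dirichlet boundaries; everything here (grid size, source, tolerance) is illustrative:

   #include <cmath>
   #include <cstdio>
   #include <vector>

   // Apply the 5-point negative Laplacian (unit spacing, phi = 0 outside).
   static void applyA(int n, const std::vector<double>& x, std::vector<double>& y) {
      auto at = [&](int i, int j) { return (i<0||j<0||i>=n||j>=n) ? 0.0 : x[j*n+i]; };
      for (int j=0; j<n; ++j) for (int i=0; i<n; ++i)
         y[j*n+i] = 4*at(i,j) - at(i-1,j) - at(i+1,j) - at(i,j-1) - at(i,j+1);
   }

   int main() {
      const int n = 32, N = n*n;
      std::vector<double> x(N,0.0), b(N,0.0), r, p, Ap(N);
      b[(n/2)*n + n/2] = 1.0;          // point source as the right-hand side

      r = b; p = r;                    // x0 = 0, so r0 = b
      double rTr = 0; for (double v : r) rTr += v*v;

      for (int it=0; it<1000 && std::sqrt(rTr) > 1e-10; ++it) {
         applyA(n, p, Ap);
         double pAp = 0; for (int k=0; k<N; ++k) pAp += p[k]*Ap[k];
         const double alpha = rTr/pAp;                 // step length
         for (int k=0; k<N; ++k) { x[k] += alpha*p[k]; r[k] -= alpha*Ap[k]; }
         double rTr_new = 0; for (double v : r) rTr_new += v*v;
         const double beta = rTr_new/rTr;              // new search direction
         for (int k=0; k<N; ++k) p[k] = r[k] + beta*p[k];
         rTr = rTr_new;
      }
      std::printf("final residual %g\n", std::sqrt(rTr));
      return 0;
   }

In the parallel solver the two dot products (rTr and pAp above) become MPI reductions, which is exactly what the cgglobal::R_T_R and cgglobal::P_T_A_P bookkeeping below implements.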
- * @return If true, CG solver is ready to iterate.*/ - bool PoissonSolverCG::startIteration(std::vector >& cells) { - phiprof::start("start iteration"); - - #pragma omp parallel for - for (size_t c=0; c& mpiGrid) { - phiprof::start("R transpose * R"); - - Real R_T_R = 0; - Real t_start = 0; - Real t_total = 0; - if (Parameters::prepareForRebalance == true) t_start = MPI_Wtime(); - - #pragma omp parallel reduction(+:R_T_R) - { - #pragma omp for - for (size_t c=0; c& cell = bndryCellPointers[c]; - R_T_R += cell.variables[cgvar::R]*cell.variables[cgvar::R]; - } - - #pragma omp for - for (size_t c=0; c& cell = innerCellPointers[c]; - R_T_R += cell.variables[cgvar::R]*cell.variables[cgvar::R]; - } - } - const size_t N_cells = bndryCellPointers.size()+innerCellPointers.size(); - - if (Parameters::prepareForRebalance == true) t_total = (MPI_Wtime() - t_start); - phiprof::stop("R transpose * R",N_cells,"Spatial Cells"); - - // Reduce value to master, calculate beta parameter and broadcast it to all processes: - phiprof::start("MPI (R transp R)"); - Real global_R_T_R; - MPI_Reduce(&R_T_R,&global_R_T_R,1,MPI_Type(),MPI_SUM,0,MPI_COMM_WORLD); - globalVariables[cgglobal::BETA] = global_R_T_R / (globalVariables[cgglobal::R_T_R] + 100*numeric_limits::min()); - globalVariables[cgglobal::R_T_R] = global_R_T_R; - MPI_Bcast(globalVariables,cgglobal::SIZE,MPI_Type(),0,MPI_COMM_WORLD); - phiprof::stop("MPI (R transp R)"); - - phiprof::start("update P"); - t_start = 0; - if (Parameters::prepareForRebalance == true) t_start = MPI_Wtime(); - - #pragma omp parallel - { - const int tid = omp_get_thread_num(); - #pragma omp for nowait - for (size_t c=0; c& cell = bndryCellPointers[c]; - cell.parameters[0][CellParams::PHI_TMP] - = cell.variables[cgvar::R] - + globalVariables[cgglobal::BETA]*cell.parameters[0][CellParams::PHI_TMP]; - } - - // Send new P values to neighbor processes: - if (tid == 0) { - SpatialCell::set_mpi_transfer_type(Transfer::CELL_PHI,false); - mpiGrid.start_remote_neighbor_copy_updates(POISSON_NEIGHBORHOOD_ID); - } - - #pragma omp for nowait - for (size_t c=0; c& cell = innerCellPointers[c]; - cell.parameters[0][CellParams::PHI_TMP] - = cell.variables[cgvar::R] - + globalVariables[cgglobal::BETA]*cell.parameters[0][CellParams::PHI_TMP]; - } - } - - // Measure computation time if needed - if (Parameters::prepareForRebalance == true) { - const size_t N = max((size_t)1,bndryCellPointers.size()+innerCellPointers.size()); - t_total += (MPI_Wtime() - t_start); - const Real t_average = t_total / N; - - #pragma omp parallel - { - #pragma omp for nowait - for (size_t c=0; c& cell = bndryCellPointers[c]; - cell.parameters[0][CellParams::LBWEIGHTCOUNTER] += t_average; - } - #pragma omp for nowait - for (size_t c=0; c& cell = innerCellPointers[c]; - cell.parameters[0][CellParams::LBWEIGHTCOUNTER] += t_average; - } - } - } - phiprof::stop("update P",innerCellPointers.size()+bndryCellPointers.size(),"Spatial Cells"); - - // Wait for MPI to complete: - phiprof::start("MPI (update P)"); - mpiGrid.wait_remote_neighbor_copy_updates(POISSON_NEIGHBORHOOD_ID); - phiprof::stop("MPI (update P)"); - - return true; - } - - /** - * - * @return */ - bool PoissonSolverCG::update_x_r() { - phiprof::start("update x and r"); - - Real t_start = 0; - if (Parameters::prepareForRebalance == true) t_start = MPI_Wtime(); - Real R_max = -numeric_limits::max(); - Real* thread_R_max = new Real[omp_get_max_threads()]; - - #pragma omp parallel - { - Real my_R_max = 0; - #pragma omp for nowait - for (size_t c=0; c& cell = 
bndryCellPointers[c]; - cell.parameters[0][CellParams::PHI] += globalVariables[cgglobal::ALPHA]*cell.parameters[0][CellParams::PHI_TMP]; - cell.variables[cgvar::R] -= globalVariables[cgglobal::ALPHA]*cell.variables[cgvar::A_TIMES_P]; - if (fabs(cell.variables[cgvar::R]) > my_R_max) my_R_max = fabs(cell.variables[cgvar::R]); - } - #pragma omp for nowait - for (size_t c=0; c& cell = innerCellPointers[c]; - cell.parameters[0][CellParams::PHI] += globalVariables[cgglobal::ALPHA]*cell.parameters[0][CellParams::PHI_TMP]; - cell.variables[cgvar::R] -= globalVariables[cgglobal::ALPHA]*cell.variables[cgvar::A_TIMES_P]; - if (fabs(cell.variables[cgvar::R]) > my_R_max) my_R_max = fabs(cell.variables[cgvar::R]); - } - const int tid = omp_get_thread_num(); - thread_R_max[tid] = my_R_max; - } - - for (int tid=1; tid thread_R_max[0]) thread_R_max[0] = thread_R_max[tid]; - } - R_max = thread_R_max[0]; - delete [] thread_R_max; thread_R_max = NULL; - - size_t N_cells = bndryCellPointers.size() + innerCellPointers.size(); - - // Measure computation time if needed - if (Parameters::prepareForRebalance == true) { - const Real t_average = (MPI_Wtime() - t_start) / max((size_t)1,N_cells); - - #pragma omp parallel - { - #pragma omp for nowait - for (size_t c=0; c& cell = bndryCellPointers[c]; - cell.parameters[0][CellParams::LBWEIGHTCOUNTER] += t_average; - } - #pragma omp for nowait - for (size_t c=0; c& cell = innerCellPointers[c]; - cell.parameters[0][CellParams::LBWEIGHTCOUNTER] += t_average; - } - } - } - - phiprof::stop("update x and r",N_cells,"Spatial Cells"); - - phiprof::start("MPI (x and r)"); - MPI_Allreduce(&R_max,&(globalVariables[cgglobal::R_MAX]),1,MPI_Type(),MPI_MAX,MPI_COMM_WORLD); - phiprof::stop("MPI (x and r)"); - - return true; - } - -} // namespace poisson diff --git a/poisson_solver/poisson_solver_cg.h b/poisson_solver/poisson_solver_cg.h deleted file mode 100644 index e6d9e7fae..000000000 --- a/poisson_solver/poisson_solver_cg.h +++ /dev/null @@ -1,98 +0,0 @@ -/* - * This file is part of Vlasiator. - * Copyright 2010-2016 Finnish Meteorological Institute - * - * For details of usage, see the COPYING file and read the "Rules of the Road" - * at http://www.physics.helsinki.fi/vlasiator/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
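The per-thread thread_R_max array in update_x_r() above hand-rolls a maximum reduction, which was necessary before OpenMP 3.1 added max as a reduction operator. On current compilers the same residual-norm scan can be written directly; a sketch of the modern form, not the deleted code itself:

   #include <cmath>
   #include <cstdio>
   #include <vector>

   int main() {
      std::vector<double> residual(1000);
      for (size_t c=0; c<residual.size(); ++c) residual[c] = std::sin(0.1*c);

      double R_max = 0.0;
      // OpenMP 3.1+ supports max as a reduction operator, replacing the
      // manual per-thread array plus the serial combine loop.
      #pragma omp parallel for reduction(max:R_max)
      for (size_t c=0; c<residual.size(); ++c)
         if (std::fabs(residual[c]) > R_max) R_max = std::fabs(residual[c]);

      std::printf("max |r| = %g\n", R_max);
      return 0;
   }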
- * - * File: poisson_solver_cg.h - * Author: sandroos - * - * Created on January 15, 2015, 12:45 PM - */ - -#ifndef POISSON_SOLVER_CG_H -#define POISSON_SOLVER_CG_H - -#include <vector> - -#include "poisson_solver.h" - -namespace poisson { - - namespace cgvar { - enum Variable { - PHI, - B, /**< Charge density multiplied by dx2/epsilon0.*/ - R, - A_TIMES_P, /**< Matrix A times P.*/ - SIZE - }; - } - - namespace cgglobal { - enum Variable { - ALPHA, - R_T_R, - P_T_A_P, - BETA, - R_MAX, - SIZE - }; - } - - class PoissonSolverCG: public PoissonSolver { - public: - PoissonSolverCG(); - ~PoissonSolverCG(); - - bool calculateElectrostaticField(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid); - bool initialize(); - bool finalize(); - bool solve(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid); - - private: - - Real bndryCellParams[CellParams::N_SPATIAL_CELL_PARAMS]; - - void bvalue(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid, - std::vector<CellCache3D<cgvar::SIZE> >& cells); - - void cachePointers2D(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid, - const std::vector<CellID>& cells, - std::vector<CellCache3D<cgvar::SIZE> >& cellCache); - /*void cachePointers3D(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid, - const std::vector<CellID>& cells,std::vector<CellCache3D<cgvar::SIZE> >& redCache, - std::vector<CellCache3D<cgvar::SIZE> >& blackCache);*/ - - bool calculateAlpha(); - void calculateAlpha(CellCache3D<cgvar::SIZE>& cell,Real& mySum0,Real& mySum1); - bool startIteration(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid); - bool startIteration(std::vector<CellCache3D<cgvar::SIZE> >& cells); - bool update_p(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid); - bool update_x_r(); - - Real globalVariables[cgglobal::SIZE]; - uint iterations; - }; - - PoissonSolver* makeCG(); - -} // namespace poisson - -#endif // POISSON_SOLVER_CG_H - diff --git a/poisson_solver/poisson_solver_jacobi.cpp b/poisson_solver/poisson_solver_jacobi.cpp deleted file mode 100644 index 4845f815d..000000000 --- a/poisson_solver/poisson_solver_jacobi.cpp +++ /dev/null @@ -1,165 +0,0 @@ -/* - * This file is part of Vlasiator. - * Copyright 2010-2016 Finnish Meteorological Institute - * - * For details of usage, see the COPYING file and read the "Rules of the Road" - * at http://www.physics.helsinki.fi/vlasiator/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * File: poisson_solver_jacobi.cpp - * Author: sandroos - * - * Created on January 16, 2015.
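makeCG(), like makeSOR() and makeJacobi() below, is a plain factory function, and the solver name read from the config file decides which one gets called. A minimal registry of that shape; the wiring shown here is an illustration, the real lookup lives elsewhere in poisson_solver.cpp and is not part of this hunk:

   #include <cstdio>
   #include <map>
   #include <string>

   struct Solver { virtual ~Solver() {} virtual const char* name() const = 0; };
   struct SOR    : Solver { const char* name() const { return "SOR"; } };
   struct Jacobi : Solver { const char* name() const { return "Jacobi"; } };

   static Solver* makeSOR()    { return new SOR(); }
   static Solver* makeJacobi() { return new Jacobi(); }

   int main() {
      // Map a config-file string to a factory function, the way the
      // solver entry in the [Poisson] section selects an implementation.
      std::map<std::string, Solver* (*)()> factories;
      factories["SOR"]    = makeSOR;
      factories["Jacobi"] = makeJacobi;

      std::string solverName = "SOR"; // would come from the config file
      Solver* solver = factories.at(solverName)();
      std::printf("instantiated %s\n", solver->name());
      delete solver;
      return 0;
   }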
- */ - -#include -#include - -#include "../grid.h" - -#include "poisson_solver_jacobi.h" - -using namespace std; - -namespace poisson { - - static const int RED = 0; - static const int BLACK = 1; - - PoissonSolver* makeJacobi() { - return new PoissonSolverJacobi(); - } - - PoissonSolverJacobi::PoissonSolverJacobi(): PoissonSolver() { } - - PoissonSolverJacobi::~PoissonSolverJacobi() { } - - bool PoissonSolverJacobi::initialize() { - bool success = true; - return success; - } - - bool PoissonSolverJacobi::finalize() { - bool success = true; - return success; - } - - bool PoissonSolverJacobi::calculateElectrostaticField(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid) { - #warning Jacobi solver does not calculate electric field - return false; - } - - void PoissonSolverJacobi::evaluate(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid, - const std::vector<CellID>& cells) { - for (size_t c=0; c<cells.size(); ++c) { - const CellID cellID = cells[c]; - if (mpiGrid[cellID]->sysBoundaryFlag != 1) continue; - - // Fetch data - const Real rho_q = mpiGrid[cellID]->parameters[CellParams::RHOQ_TOT]; - Real phi_111 = mpiGrid[cellID]->parameters[CellParams::PHI_TMP]; - - // Calculate cell i/j/k indices - dccrg::Types<3>::indices_t indices = mpiGrid.mapping.get_indices(cellID); - CellID nbrID; - - // +/- x face neighbor potential - indices[0] -= 1; nbrID = mpiGrid.mapping.get_cell_from_indices(indices,0); - Real phi_011 = mpiGrid[nbrID]->parameters[CellParams::PHI_TMP]; - indices[0] += 2; nbrID = mpiGrid.mapping.get_cell_from_indices(indices,0); - Real phi_211 = mpiGrid[nbrID]->parameters[CellParams::PHI_TMP]; - indices[0] -= 1; - - // +/- y face neighbor potential - indices[1] -= 1; nbrID = mpiGrid.mapping.get_cell_from_indices(indices,0); - Real phi_101 = mpiGrid[nbrID]->parameters[CellParams::PHI_TMP]; - indices[1] += 2; nbrID = mpiGrid.mapping.get_cell_from_indices(indices,0); - Real phi_121 = mpiGrid[nbrID]->parameters[CellParams::PHI_TMP]; - indices[1] -= 1; - - // +/- z face neighbor potential - /*indices[2] -= 1; nbrID = mpiGrid.mapping.get_cell_from_indices(indices,0); - Real phi_110 = mpiGrid[nbrID]->parameters[CellParams::PHI_TMP]; - indices[2] += 2; mpiGrid.mapping.get_cell_from_indices(indices,0); - Real phi_112 = mpiGrid[nbrID]->parameters[CellParams::PHI_TMP]; - indices[2] -= 1;*/ - Real phi_110 = phi_111; Real phi_112 = phi_111; - - Real DX2 = mpiGrid[cellID]->parameters[CellParams::DX]*mpiGrid[cellID]->parameters[CellParams::DX]; - Real DY2 = mpiGrid[cellID]->parameters[CellParams::DY]*mpiGrid[cellID]->parameters[CellParams::DY]; - Real DZ2 = mpiGrid[cellID]->parameters[CellParams::DZ]*mpiGrid[cellID]->parameters[CellParams::DZ]; - Real factor = 2*(1/DX2 + 1/DY2 + 1/DZ2); - Real rhs = ((phi_011+phi_211)/DX2 + (phi_101+phi_121)/DY2 + (phi_110+phi_112)/DZ2 + rho_q)/factor; - mpiGrid[cellID]->parameters[CellParams::PHI] = rhs; - } - } - - bool PoissonSolverJacobi::solve(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid) { - bool success = true; - phiprof::start("Poisson Solver"); - - // Update charge density - SpatialCell::set_mpi_transfer_type(Transfer::CELL_RHOQ_TOT,false); - mpiGrid.update_copies_of_remote_neighbors(POISSON_NEIGHBORHOOD_ID); - - SpatialCell::set_mpi_transfer_type(Transfer::CELL_PHI,false); - - do { - if (iterate(mpiGrid) == false) success = false; - - // Evaluate the error in potential solution and reiterate if necessary - break; - } while (true); - - phiprof::stop("Poisson Solver"); - - return success; - } - - bool PoissonSolverJacobi::iterate(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid) { - - bool success = true; - - // Swap new and temporary potentials: - vector<CellID> cells = mpiGrid.get_cells(); - for (size_t c=0; c<cells.size(); ++c) { - mpiGrid[cells[c]]->parameters[CellParams::PHI_TMP] =
mpiGrid[cells[c]]->parameters[CellParams::PHI]; - } - cells = mpiGrid.get_remote_cells_on_process_boundary(POISSON_NEIGHBORHOOD_ID); - for (size_t c=0; cparameters[CellParams::PHI_TMP] = mpiGrid[cells[c]]->parameters[CellParams::PHI]; - } - - // Compute new potential on process boundary cells - cells = mpiGrid.get_local_cells_on_process_boundary(POISSON_NEIGHBORHOOD_ID); - evaluate(mpiGrid,cells); - - // Exchange new potential values on process boundaries - mpiGrid.start_remote_neighbor_copy_updates(POISSON_NEIGHBORHOOD_ID); - - // Compute new potential on inner cells - cells = mpiGrid.get_local_cells_not_on_process_boundary(POISSON_NEIGHBORHOOD_ID); - evaluate(mpiGrid,cells); - - // Wait for MPI transfers to complete - mpiGrid.wait_remote_neighbor_copy_updates(POISSON_NEIGHBORHOOD_ID); - - return success; - } - -} // namespace poisson diff --git a/poisson_solver/poisson_solver_jacobi.h b/poisson_solver/poisson_solver_jacobi.h deleted file mode 100644 index b93cb7484..000000000 --- a/poisson_solver/poisson_solver_jacobi.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * This file is part of Vlasiator. - * Copyright 2010-2016 Finnish Meteorological Institute - * - * For details of usage, see the COPYING file and read the "Rules of the Road" - * at http://www.physics.helsinki.fi/vlasiator/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * File: poisson_solver_jacobi.h - * Author: sandroos - * - * Created on January 16, 2015. - */ - -#ifndef POISSON_SOLVER_JACOBI_H -#define POISSON_SOLVER_JACOBI_H - -#include - -#include "poisson_solver.h" - -namespace poisson { - - class PoissonSolverJacobi: public PoissonSolver { - public: - PoissonSolverJacobi(); - ~PoissonSolverJacobi(); - - bool calculateElectrostaticField(dccrg::Dccrg& mpiGrid); - bool initialize(); - bool finalize(); - bool solve(dccrg::Dccrg& mpiGrid); - - private: - void evaluate(dccrg::Dccrg& mpiGrid, - const std::vector& cells); - bool iterate(dccrg::Dccrg& mpiGrid); - }; - - PoissonSolver* makeJacobi(); - -} // namespace poisson - -#endif // POISSON_SOLVER_JACOBI_H - diff --git a/poisson_solver/poisson_solver_sor.cpp b/poisson_solver/poisson_solver_sor.cpp deleted file mode 100644 index f042e04aa..000000000 --- a/poisson_solver/poisson_solver_sor.cpp +++ /dev/null @@ -1,609 +0,0 @@ -/* - * This file is part of Vlasiator. - * Copyright 2010-2016 Finnish Meteorological Institute - * - * For details of usage, see the COPYING file and read the "Rules of the Road" - * at http://www.physics.helsinki.fi/vlasiator/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
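The Jacobi kernel above reads only the frozen copy PHI_TMP and writes PHI, which is why iterate() starts every sweep by copying PHI into PHI_TMP: Jacobi needs two buffers so each new value depends only on the previous iterate. The same scheme on a serial 2D grid, with the 1/DX2, 1/DY2 weighting used above and zero boundary values assumed (all sizes illustrative):

   #include <cstdio>
   #include <vector>

   int main() {
      const int n = 32;
      const double dx2 = 1.0, dy2 = 1.0;
      std::vector<double> phi(n*n, 0.0), tmp(n*n, 0.0), rho(n*n, 0.0);
      rho[(n/2)*n + n/2] = 1.0;   // charge density source term

      const double factor = 2.0*(1.0/dx2 + 1.0/dy2);
      for (int sweep=0; sweep<500; ++sweep) {
         tmp.swap(phi);           // the "old" potential now lives in tmp
         for (int j=1; j<n-1; ++j) for (int i=1; i<n-1; ++i) {
            const int c = j*n + i;
            // New value depends only on the previous sweep's buffer.
            phi[c] = ((tmp[c-1] + tmp[c+1])/dx2
                    + (tmp[c-n] + tmp[c+n])/dy2 + rho[c]) / factor;
         }
      }
      std::printf("phi at source = %g\n", phi[(n/2)*n + n/2]);
      return 0;
   }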
- - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * File: poisson_solver_sor.cpp - * Author: sandroos - * - * Created on January 15, 2015, 12:45 PM - */ - -#include -#include -#include - -#include "../logger.h" -#include "../grid.h" - -#include "poisson_solver_sor.h" - -#ifndef NDEBUG - #define DEBUG_POISSON_SOR -#endif - -using namespace std; - -extern Logger logFile; - -namespace poisson { - - enum Face { - INNER, - XNEG, - XPOS, - YNEG, - YPOS - }; - - static const int RED = 0; - static const int BLACK = 1; - - vector > innerCellPointersRED; - vector > bndryCellPointersRED; - vector > innerCellPointersBLACK; - vector > bndryCellPointersBLACK; - - PoissonSolver* makeSOR() { - return new PoissonSolverSOR(); - } - - PoissonSolverSOR::PoissonSolverSOR(): PoissonSolver() { } - - PoissonSolverSOR::~PoissonSolverSOR() { } - - bool PoissonSolverSOR::initialize() { - bool success = true; - bndryCellParams[CellParams::PHI] = 0; - bndryCellParams[CellParams::PHI_TMP] = 0; - return success; - } - - bool PoissonSolverSOR::finalize() { - bool success = true; - return success; - } - - void PoissonSolverSOR::boundaryConds(dccrg::Dccrg& mpiGrid, - std::vector >& cells) { - if (Parameters::geometry != geometry::XY4D) return; - - #pragma omp parallel for - for (size_t c=0; c::indices_t indices = mpiGrid.mapping.get_indices(cells[c].cellID); - - Face cellFace = INNER; - - if (indices[0] == 1) { - if (indices[1] >= 2 && indices[1] <= Parameters::ycells_ini-3) cellFace = XNEG; - } else if (indices[0] == Parameters::xcells_ini-2) { - if (indices[1] >= 2 && indices[1] <= Parameters::ycells_ini-3) cellFace = XPOS; - } - if (indices[1] == 1) { - if (indices[0] >= 2 && indices[0] <= Parameters::xcells_ini-3) cellFace = YNEG; - } else if (indices[1] == Parameters::ycells_ini-2) { - if (indices[0] >= 2 && indices[0] <= Parameters::xcells_ini-3) cellFace = YPOS; - } - - switch (cellFace) { - case INNER: - break; - case XNEG: - // Copy value from +x neighbor, sets derivative to zero at interface - //cells[c].parameters[0][CellParams::PHI] = cells[c].parameters[2][CellParams::PHI]; - cells[c].parameters[0][CellParams::PHI] = 0; - break; - case XPOS: - cells[c].parameters[0][CellParams::PHI] = 0; - //cells[c].parameters[0][CellParams::PHI] = cells[c].parameters[1][CellParams::PHI]; - break; - case YNEG: - cells[c].parameters[0][CellParams::PHI] = 0; - //cells[c].parameters[0][CellParams::PHI] = cells[c].parameters[4][CellParams::PHI]; - break; - case YPOS: - cells[c].parameters[0][CellParams::PHI] = 0; - //cells[c].parameters[0][CellParams::PHI] = cells[c].parameters[3][CellParams::PHI]; - break; - default: - break; - } - } - } - - bool PoissonSolverSOR::boundaryConds(dccrg::Dccrg& mpiGrid) { - SpatialCell::set_mpi_transfer_type(spatial_cell::Transfer::CELL_PHI); - mpiGrid.start_remote_neighbor_copy_updates(POISSON_NEIGHBORHOOD_ID); - - boundaryConds(mpiGrid,innerCellPointersRED); - boundaryConds(mpiGrid,innerCellPointersBLACK); - - mpiGrid.wait_remote_neighbor_copy_updates(POISSON_NEIGHBORHOOD_ID); - - boundaryConds(mpiGrid,bndryCellPointersRED); - boundaryConds(mpiGrid,bndryCellPointersBLACK); - - 
return true; - } - - bool PoissonSolverSOR::calculateElectrostaticField(dccrg::Dccrg& mpiGrid) { - bool success = true; - SpatialCell::set_mpi_transfer_type(Transfer::CELL_PHI,false); - - mpiGrid.start_remote_neighbor_copy_updates(POISSON_NEIGHBORHOOD_ID); - - // Calculate electric field on inner cells - if (Poisson::is2D == true) { - if (calculateElectrostaticField2D(innerCellPointersRED) == false) success = false; - if (calculateElectrostaticField2D(innerCellPointersBLACK) == false) success = false; - } else { - if (calculateElectrostaticField3D(innerCellPointersRED) == false) success = false; - if (calculateElectrostaticField3D(innerCellPointersBLACK) == false) success = false; - } - - mpiGrid.wait_remote_neighbor_copy_updates(POISSON_NEIGHBORHOOD_ID); - - // Calculate electric field on boundary cells - if (Poisson::is2D == true) { - if (calculateElectrostaticField2D(bndryCellPointersRED) == false) success = false; - if (calculateElectrostaticField2D(bndryCellPointersBLACK) == false) success = false; - } else { - if (calculateElectrostaticField3D(bndryCellPointersRED) == false) success = false; - if (calculateElectrostaticField3D(bndryCellPointersBLACK) == false) success = false; - } - - return success; - } - - void PoissonSolverSOR::evaluate2D(std::vector >& cellPointers,const int& cellColor) { - const Real weight = 1.5; - - Real t_start = 0; - if (Parameters::prepareForRebalance == true) t_start = MPI_Wtime(); - - #pragma omp parallel for - for (size_t c=0; csysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) continue; - - #ifdef DEBUG_POISSON_SOR - bool ok = true; - if (cellPointers[c][0] == NULL) ok = false; - if (cellPointers[c][1] == NULL) ok = false; - if (cellPointers[c][2] == NULL) ok = false; - if (cellPointers[c][3] == NULL) ok = false; - if (cellPointers[c][4] == NULL) ok = false; - if (ok == false) { - stringstream ss; - ss << "ERROR, NULL pointer in " << __FILE__ << ":" << __LINE__ << endl; - cerr << ss.str(); - } - #endif - - Real DX2 = cellPointers[c][0][CellParams::DX]; DX2 *= DX2; - Real DY2 = cellPointers[c][0][CellParams::DY]; DY2 *= DY2; - Real phi_111 = cellPointers[c][0][CellParams::PHI]; - Real rho_q = cellPointers[c][0][CellParams::RHOQ_TOT]; - - Real phi_011 = cellPointers[c][1][CellParams::PHI]; - Real phi_211 = cellPointers[c][2][CellParams::PHI]; - Real phi_101 = cellPointers[c][3][CellParams::PHI]; - Real phi_121 = cellPointers[c][4][CellParams::PHI]; - - Real factor = 2*(1/DX2 + 1/DY2); - Real rhs = ((phi_011+phi_211)/DX2 + (phi_101+phi_121)/DY2 + rho_q)/factor; - Real correction = rhs - phi_111; - cellPointers[c][0][CellParams::PHI] = phi_111 + weight*correction; - - #ifdef DEBUG_POISSON_SOR - ok = true; - if (factor != factor) ok = false; - if (rhs != rhs) ok = false; - if (correction != correction) ok = false; - if (ok == false) { - stringstream ss; - ss << "(SOR) NAN detected in cell " << cellPointers[c].cellID << ' '; - ss << "factor: " << factor << " phi: "; - ss << phi_011 << '\t' << phi_111 << '\t' << phi_211 << '\t' << phi_101 << '\t' << phi_121 << '\t'; - ss << "rho_q: " << rho_q << '\t'; - ss << "rhs: " << rhs << '\t'; - ss << endl; - cerr << ss.str(); - exit(1); - } - #endif - } // for-loop over cells - - if (Parameters::prepareForRebalance == true) { - const size_t N = max((size_t)1,cellPointers.size()); - Real t_average = (MPI_Wtime() - t_start) / N; - - #pragma omp parallel for - for (size_t c=0; c >& cellPointers,const int& cellColor) { - - const Real weight = 1.5; - - Real t_start = 0; - if (Parameters::prepareForRebalance == true) 
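evaluate2D() above is weighted Gauss-Seidel: it forms the Jacobi value rhs and then moves the potential by weight*(rhs - phi_111) with weight = 1.5; relaxation factors between 1 and 2 over-relax the correction and accelerate convergence. A compact serial version of the same update with the red-black ordering folded into one routine (unit grid spacing, illustrative sizes):

   #include <cstdio>
   #include <vector>

   int main() {
      const int n = 32;
      const double weight = 1.5;   // over-relaxation factor, 1 < w < 2
      std::vector<double> phi(n*n, 0.0), rho(n*n, 0.0);
      rho[(n/2)*n + n/2] = 1.0;

      for (int sweep=0; sweep<200; ++sweep) {
         // Two half-sweeps, red cells then black, as in the solver above.
         for (int color=0; color<2; ++color)
            for (int j=1; j<n-1; ++j) for (int i=1; i<n-1; ++i) {
               if ((i + j) % 2 != color) continue;
               const int c = j*n + i;
               // Jacobi value for unit spacing; rho absorbs the dx^2 factor.
               const double rhs = 0.25*(phi[c-1] + phi[c+1]
                                      + phi[c-n] + phi[c+n] + rho[c]);
               phi[c] += weight*(rhs - phi[c]); // phi = phi + w*(rhs - phi)
            }
      }
      std::printf("phi at source = %g\n", phi[(n/2)*n + n/2]);
      return 0;
   }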
t_start = MPI_Wtime(); - - #pragma omp for - for (size_t c=0; c& mpiGrid, - const std::vector& cells, - std::vector >& redCache, - std::vector >& blackCache) { - redCache.clear(); - blackCache.clear(); - - for (size_t c=0; csysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) continue; - - // Calculate cell i/j/k indices - dccrg::Types<3>::indices_t indices = mpiGrid.mapping.get_indices(cells[c]); - - CellCache3D cache; - cache.cellID = cells[c]; - cache.cell = mpiGrid[cells[c]]; - cache[0] = mpiGrid[cells[c]]->parameters.data(); - - #ifdef DEBUG_POISSON_SOR - if (cache.cell == NULL) { - stringstream s; - s << "ERROR, NULL pointer in " << __FILE__ << ":" << __LINE__ << endl; - s << "\t Cell ID " << cells[c] << endl; - cerr << s.str(); - exit(1); - } - #endif - - spatial_cell::SpatialCell* dummy = NULL; - switch (mpiGrid[cells[c]]->sysBoundaryFlag) { - case sysboundarytype::DO_NOT_COMPUTE: - break; - case sysboundarytype::NOT_SYSBOUNDARY: - // Fetch pointers to this cell's (cell) parameters array, - // and pointers to +/- xyz face neighbors' arrays - indices[0] -= 1; cache[1] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[0] += 2; cache[2] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[0] -= 1; - - indices[1] -= 1; cache[3] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[1] += 2; cache[4] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[1] -= 1; - break; - - case sysboundarytype::ANTISYMMETRIC: - // Get +/- x-neighbor pointers - indices[0] -= 1; - dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - if (dummy == NULL) cache[1] = bndryCellParams; - else cache[1] = dummy->parameters.data(); - indices[0] += 2; - dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - if (dummy == NULL) cache[2] = bndryCellParams; - else cache[2] = dummy->parameters.data(); - indices[0] -= 1; - - // Set +/- y-neighbors both point to +y neighbor - // if we are at the lower y-boundary, otherwise set both - // y-neighbors point to -y neighbor. 
- if (indices[1] == 1) { - cache[3] = cache[0]; - indices[1] += 1; - dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - if (dummy == NULL) { - cache[4] = bndryCellParams; - } else { - cache[4] = dummy->parameters.data(); - } - indices[1] -= 1; - } else { - cache[4] = cache[0]; - indices[1] -= 1; - dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - if (dummy == NULL) { - cache[3] = bndryCellParams; - } else { - cache[3] = dummy->parameters.data(); - } - indices[1] += 1; - } - break; - - default: - indices[0] -= 1; - dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - if (dummy == NULL) cache[1] = bndryCellParams; - else cache[1] = dummy->parameters.data(); - indices[0] += 2; - dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - if (dummy == NULL) cache[2] = bndryCellParams; - else cache[2] = dummy->parameters.data(); - indices[0] -= 1; - - indices[1] -= 1; - dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - if (dummy == NULL) cache[3] = bndryCellParams; - else cache[3] = dummy->parameters.data(); - indices[1] += 2; - dummy = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]; - if (dummy == NULL) cache[4] = bndryCellParams; - else cache[4] = dummy->parameters.data(); - indices[1] -= 1; - break; - } - - if ((indices[0] + indices[1]%2 + indices[2]%2) % 2 == RED) { - redCache.push_back(cache); - } else { - blackCache.push_back(cache); - } - } // for-loop over spatial cells - } - - void PoissonSolverSOR::cachePointers3D( - dccrg::Dccrg& mpiGrid, - const std::vector& cells,std::vector >& redCache, - std::vector >& blackCache) { - redCache.clear(); - blackCache.clear(); - - for (size_t c=0; c::indices_t indices = mpiGrid.mapping.get_indices(cells[c]); - CellCache3D cache; - - if ((indices[0] + indices[1]%2 + indices[2]%2) % 2 == RED) { - // Cells on domain boundaries are not iterated - if (mpiGrid[cells[c]]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) continue; - - // Fetch pointers to this cell's (cell) parameters array, - // and pointers to +/- xyz face neighbors' arrays - cache.cell = mpiGrid[cells[c]]; - cache[0] = mpiGrid[cells[c]]->parameters.data(); - - indices[0] -= 1; cache[1] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[0] += 2; cache[2] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[0] -= 1; - - indices[1] -= 1; cache[3] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[1] += 2; cache[4] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[1] -= 1; - - indices[2] -= 1; cache[5] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[2] += 2; cache[6] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[2] -= 1; - - redCache.push_back(cache); - } else { - // Cells on domain boundaries are not iterated - if (mpiGrid[cells[c]]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) continue; - - // Fetch pointers to this cell's (cell) parameters array, - // and pointers to +/- xyz face neighbors' arrays - cache.cell = mpiGrid[cells[c]]; - cache[0] = mpiGrid[cells[c]]->parameters.data(); - - indices[0] -= 1; cache[1] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[0] += 2; cache[2] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[0] -= 1; - - 
indices[1] -= 1; cache[3] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[1] += 2; cache[4] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[1] -= 1; - - indices[2] -= 1; cache[5] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[2] += 2; cache[6] = mpiGrid[ mpiGrid.mapping.get_cell_from_indices(indices,0) ]->parameters.data(); - indices[2] -= 1; - - blackCache.push_back(cache); - } - } - } - - bool PoissonSolverSOR::solve(dccrg::Dccrg& mpiGrid) { - bool success = true; - - // If mesh partitioning has changed, recalculate pointer caches - if (Parameters::meshRepartitioned == true) { - phiprof::start("Pointer Caching"); - if (Poisson::is2D == true) { - cachePointers2D(mpiGrid,mpiGrid.get_local_cells_on_process_boundary(POISSON_NEIGHBORHOOD_ID),bndryCellPointersRED,bndryCellPointersBLACK); - cachePointers2D(mpiGrid,mpiGrid.get_local_cells_not_on_process_boundary(POISSON_NEIGHBORHOOD_ID),innerCellPointersRED,innerCellPointersBLACK); - } else { - cachePointers3D(mpiGrid,mpiGrid.get_local_cells_on_process_boundary(POISSON_NEIGHBORHOOD_ID),bndryCellPointersRED,bndryCellPointersBLACK); - cachePointers3D(mpiGrid,mpiGrid.get_local_cells_not_on_process_boundary(POISSON_NEIGHBORHOOD_ID),innerCellPointersRED,innerCellPointersBLACK); - } - phiprof::stop("Pointer Caching"); - } - - // Calculate charge density -#warning CHANGE ME after DCCRG works - //phiprof::start("MPI (RHOQ)"); - SpatialCell::set_mpi_transfer_type(Transfer::CELL_RHOQ_TOT,false); - //mpiGrid.start_remote_neighbor_copy_receives(POISSON_NEIGHBORHOOD_ID); - //phiprof::stop("MPI (RHOQ)"); - for (size_t c=0; c(mpiGrid); - - maxError = 0; - Real curError; - curError = error(innerCellPointersRED ); if (curError > maxError) maxError = curError; - curError = error(innerCellPointersBLACK); if (curError > maxError) maxError = curError; - curError = error(bndryCellPointersRED ); if (curError > maxError) maxError = curError; - curError = error(bndryCellPointersBLACK); if (curError > maxError) maxError = curError; - - //cerr << iterations << "\t" << maxError << endl; - - if (maxError < Poisson::maxAbsoluteError) break; - //if (relPotentialChange <= Poisson::minRelativePotentialChange) break; - if (iterations >= Poisson::maxIterations) break; - } while (true); - - if (calculateElectrostaticField(mpiGrid) == false) success = false; - return success; - } - - bool PoissonSolverSOR::solve(dccrg::Dccrg& mpiGrid, - const int& oddness) { - // NOTE: This function is entered by all threads in OpenMP run, - // so everything must be thread-safe! 
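Both cachePointers routines classify cells with (indices[0] + indices[1]%2 + indices[2]%2) % 2. Since j%2 has the same parity as j, and likewise for k, this is exactly the (i+j+k)%2 checkerboard, so face neighbours always get opposite colours and one colour class can be updated in parallel without read-write races. A small self-check of that property:

   #include <cstdio>
   #include <cstdlib>

   // The solver's colouring predicate. j%2 and k%2 have the same parity
   // as j and k, so this equals the standard (i+j+k)%2 checkerboard.
   static int colour(int i, int j, int k) { return (i + j%2 + k%2) % 2; }

   int main() {
      const int n = 8;
      for (int k=0; k<n; ++k) for (int j=0; j<n; ++j) for (int i=0; i<n; ++i) {
         // Every +x, +y, +z face neighbour must have the opposite colour.
         if ((i+1<n && colour(i,j,k) == colour(i+1,j,k)) ||
             (j+1<n && colour(i,j,k) == colour(i,j+1,k)) ||
             (k+1<n && colour(i,j,k) == colour(i,j,k+1))) {
            std::printf("colouring broken at %d %d %d\n", i, j, k);
            return EXIT_FAILURE;
         }
      }
      std::printf("face neighbours always alternate colour\n");
      return 0;
   }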
- - bool success = true; - const int tid = omp_get_thread_num(); - - #warning Always uses 2D solver at the moment - //if (Poisson::is2D == true) evaluator = this->evaluate2D; - //else evaluator = evaluate3D; - - // Compute new potential on process boundary cells - if (tid == 0) phiprof::start("Evaluate potential"); - if (oddness == RED) evaluate2D(bndryCellPointersRED,oddness); - else evaluate2D(bndryCellPointersBLACK,oddness); - if (tid == 0) { -// size_t cells = bndryCellPointersRED.size() + bndryCellPointersBLACK.size(); -// phiprof::stop("Evaluate potential",cells,"Spatial Cells"); - - // Exchange new potential values on process boundaries -// phiprof::start("MPI (start copy)"); - mpiGrid.start_remote_neighbor_copy_updates(POISSON_NEIGHBORHOOD_ID); -// phiprof::stop("MPI (start copy)"); - -// phiprof::start("Evaluate potential"); - } - - // Compute new potential on inner cells - if (oddness == RED) evaluate2D(innerCellPointersRED,oddness); - else evaluate2D(innerCellPointersBLACK,oddness); - - // Wait for MPI transfers to complete - if (tid == 0) { - size_t cells = innerCellPointersRED.size() + innerCellPointersBLACK.size(); - cells += bndryCellPointersRED.size() + bndryCellPointersBLACK.size(); -// phiprof::stop("Evaluate potential",cells,"Spatial Cells"); -// phiprof::start("MPI (wait copy)"); - mpiGrid.wait_remote_neighbor_copy_updates(POISSON_NEIGHBORHOOD_ID); -// phiprof::stop("MPI (wait copy)"); - - - phiprof::stop("Evaluate potential",cells,"Spatial Cells"); - } -// #pragma omp barrier - - return success; - } - -} // namespace poisson diff --git a/poisson_solver/poisson_solver_sor.h b/poisson_solver/poisson_solver_sor.h deleted file mode 100644 index 5468e8707..000000000 --- a/poisson_solver/poisson_solver_sor.h +++ /dev/null @@ -1,78 +0,0 @@ -/* - * This file is part of Vlasiator. - * Copyright 2010-2016 Finnish Meteorological Institute - * - * For details of usage, see the COPYING file and read the "Rules of the Road" - * at http://www.physics.helsinki.fi/vlasiator/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
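In the solve(mpiGrid,oddness) overload above the whole OpenMP team enters the function, and thread 0 alone drives the halo exchange while the other threads keep computing. That division of labour requires MPI to be initialized with at least MPI_THREAD_FUNNELED support. A skeleton of the pattern with the grid work elided; this is a shape sketch under that assumption, not the deleted solver:

   #include <mpi.h>
   #include <omp.h>
   #include <cstdio>

   int main(int argc, char** argv) {
      int provided = 0;
      // Thread 0 makes MPI calls inside the parallel region below, so ask
      // for MPI_THREAD_FUNNELED rather than using plain MPI_Init.
      MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided);
      if (provided < MPI_THREAD_FUNNELED) {
         std::fprintf(stderr, "funneled threading not available\n");
         MPI_Abort(MPI_COMM_WORLD, 1);
      }

      #pragma omp parallel
      {
         // all threads: compute cells that sit on the process boundary ...

         #pragma omp barrier       // boundary values complete before sending
         if (omp_get_thread_num() == 0) {
            // thread 0 alone starts the (nonblocking) halo exchange here
         }

         // all threads: compute interior cells, overlapping the exchange ...

         if (omp_get_thread_num() == 0) {
            // thread 0 alone waits for the exchange to complete here
         }
         #pragma omp barrier       // no thread reads halos before they arrive
      }

      MPI_Finalize();
      return 0;
   }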
- * - * File: poisson_solver_sor.h - * Author: sandroos - * - * Created on January 15, 2015, 12:45 PM - */ - -#ifndef POISSON_SOLVER_SOR_H -#define POISSON_SOLVER_SOR_H - -#include - -#include "poisson_solver.h" - -namespace poisson { - - const unsigned int SOR_VARS = 0; - - class PoissonSolverSOR: public PoissonSolver { - public: - PoissonSolverSOR(); - ~PoissonSolverSOR(); - - bool calculateElectrostaticField(dccrg::Dccrg& mpiGrid); - bool initialize(); - bool finalize(); - bool solve(dccrg::Dccrg& mpiGrid); - - private: - - Real bndryCellParams[CellParams::N_SPATIAL_CELL_PARAMS]; - - bool boundaryConds(dccrg::Dccrg& mpiGrid); - void boundaryConds(dccrg::Dccrg& mpiGrid, - std::vector >& cells); - - void cachePointers2D(dccrg::Dccrg& mpiGrid, - const std::vector& cells,std::vector >& redCache, - std::vector >& blackCache); - void cachePointers3D(dccrg::Dccrg& mpiGrid, - const std::vector& cells,std::vector >& redCache, - std::vector >& blackCache); - void evaluate2D(std::vector >& cellPointers,const int& cellColor); - void evaluate3D(std::vector >& cellPointers,const int& cellColor); - - void (*evaluator)(std::vector >& cellPointers,const int& cellColor); - - bool solve(dccrg::Dccrg& mpiGrid, - const int& oddness); - - }; - - PoissonSolver* makeSOR(); - -} // namespace poisson - -#endif // POISSON_SOLVER_SOR_H - diff --git a/projects/ElectricSail/ElectricSail.cfg b/projects/ElectricSail/ElectricSail.cfg deleted file mode 100644 index e9baf5832..000000000 --- a/projects/ElectricSail/ElectricSail.cfg +++ /dev/null @@ -1,160 +0,0 @@ -propagate_field = 0 -propagate_potential = 1 -propagate_vlasov_acceleration = 1 -propagate_vlasov_translation = 1 -dynamic_timestep = 1 -project = ElectricSail - -[loadBalance] -rebalanceInterval = 10 - -[io] -diagnostic_write_interval = 1 -write_initial_state = 0 -restart_walltime_interval = 100000 -number_of_restarts = 1 - -system_write_t_interval = 2e-6 -system_write_file_name = fullf -system_write_distribution_stride = 0 -system_write_distribution_xline_stride = 0 -system_write_distribution_yline_stride = 0 -system_write_distribution_zline_stride = 0 - -[gridbuilder] -geometry = XY4D -x_length = 240 -y_length = 240 -z_length = 1 -x_min = -600.0 -x_max = +600.0 -y_min = -600.0 -y_max = +600.0 -z_min = -2.50 -z_max = +2.50 -#t_max = 4e-5 -t_max = 0 -#timestep_max = 10 -dt = 1e-7 - -[velocitymesh] -name = ElectronMesh -vx_min = -5.0e7 -vx_max = +5.0e7 -vy_min = -5.0e7 -vy_max = +5.0e7 -vz_min = -2.5e5 -vz_max = +2.5e5 -vx_length = 200 -vy_length = 200 -vz_length = 1 - -name = IonMesh -vx_min = -2.0e6 -vx_max = +2.0e6 -vy_min = -2.0e6 -vy_max = +2.0e6 -vz_min = -4.0e4 -vz_max = +4.0e4 -vx_length = 50 -vy_length = 50 -vz_length = 1 - -[bailout] -min_dt = 1e-8 - -[ParticlePopulation] -name = Electron -charge = -1 -mass_units = ELECTRON -mass = 10.0 -sparse_min_value = 1.0e-18 -mesh = ElectronMesh - -name = Proton -charge = +1 -mass_units = PROTON -mass = 1.0 -sparse_min_value = 1.0e-16 -mesh = IonMesh - -[ElectricSail] -solver = SOR -tether_x = 0 -tether_y = 0 -tether_voltage = 10000 -is_2D = 1 -clear_potential = 0 -max_iterations = 10000 -#max_iterations = 1 -n = 2 - -# Electrons -rho = 7e6 -rhoPertAbsAmp = 0 -Vx = 4.0e5 -Vy = 0.0 -Vz = 0.0 -Tx = 1.0e5 -Ty = 1.0e5 -Tz = 1.0e5 - -# Protons -rho = 7e6 -rhoPertAbsAmp = 0 -Vx = 4.0e5 -Vy = 0.0 -Vz = 0.0 -Tx = 1.0e6 -Ty = 1.0e6 -Tz = 1.0e6 - -[vlasovsolver] -#minCFL = 0.4 -#maxCFL = 0.6 -#vlasovSemiLagAcceleration = 0 - -[boundaries] -periodic_x = no -periodic_y = no -periodic_z = yes -#boundary = Outflow -#boundary = 
Antisymmetric -boundary = ProjectBoundary - -[variables] -output = Rho -output = BackgroundVolE -output = RhoV -output = VolE -output = PTensor -output = MPIrank -output = Blocks -output = Potential -output = PotentialError -output = ChargeDensity -output = BoundaryType -output = SpeciesMoments -output = fSaved -output = LBweight - -[sparse] -minValue = 1.0e-16 - -#[outflow] -#face = x- -#face = x+ -#face = y+ -#precedence = 1 - -[projectboundary] -face = x- -face = x+ -face = y+ -face = y- -precedence = 1 - -[antisymmetric] -face = y- -precedence = 10 - diff --git a/projects/ElectricSail/electric_sail.cpp b/projects/ElectricSail/electric_sail.cpp deleted file mode 100644 index 725d423e0..000000000 --- a/projects/ElectricSail/electric_sail.cpp +++ /dev/null @@ -1,403 +0,0 @@ -/* - * This file is part of Vlasiator. - * Copyright 2010-2016 Finnish Meteorological Institute - * - * For details of usage, see the COPYING file and read the "Rules of the Road" - * at http://www.physics.helsinki.fi/vlasiator/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * File: electric_sail.cpp - * Author: sandroos - * - * Created on March 3, 2015 - */ - -#include -#include - -#include "../../readparameters.h" -#include "../../logger.h" -#include "../../object_wrapper.h" -#include "../../poisson_solver/poisson_solver.h" -#include "../read_gaussian_population.h" - -#include "electric_sail.h" - -using namespace std; -using namespace spatial_cell; - -extern Logger logFile; - -namespace projects { - - static Real radius = 0; - - Population::Population(const Real& rho,const Real& Tx,const Real& Ty,const Real& Tz, - const Real& Vx,const Real& Vy,const Real& Vz) { - this->rho = rho; - T[0] = Tx; - T[1] = Ty; - T[2] = Tz; - V[0] = Vx; - V[1] = Vy; - V[2] = Vz; - } - - ElectricSail::ElectricSail(): TriAxisSearch() { } - - ElectricSail::~ElectricSail() { } - - void ElectricSail::addParameters() { - typedef Readparameters RP; - - RP::add("ElectricSail.solver","Name of the Poisson solver",string("SOR")); - RP::add("ElectricSail.radius","Radius where charge density is non-zero",(Real)15e3); - RP::add("ElectricSail.max_iterations","Maximum number of iterations",(uint)1000); - RP::add("ElectricSail.min_relative_change","Potential is iterated until it the relative change is less than this value",(Real)1e-5); - RP::add("ElectricSail.clear_potential","Clear potential each timestep before solving Poisson?",true); - RP::add("ElectricSail.is_2D","If true then system is two-dimensional in xy-plane",true); - RP::add("ElectricSail.tether_x","Electric sail tether x-position",(Real)0.0); - RP::add("ElectricSail.tether_y","Electric sail tether y-position",(Real)0.0); - RP::add("ElectricSail.max_absolute_error","Maximum absolute error allowed in Poisson solution",(Real)1e-4); - RP::add("ElectricSail.add_particle_cloud","If true, add charge neutralizing particle 
cloud around tether (bool)",false); - RP::add("ElectricSail.tetherCharge","Tether charge per meter in elementary charges",(Real)200e9); - RP::add("ElectricSail.timeDependentCharge","If true, tether charge is time dependent (bool)",false); - RP::add("ElectricSail.tetherChargeRiseTime","Time when tether charge reaches its maximum value",0.0); - RP::add("ElectricSail.useBackgroundField","Use background field instead of external charge density?",false); - - projects::ReadGaussianPopulation rgp; - rgp.addParameters("ElectricSail"); - } - - Real ElectricSail::getCorrectNumberDensity(spatial_cell::SpatialCell* cell,const uint popID) const { - if (addParticleCloud == false) return populations[popID].rho; - if (getObjectWrapper().particleSpecies[popID].name != "Electron") return populations[popID].rho; - - if (tetherUnitCharge < 0) { - cerr << "negative tether not implemented in " << __FILE__ << ":" << __LINE__ << endl; - exit(1); - } - - const Real* parameters = cell->get_cell_parameters(); - - Real pos[3]; - pos[0] = parameters[CellParams::XCRD] + 0.5*parameters[CellParams::DX]; - pos[1] = parameters[CellParams::YCRD] + 0.5*parameters[CellParams::DY]; - pos[2] = parameters[CellParams::ZCRD] + 0.5*parameters[CellParams::DZ]; - Real radius2 = pos[0]*pos[0] + pos[1]*pos[1] + pos[2]*pos[2]; - - if (radius2 > particleCloudRadius*particleCloudRadius) return populations[popID].rho; - - const Real charge = getObjectWrapper().particleSpecies[popID].charge; - const Real DZ = parameters[CellParams::DZ]; - Real cloudDens = -tetherUnitCharge / (M_PI*particleCloudRadius*particleCloudRadius*charge); - return populations[popID].rho + cloudDens; - } - - Real ElectricSail::getDistribValue(creal& vx,creal& vy,creal& vz,creal& dvx,creal& dvy,creal& dvz,const uint popID) const { - creal mass = getObjectWrapper().particleSpecies[popID].mass; - creal kb = physicalconstants::K_B; - const Population& pop = populations[popID]; - - Real value - = pop.rho - * mass / (2.0 * M_PI * kb ) - * exp(-mass * ( pow(vx-pop.V[0],2.0) / (2.0*kb*pop.T[0]) - + pow(vy-pop.V[1],2.0) / (2.0*kb*pop.T[1]) - ) - ); - - return value; - } - - void ElectricSail::getParameters() { - bool success = true; - - if(getObjectWrapper().particleSpecies.size() > 1) { - std::cerr << "The selected project does not support multiple particle populations!
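getCorrectNumberDensity() above neutralizes the tether by spreading the opposite of its line charge over a cylinder of radius particleCloudRadius, so the added electron density is n = -lambda/(pi R^2 q_e). A standalone evaluation of that formula, plugging in the tether charge from ElectricSail.cfg with an assumed 100 m cloud radius (the radius value is illustrative; the config default is not shown in this hunk):

   #include <cmath>
   #include <cstdio>

   int main() {
      const double elementaryCharge = 1.602176634e-19;          // C
      const double tetherUnitCharge = 200e9 * elementaryCharge; // C per metre
      const double cloudRadius = 100.0;                         // m (assumed)
      const double electronCharge = -elementaryCharge;

      // Charge per metre, spread over the cloud cross-section, expressed
      // as an electron number density: n = -lambda / (pi R^2 q_e).
      const double cloudDens = -tetherUnitCharge
         / (M_PI * cloudRadius * cloudRadius * electronCharge);

      std::printf("neutralising electron density = %.3e m^-3\n", cloudDens);
      return 0;
   }

With these numbers the result is a few times 1e6 m^-3, the same order as the 7e6 m^-3 background density in the config, which is what makes the cloud a plausible neutralizer.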
Aborting in " << __FILE__ << " line " << __LINE__ << std::endl; - abort(); - } - - Project::getParameters(); - typedef Readparameters RP; - RP::get("ElectricSail.solver",poisson::Poisson::solverName); - RP::get("ElectricSail.radius",radius); - RP::get("ElectricSail.max_iterations",poisson::Poisson::maxIterations); - RP::get("ElectricSail.min_relative_change",poisson::Poisson::minRelativePotentialChange); - RP::get("ElectricSail.is_2D",poisson::Poisson::is2D); - RP::get("ElectricSail.clear_potential",poisson::Poisson::clearPotential); - RP::get("ElectricSail.tether_x",tether_x); - RP::get("ElectricSail.tether_y",tether_y); - RP::get("ElectricSail.max_absolute_error",poisson::Poisson::maxAbsoluteError); - RP::get("ElectricSail.add_particle_cloud",addParticleCloud); - RP::get("ElectricSail.tetherCharge",tetherUnitCharge); - RP::get("ElectricSail.timeDependentCharge",timeDependentCharge); - RP::get("ElectricSail.tetherChargeRiseTime",tetherChargeRiseTime); - RP::get("ElectricSail.useBackgroundField",useBackgroundField); - - projects::ReadGaussianPopulation rgp; - projects::GaussianPopulation gaussPops; - if (rgp.getParameters("ElectricSail",gaussPops) == false) success = false; - - if (success == false) { - stringstream ss; - ss << "(ElectricSail) ERROR: Could not find parameters for all species."; - ss << "\t CHECK YOUR CONFIG FILE!" << endl; - cerr << ss.str(); - exit(1); - } - - for (size_t i=0; i, 2>& perBGrid, - FsGrid< std::array, 2>& BgBGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid - ) { -#warning this is not supported at the moment, needs to be ported to fsgrid - std::cerr << "ERROR: ElectricSail::setProjectBField is not ported to fsgrid! Aborting." << std::endl; - abort(); -// Real X = cell->parameters[CellParams::XCRD]; -// Real Y = cell->parameters[CellParams::YCRD]; -// Real Z = cell->parameters[CellParams::ZCRD]; -// Real DX = cell->parameters[CellParams::DX]; -// Real DY = cell->parameters[CellParams::DY]; -// Real DZ = cell->parameters[CellParams::DZ]; -// -// cell->parameters[CellParams::RHOQ_EXT] = 0; -// Real pos[3]; -// pos[0] = cell->parameters[CellParams::XCRD] + 0.5*cell->parameters[CellParams::DX]; -// pos[1] = cell->parameters[CellParams::YCRD] + 0.5*cell->parameters[CellParams::DY]; -// pos[2] = cell->parameters[CellParams::ZCRD] + 0.5*cell->parameters[CellParams::DZ]; -// -// Real factor = 1.0; -// if (timeDependentCharge == true) { -// factor = max((Real)0.0,(Real)1.0+(Parameters::t-tetherChargeRiseTime)/tetherChargeRiseTime); -// factor = min((Real)1.0,factor); -// } -// -// if (useBackgroundField == false) { -// Real rad = sqrt(pos[0]*pos[0]+pos[1]*pos[1]+pos[2]*pos[2]); -// Real D3 = cell->parameters[CellParams::DX]*cell->parameters[CellParams::DY]; -// if (rad <= 5) cell->parameters[CellParams::RHOQ_EXT] = 0.25*factor*tetherUnitCharge/D3/physicalconstants::EPS_0; -// -// cell->parameters[CellParams::BGEXVOL] = 0; -// cell->parameters[CellParams::BGEYVOL] = 0; -// cell->parameters[CellParams::BGEZVOL] = 0; -// return; -// } -// -// cell->parameters[CellParams::RHOQ_EXT] = 0; -// -// const Real EPSILON = 1e-30; -// uint N = 1; -// int N3_sum = 0; -// Real E_vol[3] = {0,0,0}; -// -// bool ok = false; -// do { -// Real E_current[3] = {0,0,0}; -// -// const Real DX_N = DX / N; -// const Real DY_N = DY / N; -// const Real DZ_N = DZ / N; -// -// // Sample E using N points -// Real E_dummy[3] = {0,0,0}; -// for (uint k=0; k= poisson::Poisson::maxIterations) ok = true; -// -// // Add new E values to accumulated sums -// for (int i=0; i<3; ++i) E_vol[i] += 
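The commented-out block above refines a volume average by re-sampling with an ever finer stencil and accumulating until a convergence or iteration cap is hit, and calcPhaseSpaceDensity below applies the same scheme to the distribution function with a 1% stopping criterion. The idea reduced to one dimension, on an illustrative integrand and tolerance:

   #include <cmath>
   #include <cstdio>

   // Stand-in for the sampled field: we want its average over [a,b].
   static double f(double x) { return std::exp(-x*x); }

   int main() {
      const double a = 0.0, b = 1.0;   // the "cell"
      double total = 0.0;              // sum of all samples so far
      long   nSamples = 0;             // how many samples contributed
      double prevAvg = 0.0;

      for (int N = 2; N <= 64; ++N) {
         // Midpoint-sample with N points and fold into the running average.
         double sum = 0.0;
         for (int i = 0; i < N; ++i)
            sum += f(a + (i + 0.5)*(b - a)/N);
         total += sum;
         nSamples += N;
         const double avg = total/nSamples;

         // Stop once one more refinement changes the answer by < 1 %.
         if (N > 2 && std::fabs(avg - prevAvg) < 0.01*std::fabs(avg)) {
            std::printf("converged: %.6f after N = %d\n", avg, N);
            return 0;
         }
         prevAvg = avg;
      }
      std::printf("no convergence: %.6f\n", prevAvg);
      return 0;
   }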
E_current[i]; -// N3_sum += N*N*N; -// ++N; -// } while (ok == false); -// -// // Store the computed volume-average -// cell->parameters[CellParams::BGEXVOL] = E_vol[0] / N3_sum; -// cell->parameters[CellParams::BGEYVOL] = E_vol[1] / N3_sum; -// cell->parameters[CellParams::BGEZVOL] = E_vol[2] / N3_sum; - - } - - void ElectricSail::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - Real dx = cellParams[CellParams::DX]; - Real dy = cellParams[CellParams::DY]; - Real dz = cellParams[CellParams::DZ]; - Real x = cellParams[CellParams::XCRD] + 0.5*dx; - Real y = cellParams[CellParams::YCRD] + 0.5*dy; - Real z = cellParams[CellParams::ZCRD] + 0.5*dz; - Real R = sqrt(x*x + y*y + z*z); - - cellParams[CellParams::RHOQ_TOT] = 0; - - if (R > radius) return; - - const Real volume = dx*dy*dz; - cellParams[CellParams::RHOQ_TOT] = physicalconstants::CHARGE - / volume / physicalconstants::EPS_0; - - if (Parameters::isRestart == true) return; - cellParams[CellParams::PHI] = 0; - } - - Real ElectricSail::calcPhaseSpaceDensity( - creal& x, creal& y, creal& z, - creal& dx, creal& dy, creal& dz, - creal& vx, creal& vy, creal& vz, - creal& dvx, creal& dvy, creal& dvz,const uint popID) const { - - // Iterative sampling of the distribution function. Keep track of the - // accumulated volume average over the iterations. When the next - // iteration improves the average by less than 1%, return the value. - Real avgTotal = 0.0; - bool ok = false; - uint N = 2; // Start by using a single velocity sample - int N3_sum = 0; // Sum of sampling points used so far - do { - Real avg = 0.0; // Volume average obtained during this sampling - creal DVX = dvx / N; - creal DVY = dvy / N; - //creal DVZ = dvz / N; - creal DVZ = dvz / 1; - - // Sample the distribution using N*N*N points - for (uint vi=0; vi::min(),avg * static_cast(1e-6)); - Real avgAccum = avgTotal / (avg + N3_sum); - //Real avgCurrent = avg / (N*N*N); - Real avgCurrent = avg / (N*N); - if (fabs(avgCurrent-avgAccum)/(avgAccum+eps) < 0.01) ok = true; - else if (avg < getObjectWrapper().particleSpecies[popID].sparseMinValue*0.01) ok = true; - else if (N > 10) { - ok = true; - } - - avgTotal += avg; - //N3_sum += N*N*N; - N3_sum += N*N; - ++N; - } while (ok == false); - - return avgTotal / N3_sum; - } - - std::vector> ElectricSail::getV0(creal x,creal y,creal z, const uint popID) const { - vector> centerPoints; - for(uint i=0; i point {{populations[i].V[0],populations[i].V[1],populations[i].V[2]}}; - centerPoints.push_back(point); - } - return centerPoints; - } - - void ElectricSail::tetherElectricField(Real* x,Real* E) const { - const Real minRadius2 = 5.0*5.0; - Real constant = tetherUnitCharge / (2*M_PI*physicalconstants::EPS_0); - if (timeDependentCharge == true) { - Real factor = max((Real)0.0,(Real)1.0+(Parameters::t-tetherChargeRiseTime)/tetherChargeRiseTime); - factor = min((Real)1.0,factor); - constant *= factor; - } - - E[0] = constant * (x[0] - tether_x); - E[1] = constant * (x[1] - tether_y); - Real radius2 = (x[0]-tether_x)*(x[0]-tether_x) + (x[1]-tether_y)*(x[1]-tether_y); - radius2 = max(minRadius2,radius2); - E[0] /= radius2; - E[1] /= radius2; - } - -} // namespace projects diff --git a/projects/ElectricSail/electric_sail.h b/projects/ElectricSail/electric_sail.h deleted file mode 100644 index 5a2866150..000000000 --- a/projects/ElectricSail/electric_sail.h +++ /dev/null @@ -1,100 +0,0 @@ -/* - * This file is part of Vlasiator. 
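tetherElectricField() above is the electrostatic field of an infinite line charge, E = lambda/(2 pi eps0) * rhat/r, evaluated componentwise as constant*(x - tether_x)/r^2 with the radius clamped near the wire. The same field as a standalone function, using the tether charge from the config; the 5 m clamp matches minRadius2 = 5.0*5.0 above, the probe radii are illustrative:

   #include <cmath>
   #include <cstdio>

   // Field of an infinite line charge along z through (x0,y0):
   // E = lambda/(2*pi*eps0) * rhat / r, clamped inside a minimum radius.
   static void lineChargeE(double lambda, double x0, double y0,
                           const double x[2], double E[2]) {
      const double EPS0 = 8.8541878128e-12;
      const double minRadius2 = 5.0*5.0;
      const double C = lambda / (2.0*M_PI*EPS0);
      double r2 = (x[0]-x0)*(x[0]-x0) + (x[1]-y0)*(x[1]-y0);
      if (r2 < minRadius2) r2 = minRadius2;
      // Dividing the (x - x0) components by r^2 yields rhat/r overall.
      E[0] = C*(x[0]-x0)/r2;
      E[1] = C*(x[1]-y0)/r2;
   }

   int main() {
      const double lambda = 200e9*1.602176634e-19; // C/m, cfg tether charge
      for (double r = 10.0; r <= 1000.0; r *= 10.0) {
         double pos[2] = { r, 0.0 }, E[2];
         lineChargeE(lambda, 0.0, 0.0, pos, E);
         std::printf("r = %6.0f m  |E| = %.3e V/m\n", r, std::hypot(E[0], E[1]));
      }
      return 0;
   }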
- * Copyright 2010-2016 Finnish Meteorological Institute - * - * For details of usage, see the COPYING file and read the "Rules of the Road" - * at http://www.physics.helsinki.fi/vlasiator/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * File: electric_sail.h - * Author: sandroos - * - * Created on March 3, 2015 - */ - -#ifndef ELECTRIC_SAIL_H -#define ELECTRIC_SAIL_H - -#include -#include "../project.h" -#include "../projectTriAxisSearch.h" - -namespace projects { - - struct Population { - Real rho; - Real T[3]; - Real V[3]; - - Population(const Real& rho,const Real& Tx,const Real& Ty,const Real& Tz, - const Real& Vx,const Real& Vy,const Real& Vz); - }; - - class ElectricSail: public TriAxisSearch { - public: - ElectricSail(); - virtual ~ElectricSail(); - - static void addParameters(); - Real getCorrectNumberDensity(spatial_cell::SpatialCell* cell,const uint popID) const; - virtual void getParameters(); - virtual bool initialize(); - virtual void setProjectBField( - FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& BgBGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid - ); - - protected: - uint popID; - std::vector populations; - - bool addParticleCloud; /**< If true, a charge-neutralising particle cloud is added around the tether.*/ - Real particleCloudRadius; /**< Radius of the particle cloud.*/ - - Real tether_x; /**< Electric sail tether x-position.*/ - Real tether_y; /**< Electric sail tether y-position.*/ - Real tetherChargeRiseTime; /**< Time when tether charge reaches its maximum value. - * Only has an effect if ElectricSail::timeDependentCharge is true.*/ - Real tetherUnitCharge; /**< Unit charge per meter of the tether in Coulombs.*/ - bool timeDependentCharge; /**< If true, tether charge is time-dependent.*/ - bool useBackgroundField; /**< If true, then tether electric field is calculated as a constant - * background field. 
If false, a charge placed near the tether position - * is used instead.*/ - - void tetherElectricField(Real* x,Real* E) const; - - virtual void calcCellParameters(spatial_cell::SpatialCell* cell,creal& t); - - virtual Real calcPhaseSpaceDensity( - creal& x, creal& y, creal& z, - creal& dx, creal& dy, creal& dz, - creal& vx, creal& vy, creal& vz, - creal& dvx, creal& dvy, creal& dvz,const uint popID) const; - - Real getDistribValue(creal& vx,creal& vy, creal& vz, - creal& dvx, creal& dvy, creal& dvz,const uint popID) const; - - std::vector> getV0(creal x,creal y,creal z, const uint popID) const; - - virtual bool rescalesDensity(const uint popID) const; - - }; // class PoissonTest - -} // namespace projects - -#endif // POISSON_TEST_H - diff --git a/projects/ElectricSail/esail_expressions.xml b/projects/ElectricSail/esail_expressions.xml deleted file mode 100644 index 7d28efc2d..000000000 --- a/projects/ElectricSail/esail_expressions.xml +++ /dev/null @@ -1,183 +0,0 @@ - - - - Electron/n_diff - \-7e6 - false - ScalarMeshVar - false - false - __none__ - - __none__ - false - - - Proton/n_diff - \-7e6 - false - ScalarMeshVar - false - false - __none__ - - __none__ - false - - - E_vol_x - E_vol[0] - false - ScalarMeshVar - false - false - __none__ - - __none__ - false - - - E_vol_y - E_vol[1] - false - ScalarMeshVar - false - false - __none__ - - __none__ - false - - - E_vol_z - E_vol[2] - false - ScalarMeshVar - false - false - __none__ - - __none__ - false - - - Electron/Vx - "\[0] / (1e-10 + \)" - false - ScalarMeshVar - false - false - __none__ - - __none__ - false - - - Electron/Vy - "\[1] / (1e-10 + \)" - false - ScalarMeshVar - false - false - __none__ - - __none__ - false - - - Proton/Vx - "\[0] / (\+1e-10)" - false - ScalarMeshVar - false - false - __none__ - - __none__ - false - - - poisson/charges - \*8.854e-12/1.602e-19 - false - ScalarMeshVar - false - false - __none__ - - __none__ - false - - - Proton/V - "\ / (\+1e-20)" - false - VectorMeshVar - false - false - __none__ - - __none__ - false - - - Electron/V - "\ / (\+1e-20)" - false - VectorMeshVar - false - false - __none__ - - __none__ - false - - - Electron/V_mag - magnitude(\) - false - ScalarMeshVar - false - false - __none__ - - __none__ - false - - - Proton/V_mag - magnitude(\) - false - ScalarMeshVar - false - false - __none__ - - __none__ - false - - - poisson/E_scons - "E_vol - \" - false - VectorMeshVar - false - false - __none__ - - __none__ - false - - - poisson/E_scons_magnitude - magnitude(\) - false - ScalarMeshVar - false - false - __none__ - - __none__ - false - - diff --git a/projects/Poisson/PoissonTest.cfg b/projects/Poisson/PoissonTest.cfg deleted file mode 100644 index db9acb914..000000000 --- a/projects/Poisson/PoissonTest.cfg +++ /dev/null @@ -1,88 +0,0 @@ -propagate_field = 0 -propagate_potential = 1 -propagate_vlasov_acceleration = 0 -propagate_vlasov_translation = 0 -dynamic_timestep = 0 -project = PoissonTest - -[loadBalance] -rebalanceInterval = 100 - -[io] -diagnostic_write_interval = 1 -write_initial_state = 1 -restart_walltime_interval = 100000 -number_of_restarts = 1 - -system_write_t_interval = 10.0 -system_write_file_name = fullf -system_write_distribution_stride = 1 -system_write_distribution_xline_stride = 0 -system_write_distribution_yline_stride = 0 -system_write_distribution_zline_stride = 0 - -[gridbuilder] -x_length = 33 -y_length = 33 -z_length = 33 -x_min = -1.65e6 -x_max = +1.65e6 -y_min = -1.65e6 -y_max = +1.65e6 -z_min = -1.65e6 -z_max = +1.65e6 -vx_min = -5.0e5 -vx_max = +5.0e5 
-vy_min = -5.0e5 -vy_max = +5.0e5 -vz_min = -5.0e5 -vz_max = +5.0e5 -vx_length = 1 -vy_length = 1 -vz_length = 1 -t_max = 100.0 -dt = 1.0 - -[Poisson] -#solver = Jacobi -solver = SOR - -[vlasovsolver] -#minCFL = 0.4 -#maxCFL = 0.6 -#vlasovSemiLagAcceleration = 0 - -[boundaries] -periodic_x = no -periodic_y = no -periodic_z = no -boundary = Outflow - -[variables] -output = Rho -output = B -output = BackgroundB -output = PerturbedB -output = Pressure -output = RhoV -output = E -output = PTensor -output = MPIrank -output = Blocks -output = Potential -output = PotentialError -output = ChargeDensity - -[sparse] -minValue = 1.0e-22 - -[outflow] -face = x- -face = x+ -face = y- -face = y+ -face = z- -face = z+ -precedence = 1 - - diff --git a/projects/Poisson/poisson_test.cpp b/projects/Poisson/poisson_test.cpp deleted file mode 100644 index 757c13da6..000000000 --- a/projects/Poisson/poisson_test.cpp +++ /dev/null @@ -1,117 +0,0 @@ -/* - * This file is part of Vlasiator. - * Copyright 2010-2016 Finnish Meteorological Institute - * - * For details of usage, see the COPYING file and read the "Rules of the Road" - * at http://www.physics.helsinki.fi/vlasiator/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * File: poisson_test.cpp - * Author: sandroos - * - * Created on January 14, 2015, 3:17 PM - */ - -#include -#include - -#include "../../readparameters.h" -#include "../../object_wrapper.h" - -#include "../../poisson_solver/poisson_solver.h" -#include "poisson_test.h" - -using namespace std; -using namespace spatial_cell; - -namespace projects { - - static Real radius = 0; - - PoissonTest::PoissonTest(): Project() { } - - PoissonTest::~PoissonTest() { } - - void PoissonTest::addParameters() { - typedef Readparameters RP; - RP::add("Poisson.solver","Name of the Poisson solver",string("SOR")); - RP::add("Poisson.radius","Radius where charge density is non-zero",(Real)15e3); - RP::add("Poisson.max_iterations","Maximum number of iterations",(uint)1000); - RP::add("Poisson.min_relative_change","Potential is iterated until it the relative change is less than this value",(Real)1e-5); - RP::add("Poisson.is_2D","If true then system is two-dimensional in xy-plane",true); - } - - void PoissonTest::getParameters() { - typedef Readparameters RP; - - if(getObjectWrapper().particleSpecies.size() > 1) { - std::cerr << "The selected project does not support multiple particle populations! 
Aborting in " << __FILE__ << " line " << __LINE__ << std::endl; - abort(); - } - RP::get("Poisson.solver",poisson::Poisson::solverName); - RP::get("Poisson.radius",radius); - RP::get("Poisson.max_iterations",poisson::Poisson::maxIterations); - RP::get("Poisson.min_relative_change",poisson::Poisson::minRelativePotentialChange); - RP::get("Poisson.is_2D",poisson::Poisson::is2D); - } - - bool PoissonTest::initialize() { - return true; - } - - void PoissonTest::setProjectBField( - FsGrid< std::array, 2> & perBGrid, - FsGrid< std::array, 2>& BgBGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid - ) { } - - void PoissonTest::calcCellParameters(spatial_cell::SpatialCell* cell,creal& t) { - Real* cellParams = cell->get_cell_parameters(); - Real dx = cellParams[CellParams::DX]; - Real dy = cellParams[CellParams::DY]; - Real dz = cellParams[CellParams::DZ]; - Real x = cellParams[CellParams::XCRD] + 0.5*dx; - Real y = cellParams[CellParams::YCRD] + 0.5*dy; - Real z = cellParams[CellParams::ZCRD] + 0.5*dz; - Real R = sqrt(x*x + y*y + z*z); - - cellParams[CellParams::RHOQ_TOT] = 0; - - if (R > radius) return; - - const Real volume = dx*dy*dz; - cellParams[CellParams::RHOQ_TOT] = physicalconstants::CHARGE - / volume / physicalconstants::EPS_0; - - if (Parameters::isRestart == true) return; - cellParams[CellParams::PHI] = 0; - } - - Real PoissonTest::calcPhaseSpaceDensity( - creal& x, creal& y, creal& z, - creal& dx, creal& dy, creal& dz, - creal& vx, creal& vy, creal& vz, - creal& dvx, creal& dvy, creal& dvz, - const uint popID) const { - return 0.0; - } - - std::vector PoissonTest::findBlocksToInitialize(SpatialCell* cell) { - vector blockList; - return blockList; - } - -} // namespace projects diff --git a/projects/Poisson/poisson_test.h b/projects/Poisson/poisson_test.h deleted file mode 100644 index 23080a5ef..000000000 --- a/projects/Poisson/poisson_test.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * This file is part of Vlasiator. - * Copyright 2010-2016 Finnish Meteorological Institute - * - * For details of usage, see the COPYING file and read the "Rules of the Road" - * at http://www.physics.helsinki.fi/vlasiator/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
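The calcCellParameters above deposits a uniform charge density inside a sphere of the configured radius, so the Poisson solution can be checked against the textbook potential of a uniformly charged ball. A self-contained reference helper (not part of the removed code), assuming total charge Q and radius R:

#include <cmath>

// Analytic potential of a uniformly charged ball of total charge Q [C] and
// radius R [m], evaluated at distance r [m] from the centre.
double uniformBallPotential(double Q, double R, double r) {
   const double EPS0 = 8.8542e-12;
   const double k = 1.0 / (4.0 * M_PI * EPS0);
   if (r >= R) return k * Q / r;                 // outside: point-charge form
   return k * Q * (3.0*R*R - r*r) / (2.0*R*R*R); // inside: parabolic profile
}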
- * - * File: poisson_test.h - * Author: sandroos - * - * Created on January 14, 2015, 3:17 PM - */ - -#ifndef POISSON_TEST_H -#define POISSON_TEST_H - -#include -#include "../project.h" - -namespace projects { - - class PoissonTest: public Project { - public: - PoissonTest(); - virtual ~PoissonTest(); - - static void addParameters(); - virtual void getParameters(); - virtual bool initialize(); - virtual void setProjectBField( - FsGrid< std::array, 2> & perBGrid, - FsGrid< std::array, 2>& BgBGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid - ); - - protected: - virtual void calcCellParameters(spatial_cell::SpatialCell* cell,creal& t); - - virtual Real calcPhaseSpaceDensity( - creal& x, creal& y, creal& z, - creal& dx, creal& dy, creal& dz, - creal& vx, creal& vy, creal& vz, - creal& dvx, creal& dvy, creal& dvz, - const uint popID) const; - - virtual std::vector findBlocksToInitialize(spatial_cell::SpatialCell* cell); - - }; // class PoissonTest - -} // namespace projects - -#endif // POISSON_TEST_H - diff --git a/projects/project.cpp b/projects/project.cpp index f13da0ecc..dc14e1b0e 100644 --- a/projects/project.cpp +++ b/projects/project.cpp @@ -33,7 +33,6 @@ #include "Diffusion/Diffusion.h" #include "Dispersion/Dispersion.h" #include "Distributions/Distributions.h" -#include "ElectricSail/electric_sail.h" #include "Firehose/Firehose.h" #include "Flowthrough/Flowthrough.h" #include "Fluctuations/Fluctuations.h" @@ -55,7 +54,6 @@ #include "../backgroundfield/backgroundfield.h" #include "../backgroundfield/constantfield.hpp" #include "Shocktest/Shocktest.h" -#include "Poisson/poisson_test.h" using namespace std; @@ -111,7 +109,6 @@ namespace projects { projects::Diffusion::addParameters(); projects::Dispersion::addParameters(); projects::Distributions::addParameters(); - projects::ElectricSail::addParameters(); projects::Firehose::addParameters(); projects::Flowthrough::addParameters(); projects::Fluctuations::addParameters(); @@ -131,7 +128,6 @@ namespace projects { projects::test_trans::addParameters(); projects::verificationLarmor::addParameters(); projects::Shocktest::addParameters(); - projects::PoissonTest::addParameters(); RP::add("Project_common.seed", "Seed for the RNG", 42); } @@ -546,9 +542,6 @@ Project* createProject() { if(Parameters::projectName == "Distributions") { rvalue = new projects::Distributions; } - if (Parameters::projectName == "ElectricSail") { - return new projects::ElectricSail; - } if(Parameters::projectName == "Firehose") { rvalue = new projects::Firehose; } @@ -606,9 +599,6 @@ Project* createProject() { if(Parameters::projectName == "Shocktest") { rvalue = new projects::Shocktest; } - if (Parameters::projectName == "PoissonTest") { - rvalue = new projects::PoissonTest; - } if (rvalue == NULL) { cerr << "Unknown project name!" 
<< endl; abort(); diff --git a/spatial_cell.cpp b/spatial_cell.cpp index 12d59952a..45063e0ca 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -775,16 +775,6 @@ namespace spatial_cell { block_lengths.push_back(sizeof(Real) * 3); } - if ((SpatialCell::mpi_transfer_type & Transfer::CELL_RHOQ_TOT) != 0) { - displacements.push_back((uint8_t*) &(this->parameters[CellParams::RHOQ_TOT]) - (uint8_t*) this); - block_lengths.push_back(sizeof(Real)); - } - - if ((SpatialCell::mpi_transfer_type & Transfer::CELL_PHI) != 0) { - displacements.push_back((uint8_t*) &(this->parameters[CellParams::PHI]) - (uint8_t*) this); - block_lengths.push_back(sizeof(Real)*2); - } - // send sysBoundaryFlag if ((SpatialCell::mpi_transfer_type & Transfer::CELL_SYSBOUNDARYFLAG)!=0){ displacements.push_back((uint8_t*) &(this->sysBoundaryFlag) - (uint8_t*) this); diff --git a/spatial_cell.hpp b/spatial_cell.hpp index 6bc7e00f0..fcb327316 100644 --- a/spatial_cell.hpp +++ b/spatial_cell.hpp @@ -115,11 +115,9 @@ namespace spatial_cell { const uint64_t CELL_HALL_TERM = (1ull<<24); const uint64_t CELL_P = (1ull<<25); const uint64_t CELL_PDT2 = (1ull<<26); - const uint64_t CELL_RHOQ_TOT = (1ull<<27); - const uint64_t CELL_PHI = (1ull<<28); - const uint64_t POP_METADATA = (1ull<<29); - const uint64_t RANDOMGEN = (1ull<<30); - const uint64_t CELL_GRADPE_TERM = (1ull<<31); + const uint64_t POP_METADATA = (1ull<<27); + const uint64_t RANDOMGEN = (1ull<<28); + const uint64_t CELL_GRADPE_TERM = (1ull<<29); //all data const uint64_t ALL_DATA = CELL_PARAMETERS diff --git a/sysboundary/antisymmetric.cpp b/sysboundary/antisymmetric.cpp deleted file mode 100644 index 60a3a0cf4..000000000 --- a/sysboundary/antisymmetric.cpp +++ /dev/null @@ -1,411 +0,0 @@ -/* - * This file is part of Vlasiator. - * Copyright 2010-2016 Finnish Meteorological Institute - * - * For details of usage, see the COPYING file and read the "Rules of the Road" - * at http://www.physics.helsinki.fi/vlasiator/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - */ - -/*!\file antisymmetric.cpp - * \brief Implementation of the class SysBoundaryCondition::Antisymmetric to - * handle cells classified as sysboundarytype::ANTISYMMETRIC. 
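A note on the spatial_cell.hpp hunk above: dropping CELL_RHOQ_TOT and CELL_PHI shifts every later transfer flag down two bit positions. The flags are plain bitmask constants; a minimal illustration of how a transfer mask is composed and tested (the bit positions follow the renumbered values, everything else is scaffolding):

#include <cstdint>
#include <iostream>

// Illustration of the Transfer:: bitmask scheme after the renumbering above.
namespace Transfer {
   const uint64_t CELL_PDT2        = (1ull << 26);
   const uint64_t POP_METADATA     = (1ull << 27);
   const uint64_t RANDOMGEN        = (1ull << 28);
   const uint64_t CELL_GRADPE_TERM = (1ull << 29);
}

int main() {
   // Compose a transfer mask, then test it the same way spatial_cell.cpp does.
   const uint64_t mask = Transfer::POP_METADATA | Transfer::RANDOMGEN;
   std::cout << ((mask & Transfer::POP_METADATA) != 0) << '\n'; // prints 1
   std::cout << ((mask & Transfer::CELL_PDT2)    != 0) << '\n'; // prints 0
   return 0;
}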
- */ - -#include -#include - -#include "antisymmetric.h" -#include "../projects/projects_common.h" -#include "../fieldsolver/fs_common.h" - -#ifndef NDEBUG - #define DEBUG_ANTISYMMETRIC -#endif -#ifdef DEBUG_SYSBOUNDARY - #define DEBUG_ANTISYMMETRIC -#endif - -using namespace std; - -#warning Antisymmetric boundaries do not yet support multipop - -namespace SBC { - Antisymmetric::Antisymmetric(): SysBoundaryCondition() { } - Antisymmetric::~Antisymmetric() { } - - void Antisymmetric::addParameters() { - Readparameters::addComposing("antisymmetric.face", "List of faces on which outflow boundary conditions are to be applied ([xyz][+-])."); - Readparameters::add("antisymmetric.precedence", "Precedence value of the outflow system boundary condition (integer), the higher the stronger.", 4); - } - - void Antisymmetric::getParameters() { - int myRank; - MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - if(!Readparameters::get("antisymmetric.face", faceList)) { - if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; - exit(1); - } - if(!Readparameters::get("antisymmetric.precedence", precedence)) { - if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; - exit(1); - } - } - - bool Antisymmetric::initSysBoundary( - creal& t, - Project &project - ) { - /* The array of bool describes which of the x+, x-, y+, y-, z+, z- faces are to have outflow system boundary conditions. - * A true indicates the corresponding face will have outflow. - * The 6 elements correspond to x+, x-, y+, y-, z+, z- respectively. - */ - for(uint i=0; i<6; i++) facesToProcess[i] = false; - - this->getParameters(); - - isThisDynamic = false; - - vector::const_iterator it; - for (it = faceList.begin(); - it != faceList.end(); - it++) { - if(*it == "x+") facesToProcess[0] = true; - if(*it == "x-") facesToProcess[1] = true; - if(*it == "y+") facesToProcess[2] = true; - if(*it == "y-") facesToProcess[3] = true; - if(*it == "z+") facesToProcess[4] = true; - if(*it == "z-") facesToProcess[5] = true; - } - return true; - } - - bool Antisymmetric::assignSysBoundary(dccrg::Dccrg& mpiGrid, - FsGrid< fsgrids::technical, 2> & technicalGrid) { - bool doAssign; - std::array isThisCellOnAFace; - - const vector& cells = getLocalCells(); - for (size_t c=0; csysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) continue; - creal* const cellParams = mpiGrid[cells[c]]->parameters.data(); - creal dx = cellParams[CellParams::DX]; - creal dy = cellParams[CellParams::DY]; - creal dz = cellParams[CellParams::DZ]; - creal x = cellParams[CellParams::XCRD] + 0.5*dx; - creal y = cellParams[CellParams::YCRD] + 0.5*dy; - creal z = cellParams[CellParams::ZCRD] + 0.5*dz; - - isThisCellOnAFace.fill(false); - determineFace(isThisCellOnAFace.data(), x, y, z, dx, dy, dz); - - // Comparison of the array defining which faces to use and the - // array telling on which faces this cell is - doAssign = false; - for (int j=0; j<6; j++) doAssign = doAssign || (facesToProcess[j] && isThisCellOnAFace[j]); - if (doAssign) { - uint flag = getIndex(); - //if (x < Parameters::xmin + 2*Parameters::dx_ini) flag = sysboundarytype::DO_NOT_COMPUTE; - //if (x >= Parameters::xmax - 2*Parameters::dx_ini) flag = sysboundarytype::DO_NOT_COMPUTE; - if (y < Parameters::ymin+Parameters::dy_ini) flag = sysboundarytype::DO_NOT_COMPUTE; - - mpiGrid[cells[c]]->sysBoundaryFlag = flag; - } - } - - // Assign boundary flags to local fsgrid cells - const std::array 
gridDims(technicalGrid.getLocalSize()); - for (int k=0; ksysBoundaryFlag = sysboundarytype::DO_NOT_COMPUTE; - } else { - technicalGrid.get(i,j,k)->sysBoundaryFlag = this->getIndex(); - } - } - } - } - } - - return true; - } - - bool Antisymmetric::applyInitialState( - const dccrg::Dccrg& mpiGrid, - Project &project - ) { - vector cells = mpiGrid.get_cells(); - #pragma omp parallel for - for (uint i=0; isysBoundaryFlag != this->getIndex()) continue; - - // Defined in project.cpp, used here as the outflow cell has the same state as the initial state of non-system boundary cells. - project.setCell(cell); - // WARNING Time-independence assumed here. - cell->parameters[CellParams::RHOM_DT2] = cell->parameters[CellParams::RHOM]; - cell->parameters[CellParams::VX_DT2] = cell->parameters[CellParams::VX]; - cell->parameters[CellParams::VY_DT2] = cell->parameters[CellParams::VY]; - cell->parameters[CellParams::VZ_DT2] = cell->parameters[CellParams::VZ]; - cell->parameters[CellParams::RHOQ_DT2] = cell->parameters[CellParams::RHOQ]; - } - - return true; - } - - Real Antisymmetric::fieldSolverBoundaryCondMagneticField( - FsGrid< std::array, 2> & perBGrid, - FsGrid< std::array, 2> & perBDt2Grid, - FsGrid< std::array, 2> & EGrid, - FsGrid< std::array, 2> & EDt2Grid, - FsGrid< fsgrids::technical, 2> & technicalGrid, - cint i, - cint j, - cint k, - creal& dt, - cuint& RKCase, - cuint& component - ) { - if (RKCase == RK_ORDER1 || RKCase == RK_ORDER2_STEP2) { - return fieldBoundaryCopyFromExistingFaceNbrMagneticField(perBGrid, technicalGrid, i, j, k, component); - } else { // Return PERB[XYZ]_DT2 - return fieldBoundaryCopyFromExistingFaceNbrMagneticField(perBDt2Grid, technicalGrid, i, j, k, component); - } - } - - void Antisymmetric::fieldSolverBoundaryCondElectricField( - FsGrid< std::array, 2> & EGrid, - cint i, - cint j, - cint k, - cuint component - ) { - EGrid.get(i,j,k)->at(fsgrids::efield::EX+component) = 0.0; - } - - void Antisymmetric::fieldSolverBoundaryCondHallElectricField( - FsGrid< std::array, 2> & EHallGrid, - cint i, - cint j, - cint k, - cuint component - ) { - std::array * cp = EHallGrid.get(i,j,k); - switch (component) { - case 0: - cp->at(fsgrids::ehall::EXHALL_000_100) = 0.0; - cp->at(fsgrids::ehall::EXHALL_010_110) = 0.0; - cp->at(fsgrids::ehall::EXHALL_001_101) = 0.0; - cp->at(fsgrids::ehall::EXHALL_011_111) = 0.0; - break; - case 1: - cp->at(fsgrids::ehall::EYHALL_000_010) = 0.0; - cp->at(fsgrids::ehall::EYHALL_100_110) = 0.0; - cp->at(fsgrids::ehall::EYHALL_001_011) = 0.0; - cp->at(fsgrids::ehall::EYHALL_101_111) = 0.0; - break; - case 2: - cp->at(fsgrids::ehall::EZHALL_000_001) = 0.0; - cp->at(fsgrids::ehall::EZHALL_100_101) = 0.0; - cp->at(fsgrids::ehall::EZHALL_010_011) = 0.0; - cp->at(fsgrids::ehall::EZHALL_110_111) = 0.0; - break; - default: - cerr << __FILE__ << ":" << __LINE__ << ":" << " Invalid component" << endl; - } - } - - void Antisymmetric::fieldSolverBoundaryCondGradPeElectricField( - FsGrid< std::array, 2> & EGradPeGrid, - cint i, - cint j, - cint k, - cuint component - ) { - EGradPeGrid.get(i,j,k)->at(fsgrids::egradpe::EXGRADPE+component) = 0.0; - } - - void Antisymmetric::fieldSolverBoundaryCondDerivatives( - FsGrid< std::array, 2> & dPerBGrid, - FsGrid< std::array, 2> & dMomentsGrid, - cint i, - cint j, - cint k, - cuint& RKCase, - cuint& component - ) { - this->setCellDerivativesToZero(dPerBGrid, dMomentsGrid, i, j, k, component); - } - - void Antisymmetric::fieldSolverBoundaryCondBVOLDerivatives( - FsGrid< std::array, 2> & volGrid, - cint i, - cint j, - cint k, - 
cuint& component - ) { - this->setCellBVOLDerivativesToZero(volGrid, i, j, k, component); - } - - /** - * NOTE that this is called once for each particle species! - * @param mpiGrid - * @param cellID - */ - void Antisymmetric::vlasovBoundaryCondition( - const dccrg::Dccrg& mpiGrid, - const CellID& cellID, - const uint popID - ) { - //cerr << "AS vlasovBoundaryCondition cell " << cellID << " called " << endl; - -// phiprof::start("vlasovBoundaryCondition (Antisymmetric)"); -// vlasovBoundaryCopyFromTheClosestNbr(mpiGrid,cellID,popID); -// phiprof::stop("vlasovBoundaryCondition (Antisymmetric)"); - - dccrg::Types<3>::indices_t indices = mpiGrid.mapping.get_indices(cellID); - if (indices[1] == 1 || indices[1] == Parameters::ycells_ini-2) { - if (indices[1] == 1) ++indices[1]; - else --indices[1]; - } else { - cerr << "ERROR in antisymmetric boundaries in " << __FILE__ << ":" << __LINE__ << endl; - return; - } - - // Clear data in this cell - vmesh::VelocityMesh& vmesh = mpiGrid[cellID]->get_velocity_mesh(popID); - vmesh::VelocityBlockContainer& blockContainer = mpiGrid[cellID]->get_velocity_blocks(popID); - vmesh.clear(); - blockContainer.clear(); - - // Get data from neighbor cell - const CellID nbrID = mpiGrid.mapping.get_cell_from_indices(indices,0); - const vmesh::VelocityMesh& nbrVmesh = mpiGrid[nbrID]->get_velocity_mesh(popID); - const vmesh::VelocityBlockContainer& nbrBlockContainer = mpiGrid[nbrID]->get_velocity_blocks(popID); - - //cerr << "BOUNDARY CELL " << cellID << " HAS " << vmesh.size() << " BLOCKS BEFORE COPY NBR " << nbrID << " HAS "; - //cerr << nbrVmesh.size() << endl; - - // vx,vz components are untouched, vy is both mirrored (vy -> -vy) - // and copied (vy -> vy). - for (vmesh::LocalID srcLID=0; srcLID 0) continue; - - for (int dir=-1; dir<0; dir+=2) { -// for (int dir=-1; dir<2; dir+=2) { - V_trgt[0] = V[0] + (i+0.5)*dV[0]; - V_trgt[1] = (V[1] + (j+0.5)*dV[1]) * dir; - V_trgt[2] = V[2] + (k+0.5)*dV[2]; - - const vmesh::GlobalID trgtGID = vmesh.getGlobalID(0,V_trgt); - - Real V_trgt_block[3]; - Real dV_trgt_block[3]; - vmesh.getBlockCoordinates(trgtGID,V_trgt_block); - vmesh.getBlockSize(trgtGID,dV_trgt_block); - for (int t=0; t<3; ++t) dV_trgt_block[t] /= WID; - - // Make sure target block exists - if (vmesh.getLocalID(trgtGID) == vmesh::INVALID_LOCALID) { - mpiGrid[cellID]->add_velocity_block(trgtGID,popID); - } - - // Get target block local ID - const vmesh::LocalID trgtLID = vmesh.getLocalID(trgtGID); - if (trgtLID == vmesh::INVALID_LOCALID) { - cerr << "ERROR, got invalid local ID in antisymmetric" << endl; - continue; - } - - // Calculate target cell indices - const int ii = static_cast((V_trgt[0] - V_trgt_block[0]) / dV_trgt_block[0]); - const int jj = static_cast((V_trgt[1] - V_trgt_block[1]) / dV_trgt_block[1]); - const int kk = static_cast((V_trgt[2] - V_trgt_block[2]) / dV_trgt_block[2]); - - /*bool ok = true; - if (ii < 0 || ii >= WID) ok = false; - if (jj < 0 || jj >= WID) ok = false; - if (kk < 0 || kk >= WID) ok = false; - if (ok == false) { - cerr << "ERROR " << ii << ' ' << jj << ' ' << kk << endl; - exit(1); - }*/ - - /*uint8_t ref=0; - vmesh::LocalID i_srcBlock[3]; - nbrVmesh.getIndices(srcGID,ref,i_srcBlock[0],i_srcBlock[1],i_srcBlock[2]); - - vmesh::LocalID i_trgtBlock[3]; - vmesh.getIndices(trgtGID,ref,i_trgtBlock[0],i_trgtBlock[1],i_trgtBlock[2]); - - cerr << "\t src indices: " << i_srcBlock[0] << ' ' << i_srcBlock[1] << ' ' << i_srcBlock[2] << " ("; - cerr << i << ' ' << j << ' ' << k << ") trgt "; - cerr << i_trgtBlock[0] << ' ' << i_trgtBlock[1] 
<< ' ' << i_trgtBlock[2] << " ("; - cerr << ii << ' ' << jj << ' ' << kk << ")" << endl; - */ - Realf* data = blockContainer.getData(trgtLID); - data[cellIndex(ii,jj,kk)] += srcData[cellIndex(i,j,k)]; - } - } // for-loops over phase-space cells in source block - } // for-loop over velocity blocks in neighbor cell - - //cerr << "BOUNDARY CELL HAS " << vmesh.size() << " velocity blocks" << endl; - } - - void Antisymmetric::getFaces(bool* faces) { - for(uint i=0; i<6; i++) faces[i] = facesToProcess[i]; - } - - std::string Antisymmetric::getName() const {return "Antisymmetric";} - - uint Antisymmetric::getIndex() const {return sysboundarytype::ANTISYMMETRIC;} -} diff --git a/sysboundary/antisymmetric.h b/sysboundary/antisymmetric.h deleted file mode 100644 index 9f1218481..000000000 --- a/sysboundary/antisymmetric.h +++ /dev/null @@ -1,132 +0,0 @@ -/* - * This file is part of Vlasiator. - * Copyright 2010-2016 Finnish Meteorological Institute - * - * For details of usage, see the COPYING file and read the "Rules of the Road" - * at http://www.physics.helsinki.fi/vlasiator/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - */ - -#ifndef ANTISYMMETRIC_H -#define ANTISYMMETRIC_H - -#include -#include "../definitions.h" -#include "../readparameters.h" -#include "../spatial_cell.hpp" -#include "sysboundarycondition.h" - -namespace SBC { - /*!\brief Antisymmetric is a class applying antisymmetric boundary conditions. - * Normal field components are antisymmetric. - * Tangential field components are symmetric. - * - * Distribution function is mirrored in the same way. - * - * Antisymmetric is a class handling cells tagged as sysboundarytype::ANTISYMMETRIC by this system - * boundary condition. It applies antisymmetric boundary conditions. 
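The mirroring rule documented above keeps vx and vz and flips the sign of vy when the distribution is copied from the interior neighbour into the boundary cell. Stripped of dccrg's block storage, the operation reduces to reflecting one index of a dense velocity grid; a sketch, assuming an Nx*Ny*Nz array whose vy axis is symmetric about zero:

#include <vector>

// f(vx, vy, vz) -> f(vx, -vy, vz): on a vy axis symmetric about zero,
// cell index j maps to Ny-1-j. Linear index is (k*Ny + j)*Nx + i.
std::vector<double> mirrorVy(const std::vector<double>& f,
                             int Nx, int Ny, int Nz) {
   std::vector<double> out(f.size());
   for (int k = 0; k < Nz; ++k)
      for (int j = 0; j < Ny; ++j)
         for (int i = 0; i < Nx; ++i)
            out[(k*Ny + (Ny-1-j))*Nx + i] = f[(k*Ny + j)*Nx + i];
   return out;
}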
- */ - class Antisymmetric: public SysBoundaryCondition { - public: - Antisymmetric(); - virtual ~Antisymmetric(); - - static void addParameters(); - virtual void getParameters(); - - virtual bool initSysBoundary( - creal& t, - Project &project - ); - virtual bool assignSysBoundary(dccrg::Dccrg& mpiGrid, - FsGrid< fsgrids::technical, 2> & technicalGrid); - virtual bool applyInitialState( - const dccrg::Dccrg& mpiGrid, - Project &project - ); -// virtual bool applySysBoundaryCondition( -// const dccrg::Dccrg& mpiGrid, -// creal& t -// ); - virtual Real fieldSolverBoundaryCondMagneticField( - FsGrid< std::array, 2> & perBGrid, - FsGrid< std::array, 2> & perBDt2Grid, - FsGrid< std::array, 2> & EGrid, - FsGrid< std::array, 2> & EDt2Grid, - FsGrid< fsgrids::technical, 2> & technicalGrid, - cint i, - cint j, - cint k, - creal& dt, - cuint& RKCase, - cuint& component - ); - virtual void fieldSolverBoundaryCondElectricField( - FsGrid< std::array, 2> & EGrid, - cint i, - cint j, - cint k, - cuint component - ); - virtual void fieldSolverBoundaryCondHallElectricField( - FsGrid< std::array, 2> & EHallGrid, - cint i, - cint j, - cint k, - cuint component - ); - virtual void fieldSolverBoundaryCondGradPeElectricField( - FsGrid< std::array, 2> & EGradPeGrid, - cint i, - cint j, - cint k, - cuint component - ); - virtual void fieldSolverBoundaryCondDerivatives( - FsGrid< std::array, 2> & dPerBGrid, - FsGrid< std::array, 2> & dMomentsGrid, - cint i, - cint j, - cint k, - cuint& RKCase, - cuint& component - ); - virtual void fieldSolverBoundaryCondBVOLDerivatives( - FsGrid< std::array, 2> & volGrid, - cint i, - cint j, - cint k, - cuint& component - ); - virtual void vlasovBoundaryCondition( - const dccrg::Dccrg& mpiGrid, - const CellID& cellID, - const uint popID - ); - - virtual void getFaces(bool* faces); - virtual std::string getName() const; - virtual uint getIndex() const; - - protected: - /*! Array of bool telling which faces are going to be processed by the system boundary condition.*/ - bool facesToProcess[6]; - /*! List of faces on which outflow boundary conditions are to be applied ([xyz][+-]). */ - std::vector faceList; - }; // class Antisymmetric -} // namespace SBC - -#endif diff --git a/sysboundary/project_boundary.cpp b/sysboundary/project_boundary.cpp deleted file mode 100644 index ec5945b59..000000000 --- a/sysboundary/project_boundary.cpp +++ /dev/null @@ -1,331 +0,0 @@ -/* - * This file is part of Vlasiator. - * Copyright 2010-2016 Finnish Meteorological Institute - * - * For details of usage, see the COPYING file and read the "Rules of the Road" - * at http://www.physics.helsinki.fi/vlasiator/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - */ - -/*!\file project_boundary.cpp - * \brief Implementation of the class SysBoundaryCondition::ProjectBoundary. 
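Both removed boundary classes parse the same face syntax: each entry is [xyz][+-] and maps onto the fixed face order x+, x-, y+, y-, z+, z-. A self-contained equivalent of the selection loops in Antisymmetric::initSysBoundary above and ProjectBoundary::initSysBoundary below, shown as a sketch:

#include <array>
#include <cstddef>
#include <string>
#include <vector>

// Map face names "x+", "x-", ..., "z-" onto the fixed 6-element face order
// used throughout the sysboundary code.
std::array<bool,6> parseFaces(const std::vector<std::string>& faceList) {
   const std::array<std::string,6> names = {{"x+","x-","y+","y-","z+","z-"}};
   std::array<bool,6> faces;
   faces.fill(false);
   for (const auto& face : faceList)
      for (std::size_t i = 0; i < names.size(); ++i)
         if (face == names[i]) faces[i] = true;
   return faces;
}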
- */ - -#include -#include - -#include "project_boundary.h" -#include "../vlasovmover.h" -#include "../fieldsolver/fs_common.h" -#include "../object_wrapper.h" - -using namespace std; - -#warning Project boundaries do not yet support multipop, not checked whether any changes are needed. - -namespace SBC { - ProjectBoundary::ProjectBoundary(): SysBoundaryCondition() { - project = NULL; - } - ProjectBoundary::~ProjectBoundary() { - project = NULL; - } - - void ProjectBoundary::addParameters() { - Readparameters::addComposing("projectboundary.face", "List of faces on which outflow boundary conditions are to be applied ([xyz][+-])."); - Readparameters::add("projectboundary.precedence", "Precedence value of the outflow system boundary condition (integer), the higher the stronger.", 4); - return; - } - - void ProjectBoundary::getParameters() { - int myRank; - MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - if (!Readparameters::get("projectboundary.face", faceList)) { - if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; - exit(1); - } - if (!Readparameters::get("projectboundary.precedence", precedence)) { - if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; - exit(1); - } - return; - } - - bool ProjectBoundary::initSysBoundary(creal& t,Project& project) { - this->project = &project; - - /* The array of bool describes which of the x+, x-, y+, y-, z+, z- faces are to have user-set system boundary conditions. - * A true indicates the corresponding face will have user-set system boundary conditions. - * The 6 elements correspond to x+, x-, y+, y-, z+, z- respectively. - */ - bool success = true; - for(uint i=0; i<6; i++) facesToProcess[i] = false; - - this->getParameters(); - - vector::const_iterator it; - for (it = faceList.begin(); - it != faceList.end(); - it++) { - if(*it == "x+") facesToProcess[0] = true; - if(*it == "x-") facesToProcess[1] = true; - if(*it == "y+") facesToProcess[2] = true; - if(*it == "y-") facesToProcess[3] = true; - if(*it == "z+") facesToProcess[4] = true; - if(*it == "z-") facesToProcess[5] = true; - } - - success = success & generateTemplateCell(); - return success; - } - - bool ProjectBoundary::assignSysBoundary(dccrg::Dccrg& mpiGrid, - FsGrid< fsgrids::technical, 2> & technicalGrid) { - - bool doAssign; - std::array isThisCellOnAFace; - - vector cells = mpiGrid.get_cells(); - for(uint i = 0; i < cells.size(); i++) { - if(mpiGrid[cells[i]]->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) continue; - creal* const cellParams = &(mpiGrid[cells[i]]->parameters[0]); - creal dx = cellParams[CellParams::DX]; - creal dy = cellParams[CellParams::DY]; - creal dz = cellParams[CellParams::DZ]; - creal x = cellParams[CellParams::XCRD] + 0.5*dx; - creal y = cellParams[CellParams::YCRD] + 0.5*dy; - creal z = cellParams[CellParams::ZCRD] + 0.5*dz; - - isThisCellOnAFace.fill(false); - determineFace(isThisCellOnAFace.data(), x, y, z, dx, dy, dz); - // Comparison of the array defining which faces to use and the array telling on which faces this cell is - doAssign = false; - for(int j=0; j<6; j++) doAssign = doAssign || (facesToProcess[j] && isThisCellOnAFace[j]); - if(doAssign) { - mpiGrid[cells[i]]->sysBoundaryFlag = this->getIndex(); - } - } - - // Assign boundary flags to local fsgrid cells - const std::array gridDims(technicalGrid.getLocalSize()); - for (int k=0; ksysBoundaryFlag = this->getIndex(); - } - } - } - } - - return true; - } - - bool 
ProjectBoundary::applyInitialState( - const dccrg::Dccrg& mpiGrid, - Project &project - ) { - bool success = true; - const vector& cells = getLocalCells(); - - for (size_t c=0; csysBoundaryFlag != getIndex()) continue; - this->project->setCell(cell); - } - - return success; - } - - Real ProjectBoundary::fieldSolverBoundaryCondMagneticField( - FsGrid< std::array, 2> & perBGrid, - FsGrid< std::array, 2> & perBDt2Grid, - FsGrid< std::array, 2> & EGrid, - FsGrid< std::array, 2> & EDt2Grid, - FsGrid< fsgrids::technical, 2> & technicalGrid, - cint i, - cint j, - cint k, - creal& dt, - cuint& RKCase, - cuint& component - ) { - return 0.0; - } - - void ProjectBoundary::fieldSolverBoundaryCondElectricField( - FsGrid< std::array, 2> & EGrid, - cint i, - cint j, - cint k, - cuint component - ) { - creal dx = Parameters::dx_ini; - creal dy = Parameters::dy_ini; - creal dz = Parameters::dz_ini; - const std::array globalIndices = EGrid.getGlobalIndices(i,j,k); - creal x = (convert(globalIndices[0])+0.5)*EGrid.DX + Parameters::xmin; - creal y = (convert(globalIndices[1])+0.5)*EGrid.DY + Parameters::ymin; - creal z = (convert(globalIndices[2])+0.5)*EGrid.DZ + Parameters::zmin; - - bool isThisCellOnAFace[6]; - determineFace(&isThisCellOnAFace[0], x, y, z, dx, dy, dz); - -#warning this function makes little sense as is. It made little before, too. - for (uint fi=0; fi<6; fi++) if (facesToProcess[fi] && isThisCellOnAFace[fi]) { - switch (fi) { - case 0: - EGrid.get(i,j,k)->at(fsgrids::efield::EX+component) = 0; - break; - case 1: - EGrid.get(i,j,k)->at(fsgrids::efield::EX+component) = 0; - break; - case 2: - EGrid.get(i,j,k)->at(fsgrids::efield::EX+component) = 0; - break; - case 3: - EGrid.get(i,j,k)->at(fsgrids::efield::EX+component) = 0; - break; - case 4: - EGrid.get(i,j,k)->at(fsgrids::efield::EX+component) = 0; - break; - case 5: - EGrid.get(i,j,k)->at(fsgrids::efield::EX+component) = 0; - break; - } - } - } - - void ProjectBoundary::fieldSolverBoundaryCondGradPeElectricField( - FsGrid< std::array, 2> & EGradPeGrid, - cint i, - cint j, - cint k, - cuint component - ) { - EGradPeGrid.get(i,j,k)->at(fsgrids::egradpe::EXGRADPE+component) = 0.0; - } - - void ProjectBoundary::fieldSolverBoundaryCondHallElectricField( - FsGrid< std::array, 2> & EHallGrid, - cint i, - cint j, - cint k, - cuint component - ) { - std::array * cp = EHallGrid.get(i,j,k); - switch (component) { - case 0: - cp->at(fsgrids::ehall::EXHALL_000_100) = 0.0; - cp->at(fsgrids::ehall::EXHALL_010_110) = 0.0; - cp->at(fsgrids::ehall::EXHALL_001_101) = 0.0; - cp->at(fsgrids::ehall::EXHALL_011_111) = 0.0; - break; - case 1: - cp->at(fsgrids::ehall::EYHALL_000_010) = 0.0; - cp->at(fsgrids::ehall::EYHALL_100_110) = 0.0; - cp->at(fsgrids::ehall::EYHALL_001_011) = 0.0; - cp->at(fsgrids::ehall::EYHALL_101_111) = 0.0; - break; - case 2: - cp->at(fsgrids::ehall::EZHALL_000_001) = 0.0; - cp->at(fsgrids::ehall::EZHALL_100_101) = 0.0; - cp->at(fsgrids::ehall::EZHALL_010_011) = 0.0; - cp->at(fsgrids::ehall::EZHALL_110_111) = 0.0; - break; - default: - cerr << __FILE__ << ":" << __LINE__ << ":" << " Invalid component" << endl; - } - } - - void ProjectBoundary::fieldSolverBoundaryCondDerivatives( - FsGrid< std::array, 2> & dPerBGrid, - FsGrid< std::array, 2> & dMomentsGrid, - cint i, - cint j, - cint k, - cuint& RKCase, - cuint& component - ) { - this->setCellDerivativesToZero(dPerBGrid, dMomentsGrid, i, j, k, component); - } - - void ProjectBoundary::fieldSolverBoundaryCondBVOLDerivatives( - FsGrid< std::array, 2> & volGrid, - cint i, - cint j, - cint k, 
- cuint& component - ) { - this->setCellBVOLDerivativesToZero(volGrid, i, j, k, component); - } - - void ProjectBoundary::vlasovBoundaryCondition( - const dccrg::Dccrg& mpiGrid, - const CellID& cellID, - const uint popID - ) { - SpatialCell* cell = mpiGrid[cellID]; - cell->get_velocity_mesh(popID) = templateCell.get_velocity_mesh(popID); - cell->get_velocity_blocks(popID) = templateCell.get_velocity_blocks(popID); - } - - void ProjectBoundary::getFaces(bool* faces) { - for(uint i=0; i<6; i++) faces[i] = facesToProcess[i]; - } - - bool ProjectBoundary::generateTemplateCell() { - if (project == NULL) return false; - templateCell.parameters[CellParams::XCRD] = -2*Parameters::xmin; - templateCell.parameters[CellParams::YCRD] = -2*Parameters::ymin; - templateCell.parameters[CellParams::ZCRD] = -2*Parameters::zmin; - templateCell.parameters[CellParams::DX] = -2*Parameters::dx_ini; - templateCell.parameters[CellParams::DY] = -2*Parameters::dy_ini; - templateCell.parameters[CellParams::DZ] = -2*Parameters::dz_ini; - project->setCell(&templateCell); - return true; - } - - string ProjectBoundary::getName() const { - return "ProjectBoundary"; - } - - uint ProjectBoundary::getIndex() const { - return sysboundarytype::PROJECT; - } -} diff --git a/sysboundary/project_boundary.h b/sysboundary/project_boundary.h deleted file mode 100644 index 510479207..000000000 --- a/sysboundary/project_boundary.h +++ /dev/null @@ -1,140 +0,0 @@ -/* - * This file is part of Vlasiator. - * Copyright 2010-2016 Finnish Meteorological Institute - * - * For details of usage, see the COPYING file and read the "Rules of the Road" - * at http://www.physics.helsinki.fi/vlasiator/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - */ - -#ifndef PROJECT_BOUNDARY_H -#define PROJECT_BOUNDARY_H - -#include -#include "../definitions.h" -#include "../readparameters.h" -#include "../spatial_cell.hpp" -#include "sysboundarycondition.h" - -namespace SBC { - /*!\brief Base class for system boundary conditions with user-set settings and parameters read from file. - * - * ProjectBoundary uses the simulated project to set the boundary conditions. - * - * This class handles the import and interpolation in time of the input parameters read - * from file as well as the assignment of the state from the template cells. - * - * The daughter classes have then to handle parameters and generate the template cells as - * wished from the data returned. 
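The "interpolation in time of the input parameters" mentioned above is, in the simplest case, linear between two file snapshots; the concrete scheme is not shown in this file, so the following is only an assumed sketch:

// Linearly interpolate an input parameter between snapshots (t0, v0) and
// (t1, v1), clamping outside the interval. Assumed scheme, for illustration.
double interpolateParameter(double t, double t0, double v0,
                            double t1, double v1) {
   if (t <= t0) return v0;
   if (t >= t1) return v1;
   const double w = (t - t0) / (t1 - t0);
   return (1.0 - w)*v0 + w*v1;
}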
- */ - class ProjectBoundary: public SysBoundaryCondition { - public: - ProjectBoundary(); - virtual ~ProjectBoundary(); - - static void addParameters(); - virtual void getParameters(); - - virtual bool initSysBoundary( - creal& t, - Project &project - ); - virtual bool assignSysBoundary(dccrg::Dccrg& mpiGrid, - FsGrid< fsgrids::technical, 2> & technicalGrid); - virtual bool applyInitialState( - const dccrg::Dccrg& mpiGrid, - Project &project - ); -// virtual bool applySysBoundaryCondition( -// const dccrg::Dccrg& mpiGrid, -// creal& t -// ); - virtual Real fieldSolverBoundaryCondMagneticField( - FsGrid< std::array, 2> & perBGrid, - FsGrid< std::array, 2> & perBDt2Grid, - FsGrid< std::array, 2> & EGrid, - FsGrid< std::array, 2> & EDt2Grid, - FsGrid< fsgrids::technical, 2> & technicalGrid, - cint i, - cint j, - cint k, - creal& dt, - cuint& RKCase, - cuint& component - ); - virtual void fieldSolverBoundaryCondElectricField( - FsGrid< std::array, 2> & EGrid, - cint i, - cint j, - cint k, - cuint component - ); - virtual void fieldSolverBoundaryCondHallElectricField( - FsGrid< std::array, 2> & EHallGrid, - cint i, - cint j, - cint k, - cuint component - ); - virtual void fieldSolverBoundaryCondGradPeElectricField( - FsGrid< std::array, 2> & EGradPeGrid, - cint i, - cint j, - cint k, - cuint component - ); - virtual void fieldSolverBoundaryCondDerivatives( - FsGrid< std::array, 2> & dPerBGrid, - FsGrid< std::array, 2> & dMomentsGrid, - cint i, - cint j, - cint k, - cuint& RKCase, - cuint& component - ); - virtual void fieldSolverBoundaryCondBVOLDerivatives( - FsGrid< std::array, 2> & volGrid, - cint i, - cint j, - cint k, - cuint& component - ); - virtual void vlasovBoundaryCondition( - const dccrg::Dccrg& mpiGrid, - const CellID& cellID,const uint popID - ); - - virtual void getFaces(bool* faces); - - virtual std::string getName() const; - virtual uint getIndex() const; - - protected: - - bool generateTemplateCell(); - - /*! Array of bool telling which faces are going to be processed by the system boundary condition.*/ - bool facesToProcess[6]; - - Project* project; - spatial_cell::SpatialCell templateCell; - - /*! List of faces on which user-set boundary conditions are to be applied ([xyz][+-]). */ - std::vector faceList; - }; -} - -#endif diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp index 3c1603cb4..b6fcd8443 100644 --- a/sysboundary/sysboundary.cpp +++ b/sysboundary/sysboundary.cpp @@ -35,8 +35,6 @@ #include "ionosphere.h" #include "outflow.h" #include "setmaxwellian.h" -#include "antisymmetric.h" -#include "project_boundary.h" using namespace std; using namespace spatial_cell; @@ -87,8 +85,6 @@ void SysBoundary::addParameters() { SBC::Ionosphere::addParameters(); SBC::Outflow::addParameters(); SBC::SetMaxwellian::addParameters(); - SBC::Antisymmetric::addParameters(); - SBC::ProjectBoundary::addParameters(); } /*!\brief Get this class' parameters. @@ -266,52 +262,6 @@ bool SysBoundary::initSysBoundaries( exit(1); } } - if (*it == "Antisymmetric") { - if (addSysBoundary(new SBC::Antisymmetric,project,t) == false) { - if (myRank == MASTER_RANK) cerr << "Error in adding Antisymmetric boundary condition." 
<< endl; - success = false; - } - isThisDynamic = isThisDynamic | getSysBoundary(sysboundarytype::ANTISYMMETRIC)->isDynamic(); - bool faces[6]; - getSysBoundary(sysboundarytype::ANTISYMMETRIC)->getFaces(&faces[0]); - if ((faces[0] || faces[1]) && isPeriodic[0]) { - if (myRank == MASTER_RANK) { - cerr << "You set boundaries.periodic_x = yes and load Outflow system boundary"; - cerr << "conditions on the x+ or x- face, are you sure this is correct?" << endl; - } - } - if ((faces[2] || faces[3]) && isPeriodic[1]) { - if (myRank == MASTER_RANK) { - cerr << "You set boundaries.periodic_y = yes and load Outflow system boundary"; - cerr << "conditions on the y+ or y- face, are you sure this is correct?" << endl; - } - } - if ((faces[4] || faces[5]) && isPeriodic[2]) { - if (myRank == MASTER_RANK) { - cerr << "You set boundaries.periodic_z = yes and load Outflow system boundary"; - cerr << "conditions on the z+ or z- face, are you sure this is correct?" << endl; - } - } - } - if(*it == "ProjectBoundary") { - if (this->addSysBoundary(new SBC::ProjectBoundary, project, t) == false) { - if(myRank == MASTER_RANK) cerr << "Error in adding ProjectBoundary boundary." << endl; - success = false; - } - isThisDynamic = isThisDynamic| - this->getSysBoundary(sysboundarytype::PROJECT)->isDynamic(); - bool faces[6]; - this->getSysBoundary(sysboundarytype::PROJECT)->getFaces(&faces[0]); - if((faces[0] || faces[1]) && isPeriodic[0]) { - if(myRank == MASTER_RANK) cerr << "You set boundaries.periodic_x = yes and load ProjectBoundary system boundary conditions on the x+ or x- face, are you sure this is correct?" << endl; - } - if((faces[2] || faces[3]) && isPeriodic[1]) { - if(myRank == MASTER_RANK) cerr << "You set boundaries.periodic_y = yes and load ProjectBoundary system boundary conditions on the y+ or y- face, are you sure this is correct?" << endl; - } - if((faces[4] || faces[5]) && isPeriodic[2]) { - if(myRank == MASTER_RANK) cerr << "You set boundaries.periodic_z = yes and load ProjectBoundary system boundary conditions on the z+ or z- face, are you sure this is correct?" << endl; - } - } } list::iterator it2; diff --git a/testpackage/tests/acctest_1_maxw_500k_30kms_1deg/acctest_1_maxw_500k_30kms_1deg.cfg b/testpackage/tests/acctest_1_maxw_500k_30kms_1deg/acctest_1_maxw_500k_30kms_1deg.cfg index bfa83496d..7e85b12d5 100644 --- a/testpackage/tests/acctest_1_maxw_500k_30kms_1deg/acctest_1_maxw_500k_30kms_1deg.cfg +++ b/testpackage/tests/acctest_1_maxw_500k_30kms_1deg/acctest_1_maxw_500k_30kms_1deg.cfg @@ -56,10 +56,6 @@ output = populations_PTensor output = derivs output = BVOLderivs output = GridCoordinates -output = Potential -output = BackgroundVolE -output = ChargeDensity -output = PotentialError output = MeshData #output = VelocitySubSteps diff --git a/tools/esail_intpol.cpp b/tools/esail_intpol.cpp deleted file mode 100644 index 1bd28a663..000000000 --- a/tools/esail_intpol.cpp +++ /dev/null @@ -1,153 +0,0 @@ -/* - * This file is part of Vlasiator. - * Copyright 2010-2016 Finnish Meteorological Institute - * - * For details of usage, see the COPYING file and read the "Rules of the Road" - * at http://www.physics.helsinki.fi/vlasiator/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
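The esail_intpol tool removed below estimates the field at the tether as the mean of E_vol over the four cells surrounding it (the 0.25-weighted sums further down). A self-contained sketch of that 0th-order average:

#include <array>

// 0th-order estimate: average a vector quantity over the four cells that
// surround the tether position.
std::array<double,3> averageE(const std::array<std::array<double,3>,4>& E) {
   std::array<double,3> E0 = {{0.0, 0.0, 0.0}};
   for (const auto& Ecell : E)
      for (int c = 0; c < 3; ++c)
         E0[c] += 0.25 * Ecell[c];
   return E0;
}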
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - */ -#include -#include -#include -#include -#include - -#include "../fieldsolver/fs_limiters.h" -#include "vlsvreaderinterface.h" -#include "vlsv_util.h" - -using namespace std; - -struct Values { - double t; - double E[3]; - double phi; -}; - -void processFile(const size_t& index,const string& fname,vector& values0,vector& values1) { - vlsvinterface::Reader vlsvReader; - if (vlsvReader.open(fname) == false) { - stringstream ss; - ss << "ERROR, could not open file '" << fname << "' in " << __FILE__ << ":" << __LINE__ << endl; - cerr << ss.str(); - return; - } - - if (vlsvReader.setCellIds() == false) { - stringstream ss; - ss << "ERROR, could not read Cell IDs from file '" << fname << "' in " << __FILE__ << ":" << __LINE__ << endl; - cerr << ss.str(); - return; - } - - vector cellIDs; - cellIDs.push_back(63400); - cellIDs.push_back(63401); - cellIDs.push_back(63799); - cellIDs.push_back(63800); - cellIDs.push_back(63801); - cellIDs.push_back(63802); - cellIDs.push_back(64199); - cellIDs.push_back(64200); - cellIDs.push_back(64201); - cellIDs.push_back(64202); - cellIDs.push_back(64600); - cellIDs.push_back(64601); - - map > E_values; - map phi_values; - for (size_t c=0; c >::iterator it=E_values.begin(); it!=E_values.end(); ++it) { - array arr; - if (vlsvReader.getVariable("E_vol",cellIDs[c],arr) == false) { - //if (vlsvReader.getVariable("E_vol",it->first,it->second) == false) { - stringstream ss; - ss << "ERROR, could not read E value from file '" << fname << "' in " << __FILE__ << ":" << __LINE__ << endl; - cerr << ss.str(); - return; - } /*else { - cout << "Read " << it->first << '\t' << (it->second)[0] << endl; - }*/ - E_values[cellIDs[c]] = arr; - - array phi_arr; - if (vlsvReader.getVariable("poisson/potential",cellIDs[c],phi_arr) == false) { - stringstream ss; - ss << "ERROR, could not read phi value from file '" << fname << "' in " << __FILE__ << ":" << __LINE__ << endl; - cerr << ss.str(); - } else { - phi_values[cellIDs[c]] = phi_arr[0]; - } - } - - Values val; - if (vlsvReader.readParameter("time",val.t) == false) { - stringstream ss; - ss << "ERROR, could not read 'time' value from file '" << fname << "' in " << __FILE__ << ":" << __LINE__ << endl; - cerr << ss.str(); - return; - } - - // 0th order electric field - val.E[0] = 0.25*(E_values[63800][0]+E_values[63801][0]+E_values[64200][0]+E_values[64201][0]); - val.E[1] = 0.25*(E_values[63800][1]+E_values[63801][1]+E_values[64200][1]+E_values[64201][1]); - val.E[2] = 0.25*(E_values[63800][2]+E_values[63801][2]+E_values[64200][2]+E_values[64201][2]); - val.phi = 0.25*(phi_values[63800]+phi_values[63801]+phi_values[64200]+phi_values[64201]); - values0[index] = val; - - // 1st order electric field - const double dx = 5.0; - double d_E1x = (E_values[63800][0]-E_values[63799][0])/5.0; - double d_E2x = (E_values[64200][0]-E_values[64199][0])/5.0; - double d_E3x = (E_values[63802][0]-E_values[63801][0])/5.0; - double d_E4x = (E_values[64202][0]-E_values[64201][0])/5.0; - - val.E[0] = (E_values[63800][0] + 0.5*d_E1x*dx); - val.E[0] += (E_values[64200][0] + 
0.5*d_E2x*dx); - val.E[0] += (E_values[63801][0] - 0.5*d_E3x*dx); - val.E[0] += (E_values[64201][0] - 0.5*d_E4x*dx); - val.E[0] *= 0.25; - val.E[1] = 0; - val.E[2] = 0; - values1[index] = val; -} - -int main(int argn,char* args[]) { - - if (argn != 2) { - cerr << "USAGE: ./esail_intpol " << endl; - return 1; - } - - //Get the file name - const string mask = args[1]; - vector fileList = toolutil::getFiles(mask); - sort(fileList.begin(),fileList.end()); - //cout << "Found " << fileList.size() << " files" << endl; - - vector values0(fileList.size()); - vector values1(fileList.size()); - for (size_t f=0; f 0) { logFile << "(BAILOUT): Bailing out, see error log for details." << endl; diff --git a/vlasovsolver/cpu_acc_transform.cpp b/vlasovsolver/cpu_acc_transform.cpp index 4ab7f74a9..5f07a9b2b 100644 --- a/vlasovsolver/cpu_acc_transform.cpp +++ b/vlasovsolver/cpu_acc_transform.cpp @@ -41,22 +41,16 @@ void updateAccelerationMaxdt( SpatialCell* spatial_cell, const uint popID) { - if (Parameters::propagatePotential == true) { - #warning Electric acceleration works for Poisson only atm - spatial_cell->set_max_v_dt(popID,numeric_limits::max()); - } - else { - const Real Bx = spatial_cell->parameters[CellParams::BGBXVOL]+spatial_cell->parameters[CellParams::PERBXVOL]; - const Real By = spatial_cell->parameters[CellParams::BGBYVOL]+spatial_cell->parameters[CellParams::PERBYVOL]; - const Real Bz = spatial_cell->parameters[CellParams::BGBZVOL]+spatial_cell->parameters[CellParams::PERBZVOL]; - const Eigen::Matrix B(Bx,By,Bz); - const Real B_mag = B.norm() + 1e-30; - const Real gyro_period = 2 * M_PI * getObjectWrapper().particleSpecies[popID].mass - / (getObjectWrapper().particleSpecies[popID].charge * B_mag); - - // Set maximum timestep limit for this cell, based on a maximum allowed rotation angle - spatial_cell->set_max_v_dt(popID,fabs(gyro_period)*(P::maxSlAccelerationRotation/360.0)); - } + const Real Bx = spatial_cell->parameters[CellParams::BGBXVOL]+spatial_cell->parameters[CellParams::PERBXVOL]; + const Real By = spatial_cell->parameters[CellParams::BGBYVOL]+spatial_cell->parameters[CellParams::PERBYVOL]; + const Real Bz = spatial_cell->parameters[CellParams::BGBZVOL]+spatial_cell->parameters[CellParams::PERBZVOL]; + const Eigen::Matrix B(Bx,By,Bz); + const Real B_mag = B.norm() + 1e-30; + const Real gyro_period = 2 * M_PI * getObjectWrapper().particleSpecies[popID].mass + / (getObjectWrapper().particleSpecies[popID].charge * B_mag); + + // Set maximum timestep limit for this cell, based on a maximum allowed rotation angle + spatial_cell->set_max_v_dt(popID,fabs(gyro_period)*(P::maxSlAccelerationRotation/360.0)); } @@ -118,20 +112,6 @@ Eigen::Transform compute_acceleration_transformation( // compute total transformation Transform total_transform(Matrix::Identity()); //CONTINUE - if (Parameters::propagatePotential == true) { - #warning Electric acceleration works for Poisson only atm - Real* E = &(spatial_cell->parameters[CellParams::EXVOL]); - - const Real q_per_m = getObjectWrapper().particleSpecies[popID].charge - / getObjectWrapper().particleSpecies[popID].mass; - const Real CONST = q_per_m * dt; - total_transform(0,3) = CONST * E[0]; - total_transform(1,3) = CONST * E[1]; - total_transform(2,3) = CONST * E[2]; - return total_transform; - } // if (Parameters::propagatePotential == true) - - unsigned int bulk_velocity_substeps; // in this many substeps we iterate forward bulk velocity when the complete transformation is computed (0.1 deg per substep). 
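// Sketch, not part of the surrounding patch: the timestep limit kept in
// updateAccelerationMaxdt above as a standalone helper. Assumes SI units.
#include <cmath>

double maxAccelerationDt(double mass, double charge, double Bmag,
                         double maxRotationDeg) {
   // Gyroperiod 2*pi*m/(q*|B|); allow at most maxRotationDeg of gyration
   // per acceleration timestep (cf. P::maxSlAccelerationRotation).
   const double gyroPeriod = 2.0 * M_PI * mass / (charge * (Bmag + 1e-30));
   return std::fabs(gyroPeriod) * (maxRotationDeg / 360.0);
}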
bulk_velocity_substeps = fabs(dt) / fabs(gyro_period*(0.1/360.0)); if (bulk_velocity_substeps < 1) bulk_velocity_substeps=1; From 7073ee377f53af62dd2097b407796467dc095646 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Tue, 14 May 2019 12:59:08 +0300 Subject: [PATCH 408/602] Update datareducer.cpp typo --- datareduction/datareducer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 86be31124..bd6758974 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -924,7 +924,7 @@ bool DataReducer::handlesWriting(const unsigned int& operatorID) const { return dynamic_cast(operators[operatorID]) != nullptr; } -/** Ask a DataReductionOperator if it wants to write parameterdas to the vlsv file header +/** Ask a DataReductionOperator if it wants to write parameters to the vlsv file header * @param operatorID ID number of the DataReductionOperator. * @return If true, then VLSVWriter should be passed to the DataReductionOperator.*/ bool DataReducer::hasParameters(const unsigned int& operatorID) const { From 9e24277924c4623e9aab715f7da0204a27a2caae Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Tue, 14 May 2019 13:50:10 +0300 Subject: [PATCH 409/602] Add fg_GridCoords reducer, and rename GridCoords to vg_GridCoords --- datareduction/datareducer.cpp | 173 ++++++++++++++++++++++++++++++++-- 1 file changed, 166 insertions(+), 7 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 7aa7f67ba..523a889e0 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -718,14 +718,173 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBZVOLdy",bvolderivatives::dBGBZVOLdy,1)); continue; } - if(*it == "GridCoordinates") { + if(*it == "vg_GridCoordinates") { // Spatial coordinates for each cell - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("X",CellParams::XCRD,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("Y",CellParams::YCRD,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("Z",CellParams::ZCRD,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("DX",CellParams::DX,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("DY",CellParams::DY,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("DZ",CellParams::DZ,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_X",CellParams::XCRD,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_Y",CellParams::YCRD,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_Z",CellParams::ZCRD,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_DX",CellParams::DX,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_DY",CellParams::DY,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_DZ",CellParams::DZ,1)); + continue; + } + if(*it == "fg_GridCoordinates") { + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_X",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< 
std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract total BVOL + for(int z=0; zaddOperator(new DRO::DataReductionOperatorFsGrid("fg_Y",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract total BVOL + for(int z=0; zaddOperator(new DRO::DataReductionOperatorFsGrid("fg_Z",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract total BVOL + for(int z=0; zaddOperator(new DRO::DataReductionOperatorFsGrid("fg_DX",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract total BVOL + for(int z=0; zaddOperator(new DRO::DataReductionOperatorFsGrid("fg_DY",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract total BVOL + for(int z=0; zaddOperator(new DRO::DataReductionOperatorFsGrid("fg_DZ",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract total BVOL + for(int 
Date: Tue, 14 May 2019 14:06:57 +0300
Subject: [PATCH 410/602] Revert "Removed unused fsgrid variables from
 spatial_cell and the associated coupling function calls and datareducer
 calls. Compiles, pushing to test on sisu."

This reverts commit ed4598d55805906ad3e779f4f1c34483214e201f.
---
 common.h                                | 146 ++++----
 datareduction/datareducer.cpp           | 204 +++++------
 datareduction/datareductionoperator.cpp |  17 +-
 fieldsolver/gridGlue.cpp                | 428 ++++++++++++------------
 grid.cpp                                |  32 +-
 iowrite.cpp                             |   4 +-
 spatial_cell.cpp                        |  30 +-
 spatial_cell.hpp                        |   2 +-
 vlasiator.cpp                           |  98 +++---
 9 files changed, 480 insertions(+), 481 deletions(-)

diff --git a/common.h b/common.h
index ef3798ef9..8ca859c52 100644
--- a/common.h
+++ b/common.h
@@ -133,9 +133,9 @@ namespace CellParams {
       EX,        /*!< Total electric field x-component, averaged over cell edge. Used to propagate BX,BY,BZ.*/
       EY,        /*!< Total electric field y-component, averaged over cell edge. Used to propagate BX,BY,BZ.*/
       EZ,        /*!< Total electric field z-component, averaged over cell edge. Used to propagate BX,BY,BZ.*/
-      /* BGBX,   /\*!< Background magnetic field x-component, averaged over cell x-face.*\/ */
-      /* BGBY,   /\*!< Background magnetic field x-component, averaged over cell x-face.*\/ */
-      /* BGBZ,   /\*!< Background magnetic field x-component, averaged over cell x-face.*\/ */
+      BGBX,      /*!< Background magnetic field x-component, averaged over cell x-face.*/
+      BGBY,      /*!< Background magnetic field y-component, averaged over cell y-face.*/
+      BGBZ,      /*!< Background magnetic field z-component, averaged over cell z-face.*/
       PERBX,     /*!< Perturbed Magnetic field x-component, averaged over cell x-face. Propagated by field solver.*/
       PERBY,     /*!< Perturbed Magnetic field y-component, averaged over cell y-face. Propagated by field solver.*/
       PERBZ,     /*!< Perturbed Magnetic field z-component, averaged over cell z-face.
Propagated by field solver.*/ @@ -164,18 +164,18 @@ namespace CellParams { EXVOL, /*!< Ex averaged over spatial cell.*/ EYVOL, /*!< Ey averaged over spatial cell.*/ EZVOL, /*!< Ez averaged over spatial cell.*/ - /* EXHALL_000_100, /\*!< Hall term x averaged along x on -y/-z edge of spatial cell.*\/ */ - /* EYHALL_000_010, /\*!< Hall term y averaged along y on -x/-z edge of spatial cell.*\/ */ - /* EZHALL_000_001, /\*!< Hall term z averaged along z on -x/-y edge of spatial cell.*\/ */ - /* EYHALL_100_110, /\*!< Hall term y averaged along y on +x/-z edge of spatial cell.*\/ */ - /* EZHALL_100_101, /\*!< Hall term z averaged along z on +x/-y edge of spatial cell.*\/ */ - /* EXHALL_010_110, /\*!< Hall term x averaged along x on +y/-z edge of spatial cell.*\/ */ - /* EZHALL_010_011, /\*!< Hall term z averaged along z on +y/-x edge of spatial cell.*\/ */ - /* EZHALL_110_111, /\*!< Hall term z averaged along z on +x/+y edge of spatial cell.*\/ */ - /* EXHALL_001_101, /\*!< Hall term x averaged along x on -y/+z edge of spatial cell.*\/ */ - /* EYHALL_001_011, /\*!< Hall term y averaged along y on -x/+z edge of spatial cell.*\/ */ - /* EYHALL_101_111, /\*!< Hall term y averaged along y on +x/+z edge of spatial cell.*\/ */ - /* EXHALL_011_111, /\*!< Hall term x averaged along x on +y/+z edge of spatial cell.*\/ */ + EXHALL_000_100, /*!< Hall term x averaged along x on -y/-z edge of spatial cell.*/ + EYHALL_000_010, /*!< Hall term y averaged along y on -x/-z edge of spatial cell.*/ + EZHALL_000_001, /*!< Hall term z averaged along z on -x/-y edge of spatial cell.*/ + EYHALL_100_110, /*!< Hall term y averaged along y on +x/-z edge of spatial cell.*/ + EZHALL_100_101, /*!< Hall term z averaged along z on +x/-y edge of spatial cell.*/ + EXHALL_010_110, /*!< Hall term x averaged along x on +y/-z edge of spatial cell.*/ + EZHALL_010_011, /*!< Hall term z averaged along z on +y/-x edge of spatial cell.*/ + EZHALL_110_111, /*!< Hall term z averaged along z on +x/+y edge of spatial cell.*/ + EXHALL_001_101, /*!< Hall term x averaged along x on -y/+z edge of spatial cell.*/ + EYHALL_001_011, /*!< Hall term y averaged along y on -x/+z edge of spatial cell.*/ + EYHALL_101_111, /*!< Hall term y averaged along y on +x/+z edge of spatial cell.*/ + EXHALL_011_111, /*!< Hall term x averaged along x on +y/+z edge of spatial cell.*/ EXGRADPE, /*!< Electron pressure gradient term x.*/ EYGRADPE, /*!< Electron pressure gradient term y.*/ EZGRADPE, /*!< Electron pressure gradient term z.*/ @@ -232,64 +232,64 @@ namespace CellParams { */ namespace fieldsolver { enum { -/* drhomdx, /\*!< Derivative of volume-averaged mass density to x-direction. *\/ */ -/* drhomdy, /\*!< Derivative of volume-averaged mass density to y-direction. *\/ */ -/* drhomdz, /\*!< Derivative of volume-averaged mass density to z-direction. *\/ */ -/* drhoqdx, /\*!< Derivative of volume-averaged charge density to x-direction. *\/ */ -/* drhoqdy, /\*!< Derivative of volume-averaged charge density to y-direction. *\/ */ -/* drhoqdz, /\*!< Derivative of volume-averaged charge density to z-direction. *\/ */ -/* dBGBxdy, /\*!< Derivative of face-averaged Bx to y-direction. *\/ */ -/* dBGBxdz, /\*!< Derivative of face-averaged Bx to z-direction. *\/ */ -/* dBGBydx, /\*!< Derivative of face-averaged By to x-direction. *\/ */ -/* dBGBydz, /\*!< Derivative of face-averaged By to z-direction. *\/ */ -/* dBGBzdx, /\*!< Derivative of face-averaged Bz to x-direction. *\/ */ -/* dBGBzdy, /\*!< Derivative of face-averaged Bz to y-direction. 
*\/ */ -/* dPERBxdy, /\*!< Derivative of face-averaged Bx to y-direction. *\/ */ -/* dPERBxdz, /\*!< Derivative of face-averaged Bx to z-direction. *\/ */ -/* dPERBydx, /\*!< Derivative of face-averaged By to x-direction. *\/ */ -/* dPERBydz, /\*!< Derivative of face-averaged By to z-direction. *\/ */ -/* dPERBzdx, /\*!< Derivative of face-averaged Bz to x-direction. *\/ */ -/* dPERBzdy, /\*!< Derivative of face-averaged Bz to y-direction. *\/ */ -/* // Insert for Hall term */ -/* // NOTE 2nd derivatives of BGBn are not needed as curl(dipole) = 0.0 */ -/* // will change if BGB is not curl-free */ -/* // dBGBxdyy, /\*!< Second derivative of face-averaged Bx to yy-direction. *\/ */ -/* // dBGBxdzz, /\*!< Second derivative of face-averaged Bx to zz-direction. *\/ */ -/* // dBGBxdyz, /\*!< Second derivative of face-averaged Bx to yz-direction. *\/ */ -/* // dBGBydxx, /\*!< Second derivative of face-averaged By to xx-direction. *\/ */ -/* // dBGBydzz, /\*!< Second derivative of face-averaged By to zz-direction. *\/ */ -/* // dBGBydxz, /\*!< Second derivative of face-averaged By to xz-direction. *\/ */ -/* // dBGBzdxx, /\*!< Second derivative of face-averaged Bz to xx-direction. *\/ */ -/* // dBGBzdyy, /\*!< Second derivative of face-averaged Bz to yy-direction. *\/ */ -/* // dBGBzdxy, /\*!< Second derivative of face-averaged Bz to xy-direction. *\/ */ -/* dPERBxdyy, /\*!< Second derivative of face-averaged Bx to yy-direction. *\/ */ -/* dPERBxdzz, /\*!< Second derivative of face-averaged Bx to zz-direction. *\/ */ -/* dPERBxdyz, /\*!< Second derivative of face-averaged Bx to yz-direction. *\/ */ -/* dPERBydxx, /\*!< Second derivative of face-averaged By to xx-direction. *\/ */ -/* dPERBydzz, /\*!< Second derivative of face-averaged By to zz-direction. *\/ */ -/* dPERBydxz, /\*!< Second derivative of face-averaged By to xz-direction. *\/ */ -/* dPERBzdxx, /\*!< Second derivative of face-averaged Bz to xx-direction. *\/ */ -/* dPERBzdyy, /\*!< Second derivative of face-averaged Bz to yy-direction. *\/ */ -/* dPERBzdxy, /\*!< Second derivative of face-averaged Bz to xy-direction. *\/ */ -/* dp11dx, /\*!< Derivative of P_11 to x direction. *\/ */ -/* dp11dy, /\*!< Derivative of P_11 to x direction. *\/ */ -/* dp11dz, /\*!< Derivative of P_11 to x direction. *\/ */ -/* dp22dx, /\*!< Derivative of P_22 to y direction. *\/ */ -/* dp22dy, /\*!< Derivative of P_22 to y direction. *\/ */ -/* dp22dz, /\*!< Derivative of P_22 to y direction. *\/ */ -/* dp33dx, /\*!< Derivative of P_33 to z direction. *\/ */ -/* dp33dy, /\*!< Derivative of P_33 to z direction. *\/ */ -/* dp33dz, /\*!< Derivative of P_33 to z direction. *\/ */ -/* // End of insert for Hall term */ -/* dVxdx, /\*!< Derivative of volume-averaged Vx to x-direction. *\/ */ -/* dVxdy, /\*!< Derivative of volume-averaged Vx to y-direction. *\/ */ -/* dVxdz, /\*!< Derivative of volume-averaged Vx to z-direction. *\/ */ -/* dVydx, /\*!< Derivative of volume-averaged Vy to x-direction. *\/ */ -/* dVydy, /\*!< Derivative of volume-averaged Vy to y-direction. *\/ */ -/* dVydz, /\*!< Derivative of volume-averaged Vy to z-direction. *\/ */ -/* dVzdx, /\*!< Derivative of volume-averaged Vz to x-direction. *\/ */ -/* dVzdy, /\*!< Derivative of volume-averaged Vz to y-direction. *\/ */ -/* dVzdz, /\*!< Derivative of volume-averaged Vz to z-direction. *\/ */ + drhomdx, /*!< Derivative of volume-averaged mass density to x-direction. */ + drhomdy, /*!< Derivative of volume-averaged mass density to y-direction. 
*/
+      drhomdz,   /*!< Derivative of volume-averaged mass density to z-direction. */
+      drhoqdx,   /*!< Derivative of volume-averaged charge density to x-direction. */
+      drhoqdy,   /*!< Derivative of volume-averaged charge density to y-direction. */
+      drhoqdz,   /*!< Derivative of volume-averaged charge density to z-direction. */
+      dBGBxdy,   /*!< Derivative of face-averaged Bx to y-direction. */
+      dBGBxdz,   /*!< Derivative of face-averaged Bx to z-direction. */
+      dBGBydx,   /*!< Derivative of face-averaged By to x-direction. */
+      dBGBydz,   /*!< Derivative of face-averaged By to z-direction. */
+      dBGBzdx,   /*!< Derivative of face-averaged Bz to x-direction. */
+      dBGBzdy,   /*!< Derivative of face-averaged Bz to y-direction. */
+      dPERBxdy,  /*!< Derivative of face-averaged Bx to y-direction. */
+      dPERBxdz,  /*!< Derivative of face-averaged Bx to z-direction. */
+      dPERBydx,  /*!< Derivative of face-averaged By to x-direction. */
+      dPERBydz,  /*!< Derivative of face-averaged By to z-direction. */
+      dPERBzdx,  /*!< Derivative of face-averaged Bz to x-direction. */
+      dPERBzdy,  /*!< Derivative of face-averaged Bz to y-direction. */
+      // Insert for Hall term
+      // NOTE 2nd derivatives of BGBn are not needed as curl(dipole) = 0.0
+      // will change if BGB is not curl-free
+//    dBGBxdyy,  /*!< Second derivative of face-averaged Bx to yy-direction. */
+//    dBGBxdzz,  /*!< Second derivative of face-averaged Bx to zz-direction. */
+//    dBGBxdyz,  /*!< Second derivative of face-averaged Bx to yz-direction. */
+//    dBGBydxx,  /*!< Second derivative of face-averaged By to xx-direction. */
+//    dBGBydzz,  /*!< Second derivative of face-averaged By to zz-direction. */
+//    dBGBydxz,  /*!< Second derivative of face-averaged By to xz-direction. */
+//    dBGBzdxx,  /*!< Second derivative of face-averaged Bz to xx-direction. */
+//    dBGBzdyy,  /*!< Second derivative of face-averaged Bz to yy-direction. */
+//    dBGBzdxy,  /*!< Second derivative of face-averaged Bz to xy-direction. */
+      dPERBxdyy, /*!< Second derivative of face-averaged Bx to yy-direction. */
+      dPERBxdzz, /*!< Second derivative of face-averaged Bx to zz-direction. */
+      dPERBxdyz, /*!< Second derivative of face-averaged Bx to yz-direction. */
+      dPERBydxx, /*!< Second derivative of face-averaged By to xx-direction. */
+      dPERBydzz, /*!< Second derivative of face-averaged By to zz-direction. */
+      dPERBydxz, /*!< Second derivative of face-averaged By to xz-direction. */
+      dPERBzdxx, /*!< Second derivative of face-averaged Bz to xx-direction. */
+      dPERBzdyy, /*!< Second derivative of face-averaged Bz to yy-direction. */
+      dPERBzdxy, /*!< Second derivative of face-averaged Bz to xy-direction. */
+      dp11dx,    /*!< Derivative of P_11 to x direction. */
+      dp11dy,    /*!< Derivative of P_11 to y direction. */
+      dp11dz,    /*!< Derivative of P_11 to z direction. */
+      dp22dx,    /*!< Derivative of P_22 to x direction. */
+      dp22dy,    /*!< Derivative of P_22 to y direction. */
+      dp22dz,    /*!< Derivative of P_22 to z direction. */
+      dp33dx,    /*!< Derivative of P_33 to x direction. */
+      dp33dy,    /*!< Derivative of P_33 to y direction. */
+      dp33dz,    /*!< Derivative of P_33 to z direction. */
+      // End of insert for Hall term
+      dVxdx,     /*!< Derivative of volume-averaged Vx to x-direction. */
+      dVxdy,     /*!< Derivative of volume-averaged Vx to y-direction. */
+      dVxdz,     /*!< Derivative of volume-averaged Vx to z-direction. */
+      dVydx,     /*!< Derivative of volume-averaged Vy to x-direction. */
+      dVydy,     /*!< Derivative of volume-averaged Vy to y-direction. */
+      dVydz,     /*!< Derivative of volume-averaged Vy to z-direction. */
+      dVzdx,     /*!< Derivative of volume-averaged Vz to x-direction. */
+      dVzdy,     /*!< Derivative of volume-averaged Vz to y-direction. */
+      dVzdz,     /*!< Derivative of volume-averaged Vz to z-direction. */
       N_SPATIAL_CELL_DERIVATIVES
    };
 }
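The fieldsolver enum restored above defines the index space for each spatial cell's flat derivatives array; the trailing N_SPATIAL_CELL_DERIVATIVES entry gives its size, and the hunks below read entries as mpiGrid[dccrgId]->derivatives[fieldsolver::drhomdx] and so on. A toy, self-contained sketch of that indexing pattern follows; the names toy, CellLike, and the three-entry enum are illustrative, not from the patch.

    #include <array>

    // Mirror of the pattern: an unscoped enum supplies stable indices into a
    // flat per-cell array, with a trailing N_* count used as the array size.
    namespace toy {
       enum { drhomdx, drhomdy, drhomdz, N_DERIVATIVES };
    }

    struct CellLike {   // hypothetical stand-in for spatial_cell::SpatialCell
       std::array<double, toy::N_DERIVATIVES> derivatives{};
    };

    int main() {
       CellLike cell;
       cell.derivatives[toy::drhomdx] = 1.5;   // write via the enum index
       return cell.derivatives[toy::drhomdx] == 1.5 ? 0 : 1;
    }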
diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp
index 488868830..7aa7f67ba 100644
--- a/datareduction/datareducer.cpp
+++ b/datareduction/datareducer.cpp
@@ -101,10 +101,10 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti
          ));
          continue;
       }
-      // if(*it == "vg_BackgroundB") { // Static (typically dipole) magnetic field part
-      //    outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("background_B",CellParams::BGBX,3));
-      //    continue;
-      // }
+      if(*it == "vg_BackgroundB") { // Static (typically dipole) magnetic field part
+         outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("background_B",CellParams::BGBX,3));
+         continue;
+      }
       if(*it == "fg_PerturbedB" || *it == "PerturbedB") { // Fluctuating magnetic field part
          outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_perturbed_B",[](
             FsGrid< std::array<Real, fsgrids::bfield::N_BFIELD>, 2>& perBGrid,
@@ -539,27 +539,27 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti
          ));
          continue;
       }
-      // if(*it == "HallE") {
-      //    // 12 corner components of the hall-effect contribution to the electric field
-      //    outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_000_100",CellParams::EXHALL_000_100,1));
-      //    outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_001_101",CellParams::EXHALL_001_101,1));
-      //    outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_010_110",CellParams::EXHALL_010_110,1));
-      //    outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_011_111",CellParams::EXHALL_011_111,1));
-      //    outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_000_010",CellParams::EYHALL_000_010,1));
-      //    outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_001_011",CellParams::EYHALL_001_011,1));
-      //    outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_100_110",CellParams::EYHALL_100_110,1));
-      //    outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_101_111",CellParams::EYHALL_101_111,1));
-      //    outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_000_001",CellParams::EZHALL_000_001,1));
-      //    outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_010_011",CellParams::EZHALL_010_011,1));
-      //    outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_100_101",CellParams::EZHALL_100_101,1));
-      //    outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_110_111",CellParams::EZHALL_110_111,1));
-      //    continue;
-      // }
-      // if(*it =="GradPeE") {
-      //    // Electron pressure gradient contribution to the generalized ohm's law
-      //    outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EGRADPE",CellParams::EXGRADPE,3));
-      //    continue;
-      // }
+      if(*it == "HallE") {
+         // 12 corner components of the hall-effect contribution to the electric field
+         outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_000_100",CellParams::EXHALL_000_100,1));
+         outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_001_101",CellParams::EXHALL_001_101,1));
+         outputReducer->addOperator(new
DRO::DataReductionOperatorCellParams("EXHALL_010_110",CellParams::EXHALL_010_110,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_011_111",CellParams::EXHALL_011_111,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_000_010",CellParams::EYHALL_000_010,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_001_011",CellParams::EYHALL_001_011,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_100_110",CellParams::EYHALL_100_110,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_101_111",CellParams::EYHALL_101_111,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_000_001",CellParams::EZHALL_000_001,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_010_011",CellParams::EZHALL_010_011,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_100_101",CellParams::EZHALL_100_101,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_110_111",CellParams::EZHALL_110_111,1)); + continue; + } + if(*it =="GradPeE") { + // Electron pressure gradient contribution to the generalized ohm's law + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EGRADPE",CellParams::EXGRADPE,3)); + continue; + } if(*it == "VolB" || *it == "vg_VolB") { // Volume-averaged magnetic field outputReducer->addOperator(new DRO::VariableBVol); @@ -651,73 +651,73 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - // if(*it == "derivs") { - // // Derivatives of all quantities that might be of interest - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdx",fieldsolver::drhomdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdy",fieldsolver::drhomdy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdz",fieldsolver::drhomdz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdx",fieldsolver::drhoqdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdy",fieldsolver::drhoqdy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdz",fieldsolver::drhoqdz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dx",fieldsolver::dp11dx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dx",fieldsolver::dp22dx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dx",fieldsolver::dp33dx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dy",fieldsolver::dp11dy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dy",fieldsolver::dp22dy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dy",fieldsolver::dp33dy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dz",fieldsolver::dp11dz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dz",fieldsolver::dp22dz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dz",fieldsolver::dp33dz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdy",fieldsolver::dPERBxdy,1)); - // outputReducer->addOperator(new 
DRO::DataReductionOperatorDerivatives("dBGBxdy",fieldsolver::dBGBxdy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdz",fieldsolver::dPERBxdz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBxdz",fieldsolver::dBGBxdz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydx",fieldsolver::dPERBydx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBydx",fieldsolver::dBGBydx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydz",fieldsolver::dPERBydz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBydz",fieldsolver::dBGBydz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdx",fieldsolver::dPERBzdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBzdx",fieldsolver::dBGBzdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdy",fieldsolver::dPERBzdy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBzdy",fieldsolver::dBGBzdy,1)); - // if(Parameters::ohmHallTerm == 2) { - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdyy",fieldsolver::dPERBxdyy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdzz",fieldsolver::dPERBxdzz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydxx",fieldsolver::dPERBydxx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydzz",fieldsolver::dPERBydzz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdxx",fieldsolver::dPERBzdxx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdyy",fieldsolver::dPERBzdyy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdyz",fieldsolver::dPERBxdyz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydxz",fieldsolver::dPERBydxz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdxy",fieldsolver::dPERBzdxy,1)); - // } - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdx",fieldsolver::dVxdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdy",fieldsolver::dVxdy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdz",fieldsolver::dVxdz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydx",fieldsolver::dVydx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydy",fieldsolver::dVydy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydz",fieldsolver::dVydz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdx",fieldsolver::dVzdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdy",fieldsolver::dVzdy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdz",fieldsolver::dVzdz,1)); - // continue; - // } - // if(*it == "BVOLderivs") { - // // Volume-averaged derivatives - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdy",bvolderivatives::dPERBXVOLdy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBXVOLdy",bvolderivatives::dBGBXVOLdy,1)); - // 
outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdz",bvolderivatives::dPERBXVOLdz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBXVOLdz",bvolderivatives::dBGBXVOLdz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdx",bvolderivatives::dPERBYVOLdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBYVOLdx",bvolderivatives::dBGBYVOLdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdz",bvolderivatives::dPERBYVOLdz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBYVOLdz",bvolderivatives::dBGBYVOLdz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdx",bvolderivatives::dPERBZVOLdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBZVOLdx",bvolderivatives::dBGBZVOLdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdy",bvolderivatives::dPERBZVOLdy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBZVOLdy",bvolderivatives::dBGBZVOLdy,1)); - // continue; - // } + if(*it == "derivs") { + // Derivatives of all quantities that might be of interest + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdx",fieldsolver::drhomdx,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdy",fieldsolver::drhomdy,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdz",fieldsolver::drhomdz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdx",fieldsolver::drhoqdx,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdy",fieldsolver::drhoqdy,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdz",fieldsolver::drhoqdz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dx",fieldsolver::dp11dx,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dx",fieldsolver::dp22dx,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dx",fieldsolver::dp33dx,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dy",fieldsolver::dp11dy,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dy",fieldsolver::dp22dy,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dy",fieldsolver::dp33dy,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dz",fieldsolver::dp11dz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dz",fieldsolver::dp22dz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dz",fieldsolver::dp33dz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdy",fieldsolver::dPERBxdy,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBxdy",fieldsolver::dBGBxdy,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdz",fieldsolver::dPERBxdz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBxdz",fieldsolver::dBGBxdz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydx",fieldsolver::dPERBydx,1)); + outputReducer->addOperator(new 
DRO::DataReductionOperatorDerivatives("dBGBydx",fieldsolver::dBGBydx,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydz",fieldsolver::dPERBydz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBydz",fieldsolver::dBGBydz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdx",fieldsolver::dPERBzdx,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBzdx",fieldsolver::dBGBzdx,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdy",fieldsolver::dPERBzdy,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBzdy",fieldsolver::dBGBzdy,1)); + if(Parameters::ohmHallTerm == 2) { + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdyy",fieldsolver::dPERBxdyy,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdzz",fieldsolver::dPERBxdzz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydxx",fieldsolver::dPERBydxx,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydzz",fieldsolver::dPERBydzz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdxx",fieldsolver::dPERBzdxx,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdyy",fieldsolver::dPERBzdyy,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdyz",fieldsolver::dPERBxdyz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydxz",fieldsolver::dPERBydxz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdxy",fieldsolver::dPERBzdxy,1)); + } + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdx",fieldsolver::dVxdx,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdy",fieldsolver::dVxdy,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdz",fieldsolver::dVxdz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydx",fieldsolver::dVydx,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydy",fieldsolver::dVydy,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydz",fieldsolver::dVydz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdx",fieldsolver::dVzdx,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdy",fieldsolver::dVzdy,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdz",fieldsolver::dVzdz,1)); + continue; + } + if(*it == "BVOLderivs") { + // Volume-averaged derivatives + outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdy",bvolderivatives::dPERBXVOLdy,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBXVOLdy",bvolderivatives::dBGBXVOLdy,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdz",bvolderivatives::dPERBXVOLdz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBXVOLdz",bvolderivatives::dBGBXVOLdz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdx",bvolderivatives::dPERBYVOLdx,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBYVOLdx",bvolderivatives::dBGBYVOLdx,1)); + outputReducer->addOperator(new 
DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdz",bvolderivatives::dPERBYVOLdz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBYVOLdz",bvolderivatives::dBGBYVOLdz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdx",bvolderivatives::dPERBZVOLdx,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBZVOLdx",bvolderivatives::dBGBZVOLdx,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdy",bvolderivatives::dPERBZVOLdy,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBZVOLdy",bvolderivatives::dBGBZVOLdy,1)); + continue; + } if(*it == "GridCoordinates") { // Spatial coordinates for each cell outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("X",CellParams::XCRD,1)); @@ -766,16 +766,16 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti for (it = P::diagnosticVariableList.begin(); it != P::diagnosticVariableList.end(); it++) { - // if(*it == "FluxB") { - // // Overall magnetic flux through the simulation plane - // diagnosticReducer->addOperator(new DRO::DiagnosticFluxB); - // continue; - // } - // if(*it == "FluxE") { - // // Overall electric flux through the simulation plane - // diagnosticReducer->addOperator(new DRO::DiagnosticFluxE); - // continue; - // } + if(*it == "FluxB") { + // Overall magnetic flux through the simulation plane + diagnosticReducer->addOperator(new DRO::DiagnosticFluxB); + continue; + } + if(*it == "FluxE") { + // Overall electric flux through the simulation plane + diagnosticReducer->addOperator(new DRO::DiagnosticFluxE); + continue; + } if (*it == "populations_Blocks") { // Per-population total block counts for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index 074ec17e4..ca5710877 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -160,14 +160,15 @@ namespace DRO { return true; } - // DataReductionOperatorDerivatives::DataReductionOperatorDerivatives(const std::string& name,const unsigned int parameterIndex,const unsigned int vectorSize): - // DataReductionOperatorCellParams(name,parameterIndex,vectorSize) { - // } - // //a version with derivatives, this is the only function that is different - // bool DataReductionOperatorDerivatives::setSpatialCell(const SpatialCell* cell) { - // data = &(cell->derivatives[_parameterIndex]); - // return true; - // } + DataReductionOperatorDerivatives::DataReductionOperatorDerivatives(const std::string& name,const unsigned int parameterIndex,const unsigned int vectorSize): + DataReductionOperatorCellParams(name,parameterIndex,vectorSize) { + + } + //a version with derivatives, this is the only function that is different + bool DataReductionOperatorDerivatives::setSpatialCell(const SpatialCell* cell) { + data = &(cell->derivatives[_parameterIndex]); + return true; + } DataReductionOperatorBVOLDerivatives::DataReductionOperatorBVOLDerivatives(const std::string& name,const unsigned int parameterIndex,const unsigned int vectorSize): diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 4c59f022a..41b91a78a 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -370,247 +370,247 @@ void getFieldsFromFsGrid( } -// void getBgFieldsAndDerivativesFromFsGrid( -// FsGrid< std::array, 2>& BgBGrid, -// 
FsGrid< fsgrids::technical, 2>& technicalGrid, -// dccrg::Dccrg& mpiGrid, -// const std::vector& cells -// ) { -// // Setup transfer buffers -// cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); -// std::vector< std::array > transferBufferBGB(nCellsOnMaxRefLvl); -// std::vector< std::array*> transferBufferPointerBGB; -// std::vector< fsgrids::technical > transferBufferTechnical(nCellsOnMaxRefLvl); -// std::vector< fsgrids::technical*> transferBufferPointerTechnical; +void getBgFieldsAndDerivativesFromFsGrid( + FsGrid< std::array, 2>& BgBGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, + dccrg::Dccrg& mpiGrid, + const std::vector& cells +) { + // Setup transfer buffers + cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); + std::vector< std::array > transferBufferBGB(nCellsOnMaxRefLvl); + std::vector< std::array*> transferBufferPointerBGB; + std::vector< fsgrids::technical > transferBufferTechnical(nCellsOnMaxRefLvl); + std::vector< fsgrids::technical*> transferBufferPointerTechnical; -// // Setup transfer pointers -// BgBGrid.setupForTransferOut(nCellsOnMaxRefLvl); -// technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); -// int k = 0; -// for(auto dccrgId : cells) { -// const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); -// // Store a pointer to the first fsgrid cell that maps to each dccrg Id -// transferBufferPointerBGB.push_back(&transferBufferBGB[k]); -// transferBufferPointerTechnical.push_back(&transferBufferTechnical[k]); -// for (auto fsgridId : fsgridIds) { -// std::array* thisCellData = &transferBufferBGB[k]; -// BgBGrid.transferDataOut(fsgridId, thisCellData); -// fsgrids::technical* thisCellDataTechnical = &transferBufferTechnical[k]; -// technicalGrid.transferDataOut(fsgridId, thisCellDataTechnical); -// k++; -// } -// } -// // Do the transfer -// BgBGrid.finishTransfersOut(); -// technicalGrid.finishTransfersOut(); + // Setup transfer pointers + BgBGrid.setupForTransferOut(nCellsOnMaxRefLvl); + technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); + int k = 0; + for(auto dccrgId : cells) { + const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); + // Store a pointer to the first fsgrid cell that maps to each dccrg Id + transferBufferPointerBGB.push_back(&transferBufferBGB[k]); + transferBufferPointerTechnical.push_back(&transferBufferTechnical[k]); + for (auto fsgridId : fsgridIds) { + std::array* thisCellData = &transferBufferBGB[k]; + BgBGrid.transferDataOut(fsgridId, thisCellData); + fsgrids::technical* thisCellDataTechnical = &transferBufferTechnical[k]; + technicalGrid.transferDataOut(fsgridId, thisCellDataTechnical); + k++; + } + } + // Do the transfer + BgBGrid.finishTransfersOut(); + technicalGrid.finishTransfersOut(); -// // Build lists of index pairs to dccrg and fsgrid -// std::vector> iCellParams; -// iCellParams.reserve(6); -// iCellParams.push_back(std::make_pair(CellParams::BGBX, fsgrids::bgbfield::BGBX)); -// iCellParams.push_back(std::make_pair(CellParams::BGBY, fsgrids::bgbfield::BGBY)); -// iCellParams.push_back(std::make_pair(CellParams::BGBZ, fsgrids::bgbfield::BGBZ)); -// iCellParams.push_back(std::make_pair(CellParams::BGBXVOL, fsgrids::bgbfield::BGBXVOL)); -// iCellParams.push_back(std::make_pair(CellParams::BGBYVOL, fsgrids::bgbfield::BGBYVOL)); -// iCellParams.push_back(std::make_pair(CellParams::BGBZVOL, fsgrids::bgbfield::BGBZVOL)); -// std::vector> iDerivatives; -// iDerivatives.reserve(6); -// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBxdy, 
fsgrids::bgbfield::dBGBxdy)); -// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBxdz, fsgrids::bgbfield::dBGBxdz)); -// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBydx, fsgrids::bgbfield::dBGBydx)); -// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBydz, fsgrids::bgbfield::dBGBydz)); -// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBzdx, fsgrids::bgbfield::dBGBzdx)); -// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBzdy, fsgrids::bgbfield::dBGBzdy)); -// std::vector> iDerivativesBVOL; -// iDerivativesBVOL.reserve(6); -// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBXVOLdy, fsgrids::bgbfield::dBGBXVOLdy)); -// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBXVOLdz, fsgrids::bgbfield::dBGBXVOLdz)); -// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBYVOLdx, fsgrids::bgbfield::dBGBYVOLdx)); -// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBYVOLdz, fsgrids::bgbfield::dBGBYVOLdz)); -// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBZVOLdx, fsgrids::bgbfield::dBGBZVOLdx)); -// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBZVOLdy, fsgrids::bgbfield::dBGBZVOLdy)); + // Build lists of index pairs to dccrg and fsgrid + std::vector> iCellParams; + iCellParams.reserve(6); + iCellParams.push_back(std::make_pair(CellParams::BGBX, fsgrids::bgbfield::BGBX)); + iCellParams.push_back(std::make_pair(CellParams::BGBY, fsgrids::bgbfield::BGBY)); + iCellParams.push_back(std::make_pair(CellParams::BGBZ, fsgrids::bgbfield::BGBZ)); + iCellParams.push_back(std::make_pair(CellParams::BGBXVOL, fsgrids::bgbfield::BGBXVOL)); + iCellParams.push_back(std::make_pair(CellParams::BGBYVOL, fsgrids::bgbfield::BGBYVOL)); + iCellParams.push_back(std::make_pair(CellParams::BGBZVOL, fsgrids::bgbfield::BGBZVOL)); + std::vector> iDerivatives; + iDerivatives.reserve(6); + iDerivatives.push_back(std::make_pair(fieldsolver::dBGBxdy, fsgrids::bgbfield::dBGBxdy)); + iDerivatives.push_back(std::make_pair(fieldsolver::dBGBxdz, fsgrids::bgbfield::dBGBxdz)); + iDerivatives.push_back(std::make_pair(fieldsolver::dBGBydx, fsgrids::bgbfield::dBGBydx)); + iDerivatives.push_back(std::make_pair(fieldsolver::dBGBydz, fsgrids::bgbfield::dBGBydz)); + iDerivatives.push_back(std::make_pair(fieldsolver::dBGBzdx, fsgrids::bgbfield::dBGBzdx)); + iDerivatives.push_back(std::make_pair(fieldsolver::dBGBzdy, fsgrids::bgbfield::dBGBzdy)); + std::vector> iDerivativesBVOL; + iDerivativesBVOL.reserve(6); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBXVOLdy, fsgrids::bgbfield::dBGBXVOLdy)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBXVOLdz, fsgrids::bgbfield::dBGBXVOLdz)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBYVOLdx, fsgrids::bgbfield::dBGBYVOLdx)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBYVOLdz, fsgrids::bgbfield::dBGBYVOLdz)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBZVOLdx, fsgrids::bgbfield::dBGBZVOLdx)); + iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBZVOLdy, fsgrids::bgbfield::dBGBZVOLdy)); -// // Distribute data from the transfer buffer back into the appropriate mpiGrid places -// // Disregard DO_NOT_COMPUTE cells -// #pragma omp parallel for -// for(uint i = 0; i < cells.size(); ++i) { + // Distribute data from the transfer buffer back into the appropriate mpiGrid places + // Disregard DO_NOT_COMPUTE cells + #pragma omp parallel for + for(uint i = 0; i < cells.size(); ++i) { -// 
const CellID dccrgId = cells[i]; -// auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); + const CellID dccrgId = cells[i]; + auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); -// // Calculate the number of fsgrid cells we loop through -// cint nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); -// // Count the number of fsgrid cells we need to average into the current dccrg cell -// int nCellsToSum = 0; + // Calculate the number of fsgrid cells we loop through + cint nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); + // Count the number of fsgrid cells we need to average into the current dccrg cell + int nCellsToSum = 0; -// // TODO: Could optimize here by adding a separate branch for nCells == 1 with direct assignment of the value -// // Could also do the average in a temporary value and only access grid structure once. + // TODO: Could optimize here by adding a separate branch for nCells == 1 with direct assignment of the value + // Could also do the average in a temporary value and only access grid structure once. -// // Initialize values to 0 -// for (auto j : iCellParams) cellParams[j.first] = 0.0; -// for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; -// for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] = 0.0; + // Initialize values to 0 + for (auto j : iCellParams) cellParams[j.first] = 0.0; + for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; + for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] = 0.0; -// for(int iCell = 0; iCell < nCells; ++iCell) { -// // The fsgrid cells that cover the i'th dccrg cell are pointed at by -// // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. -// // We want to average over those who are not DO_NOT_COMPUTE to get the value for the dccrg cell -// if ((transferBufferPointerTechnical[i] + iCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { -// continue; -// } else { -// nCellsToSum++; + for(int iCell = 0; iCell < nCells; ++iCell) { + // The fsgrid cells that cover the i'th dccrg cell are pointed at by + // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. 
+ // We want to average over those who are not DO_NOT_COMPUTE to get the value for the dccrg cell + if ((transferBufferPointerTechnical[i] + iCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { + continue; + } else { + nCellsToSum++; -// std::array* thisCellData = transferBufferPointerBGB[i] + iCell; + std::array* thisCellData = transferBufferPointerBGB[i] + iCell; -// for (auto j : iCellParams) cellParams[j.first] += thisCellData->at(j.second); -// for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] += thisCellData->at(j.second); -// for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] += thisCellData->at(j.second); -// } -// } + for (auto j : iCellParams) cellParams[j.first] += thisCellData->at(j.second); + for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] += thisCellData->at(j.second); + for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] += thisCellData->at(j.second); + } + } -// if (nCellsToSum > 0) { -// // Divide by the number of cells to get the average -// for (auto j : iCellParams) cellParams[j.first] /= nCellsToSum; -// for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; -// for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] /= nCellsToSum; -// } -// } -// } - - -// void getDerivativesFromFsGrid( -// FsGrid< std::array, 2>& dperbGrid, -// FsGrid< std::array, 2>& dmomentsGrid, -// FsGrid< fsgrids::technical, 2>& technicalGrid, -// dccrg::Dccrg& mpiGrid, -// const std::vector& cells -// ) { - -// // Setup transfer buffers -// cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); -// std::vector< std::array > dperbTransferBuffer(nCellsOnMaxRefLvl); -// std::vector< std::array > dmomentsTransferBuffer(nCellsOnMaxRefLvl); + if (nCellsToSum > 0) { + // Divide by the number of cells to get the average + for (auto j : iCellParams) cellParams[j.first] /= nCellsToSum; + for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; + for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] /= nCellsToSum; + } + } +} + + +void getDerivativesFromFsGrid( + FsGrid< std::array, 2>& dperbGrid, + FsGrid< std::array, 2>& dmomentsGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, + dccrg::Dccrg& mpiGrid, + const std::vector& cells +) { + + // Setup transfer buffers + cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); + std::vector< std::array > dperbTransferBuffer(nCellsOnMaxRefLvl); + std::vector< std::array > dmomentsTransferBuffer(nCellsOnMaxRefLvl); -// std::vector< std::array*> dperbTransferBufferPointer; -// std::vector< std::array*> dmomentsTransferBufferPointer; + std::vector< std::array*> dperbTransferBufferPointer; + std::vector< std::array*> dmomentsTransferBufferPointer; -// std::vector< fsgrids::technical > transferBufferTechnical(nCellsOnMaxRefLvl); -// std::vector< fsgrids::technical*> transferBufferPointerTechnical; + std::vector< fsgrids::technical > transferBufferTechnical(nCellsOnMaxRefLvl); + std::vector< fsgrids::technical*> transferBufferPointerTechnical; -// dperbGrid.setupForTransferOut(nCellsOnMaxRefLvl); -// dmomentsGrid.setupForTransferOut(nCellsOnMaxRefLvl); -// technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); + dperbGrid.setupForTransferOut(nCellsOnMaxRefLvl); + dmomentsGrid.setupForTransferOut(nCellsOnMaxRefLvl); + technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); -// int k = 0; -// for (auto dccrgId : cells) { + int k = 0; + for (auto dccrgId : 
cells) { -// // Assuming same local size in all fsgrids -// const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); -// // Store a pointer to the first fsgrid cell that maps to each dccrg Id -// dperbTransferBufferPointer.push_back(&dperbTransferBuffer[k]); -// dmomentsTransferBufferPointer.push_back(&dmomentsTransferBuffer[k]); -// transferBufferPointerTechnical.push_back(&transferBufferTechnical[k]); + // Assuming same local size in all fsgrids + const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); + // Store a pointer to the first fsgrid cell that maps to each dccrg Id + dperbTransferBufferPointer.push_back(&dperbTransferBuffer[k]); + dmomentsTransferBufferPointer.push_back(&dmomentsTransferBuffer[k]); + transferBufferPointerTechnical.push_back(&transferBufferTechnical[k]); -// for (auto fsgridId : fsgridIds) { + for (auto fsgridId : fsgridIds) { -// std::array* dperbCellData = &dperbTransferBuffer[k]; -// dperbGrid.transferDataOut(fsgridId, dperbCellData); -// std::array* dmomentsCellData = &dmomentsTransferBuffer[k]; -// dmomentsGrid.transferDataOut(fsgridId, dmomentsCellData); -// fsgrids::technical* thisCellDataTechnical = &transferBufferTechnical[k]; -// technicalGrid.transferDataOut(fsgridId, thisCellDataTechnical); -// k++; -// } -// } + std::array* dperbCellData = &dperbTransferBuffer[k]; + dperbGrid.transferDataOut(fsgridId, dperbCellData); + std::array* dmomentsCellData = &dmomentsTransferBuffer[k]; + dmomentsGrid.transferDataOut(fsgridId, dmomentsCellData); + fsgrids::technical* thisCellDataTechnical = &transferBufferTechnical[k]; + technicalGrid.transferDataOut(fsgridId, thisCellDataTechnical); + k++; + } + } -// // Do the transfer -// dperbGrid.finishTransfersOut(); -// dmomentsGrid.finishTransfersOut(); -// technicalGrid.finishTransfersOut(); - -// std::vector> iDmoments; -// std::vector> iDperb; -// iDmoments.reserve(24); -// iDmoments.push_back(std::make_pair(fieldsolver::drhomdx, fsgrids::dmoments::drhomdx)); -// iDmoments.push_back(std::make_pair(fieldsolver::drhomdy, fsgrids::dmoments::drhomdy)); -// iDmoments.push_back(std::make_pair(fieldsolver::drhomdz, fsgrids::dmoments::drhomdz)); -// iDmoments.push_back(std::make_pair(fieldsolver::drhoqdx, fsgrids::dmoments::drhoqdx)); -// iDmoments.push_back(std::make_pair(fieldsolver::drhoqdy, fsgrids::dmoments::drhoqdy)); -// iDmoments.push_back(std::make_pair(fieldsolver::drhoqdz, fsgrids::dmoments::drhoqdz)); -// iDmoments.push_back(std::make_pair(fieldsolver::dp11dx , fsgrids::dmoments::dp11dx )); -// iDmoments.push_back(std::make_pair(fieldsolver::dp11dy , fsgrids::dmoments::dp11dy )); -// iDmoments.push_back(std::make_pair(fieldsolver::dp11dz , fsgrids::dmoments::dp11dz )); -// iDmoments.push_back(std::make_pair(fieldsolver::dp22dx , fsgrids::dmoments::dp22dx )); -// iDmoments.push_back(std::make_pair(fieldsolver::dp22dy , fsgrids::dmoments::dp22dy )); -// iDmoments.push_back(std::make_pair(fieldsolver::dp22dz , fsgrids::dmoments::dp22dz )); -// iDmoments.push_back(std::make_pair(fieldsolver::dp33dx , fsgrids::dmoments::dp33dx )); -// iDmoments.push_back(std::make_pair(fieldsolver::dp33dy , fsgrids::dmoments::dp33dy )); -// iDmoments.push_back(std::make_pair(fieldsolver::dp33dz , fsgrids::dmoments::dp33dz )); -// iDmoments.push_back(std::make_pair(fieldsolver::dVxdx , fsgrids::dmoments::dVxdx )); -// iDmoments.push_back(std::make_pair(fieldsolver::dVxdy , fsgrids::dmoments::dVxdy )); -// iDmoments.push_back(std::make_pair(fieldsolver::dVxdz , fsgrids::dmoments::dVxdz )); -// 
iDmoments.push_back(std::make_pair(fieldsolver::dVydx , fsgrids::dmoments::dVydx )); -// iDmoments.push_back(std::make_pair(fieldsolver::dVydy , fsgrids::dmoments::dVydy )); -// iDmoments.push_back(std::make_pair(fieldsolver::dVydz , fsgrids::dmoments::dVydz )); -// iDmoments.push_back(std::make_pair(fieldsolver::dVzdx , fsgrids::dmoments::dVzdx )); -// iDmoments.push_back(std::make_pair(fieldsolver::dVzdy , fsgrids::dmoments::dVzdy )); -// iDmoments.push_back(std::make_pair(fieldsolver::dVzdz , fsgrids::dmoments::dVzdz )); - -// iDperb.reserve(15); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdy , fsgrids::dperb::dPERBxdy )); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdz , fsgrids::dperb::dPERBxdz )); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBydx , fsgrids::dperb::dPERBydx )); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBydz , fsgrids::dperb::dPERBydz )); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdx , fsgrids::dperb::dPERBzdx )); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdy , fsgrids::dperb::dPERBzdy )); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdyy, fsgrids::dperb::dPERBxdyy)); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdzz, fsgrids::dperb::dPERBxdzz)); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBydxx, fsgrids::dperb::dPERBydxx)); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBydzz, fsgrids::dperb::dPERBydzz)); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdxx, fsgrids::dperb::dPERBzdxx)); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdyy, fsgrids::dperb::dPERBzdyy)); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdyz, fsgrids::dperb::dPERBxdyz)); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBydxz, fsgrids::dperb::dPERBydxz)); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdxy, fsgrids::dperb::dPERBzdxy)); + // Do the transfer + dperbGrid.finishTransfersOut(); + dmomentsGrid.finishTransfersOut(); + technicalGrid.finishTransfersOut(); + + std::vector> iDmoments; + std::vector> iDperb; + iDmoments.reserve(24); + iDmoments.push_back(std::make_pair(fieldsolver::drhomdx, fsgrids::dmoments::drhomdx)); + iDmoments.push_back(std::make_pair(fieldsolver::drhomdy, fsgrids::dmoments::drhomdy)); + iDmoments.push_back(std::make_pair(fieldsolver::drhomdz, fsgrids::dmoments::drhomdz)); + iDmoments.push_back(std::make_pair(fieldsolver::drhoqdx, fsgrids::dmoments::drhoqdx)); + iDmoments.push_back(std::make_pair(fieldsolver::drhoqdy, fsgrids::dmoments::drhoqdy)); + iDmoments.push_back(std::make_pair(fieldsolver::drhoqdz, fsgrids::dmoments::drhoqdz)); + iDmoments.push_back(std::make_pair(fieldsolver::dp11dx , fsgrids::dmoments::dp11dx )); + iDmoments.push_back(std::make_pair(fieldsolver::dp11dy , fsgrids::dmoments::dp11dy )); + iDmoments.push_back(std::make_pair(fieldsolver::dp11dz , fsgrids::dmoments::dp11dz )); + iDmoments.push_back(std::make_pair(fieldsolver::dp22dx , fsgrids::dmoments::dp22dx )); + iDmoments.push_back(std::make_pair(fieldsolver::dp22dy , fsgrids::dmoments::dp22dy )); + iDmoments.push_back(std::make_pair(fieldsolver::dp22dz , fsgrids::dmoments::dp22dz )); + iDmoments.push_back(std::make_pair(fieldsolver::dp33dx , fsgrids::dmoments::dp33dx )); + iDmoments.push_back(std::make_pair(fieldsolver::dp33dy , fsgrids::dmoments::dp33dy )); + iDmoments.push_back(std::make_pair(fieldsolver::dp33dz , fsgrids::dmoments::dp33dz )); + iDmoments.push_back(std::make_pair(fieldsolver::dVxdx , fsgrids::dmoments::dVxdx )); + 
iDmoments.push_back(std::make_pair(fieldsolver::dVxdy , fsgrids::dmoments::dVxdy )); + iDmoments.push_back(std::make_pair(fieldsolver::dVxdz , fsgrids::dmoments::dVxdz )); + iDmoments.push_back(std::make_pair(fieldsolver::dVydx , fsgrids::dmoments::dVydx )); + iDmoments.push_back(std::make_pair(fieldsolver::dVydy , fsgrids::dmoments::dVydy )); + iDmoments.push_back(std::make_pair(fieldsolver::dVydz , fsgrids::dmoments::dVydz )); + iDmoments.push_back(std::make_pair(fieldsolver::dVzdx , fsgrids::dmoments::dVzdx )); + iDmoments.push_back(std::make_pair(fieldsolver::dVzdy , fsgrids::dmoments::dVzdy )); + iDmoments.push_back(std::make_pair(fieldsolver::dVzdz , fsgrids::dmoments::dVzdz )); + + iDperb.reserve(15); + iDperb.push_back(std::make_pair(fieldsolver::dPERBxdy , fsgrids::dperb::dPERBxdy )); + iDperb.push_back(std::make_pair(fieldsolver::dPERBxdz , fsgrids::dperb::dPERBxdz )); + iDperb.push_back(std::make_pair(fieldsolver::dPERBydx , fsgrids::dperb::dPERBydx )); + iDperb.push_back(std::make_pair(fieldsolver::dPERBydz , fsgrids::dperb::dPERBydz )); + iDperb.push_back(std::make_pair(fieldsolver::dPERBzdx , fsgrids::dperb::dPERBzdx )); + iDperb.push_back(std::make_pair(fieldsolver::dPERBzdy , fsgrids::dperb::dPERBzdy )); + iDperb.push_back(std::make_pair(fieldsolver::dPERBxdyy, fsgrids::dperb::dPERBxdyy)); + iDperb.push_back(std::make_pair(fieldsolver::dPERBxdzz, fsgrids::dperb::dPERBxdzz)); + iDperb.push_back(std::make_pair(fieldsolver::dPERBydxx, fsgrids::dperb::dPERBydxx)); + iDperb.push_back(std::make_pair(fieldsolver::dPERBydzz, fsgrids::dperb::dPERBydzz)); + iDperb.push_back(std::make_pair(fieldsolver::dPERBzdxx, fsgrids::dperb::dPERBzdxx)); + iDperb.push_back(std::make_pair(fieldsolver::dPERBzdyy, fsgrids::dperb::dPERBzdyy)); + iDperb.push_back(std::make_pair(fieldsolver::dPERBxdyz, fsgrids::dperb::dPERBxdyz)); + iDperb.push_back(std::make_pair(fieldsolver::dPERBydxz, fsgrids::dperb::dPERBydxz)); + iDperb.push_back(std::make_pair(fieldsolver::dPERBzdxy, fsgrids::dperb::dPERBzdxy)); -// // Distribute data from the transfer buffers back into the appropriate mpiGrid places -// // Disregard DO_NOT_COMPUTE cells -// #pragma omp parallel for -// for(uint i = 0; i < cells.size(); ++i) { + // Distribute data from the transfer buffers back into the appropriate mpiGrid places + // Disregard DO_NOT_COMPUTE cells + #pragma omp parallel for + for(uint i = 0; i < cells.size(); ++i) { -// const CellID dccrgId = cells[i]; + const CellID dccrgId = cells[i]; -// // Calculate the number of fsgrid cells we loop through -// cint nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); -// // Count the number of fsgrid cells we need to average into the current dccrg cell -// int nCellsToSum = 0; + // Calculate the number of fsgrid cells we loop through + cint nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); + // Count the number of fsgrid cells we need to average into the current dccrg cell + int nCellsToSum = 0; -// for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; -// for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; + for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; + for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; -// for(int iCell = 0; iCell < nCells; ++iCell) { -// // The fsgrid cells that cover the i'th dccrg cell are pointed at by -// // transferBufferPointer[i] ... 
transferBufferPointer[i] + nCell. -// // We want to average over those who are not DO_NOT_COMPUTE to get the value for the dccrg cell -// if ((transferBufferPointerTechnical[i] + iCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { -// continue; -// } else { -// nCellsToSum++; + for(int iCell = 0; iCell < nCells; ++iCell) { + // The fsgrid cells that cover the i'th dccrg cell are pointed at by + // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. + // We want to average over those who are not DO_NOT_COMPUTE to get the value for the dccrg cell + if ((transferBufferPointerTechnical[i] + iCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { + continue; + } else { + nCellsToSum++; -// std::array* dperb = dperbTransferBufferPointer[i] + iCell; -// std::array* dmoments = dmomentsTransferBufferPointer[i] + iCell; + std::array* dperb = dperbTransferBufferPointer[i] + iCell; + std::array* dmoments = dmomentsTransferBufferPointer[i] + iCell; -// for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] += dmoments->at(j.second); -// for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] += dperb ->at(j.second); -// } -// } + for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] += dmoments->at(j.second); + for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] += dperb ->at(j.second); + } + } -// if (nCellsToSum > 0) { -// for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; -// for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; -// } -// } -// } + if (nCellsToSum > 0) { + for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; + for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; + } + } +} /* diff --git a/grid.cpp b/grid.cpp index f3ac02a8f..ef16201c7 100644 --- a/grid.cpp +++ b/grid.cpp @@ -288,24 +288,24 @@ void initializeGrids( phiprof::stop("Init moments"); } - // phiprof::start("Initial fsgrid coupling"); - // // Couple FSGrids to mpiGrid. Note that the coupling information is shared - // // between them. - // technicalGrid.setupForGridCoupling(cells.size()); + phiprof::start("Initial fsgrid coupling"); + // Couple FSGrids to mpiGrid. Note that the coupling information is shared + // between them. + technicalGrid.setupForGridCoupling(cells.size()); - // // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. - // // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. - // for(auto& dccrgId : cells) { - // const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); + // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. + // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. 
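   // (Scale note, using the names from the surrounding code: fsgrid lives uniformly at
   // the maximum refinement level L, so a dccrg cell at refinement level r is covered by
   // pow(pow(2, L - r), 3) fsgrid cells -- 1 at the maximum level, 8 one level coarser,
   // 64 two levels coarser. mapDccrgIdToFsGridGlobalID returns that many ids per cell.)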
+ for(auto& dccrgId : cells) { + const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); - // for (auto fsgridId : fsgridIds) { + for (auto fsgridId : fsgridIds) { - // technicalGrid.setGridCoupling(fsgridId, myRank); - // } - // } + technicalGrid.setGridCoupling(fsgridId, myRank); + } + } - // technicalGrid.finishGridCoupling(); - // phiprof::stop("Initial fsgrid coupling"); + technicalGrid.finishGridCoupling(); + phiprof::stop("Initial fsgrid coupling"); phiprof::start("setProjectBField"); project.setProjectBField(perBGrid, BgBGrid, technicalGrid); @@ -314,8 +314,8 @@ void initializeGrids( phiprof::stop("setProjectBField"); phiprof::start("Finish fsgrid setup"); - // getFieldDataFromFsGrid(perBGrid, technicalGrid, mpiGrid, cells, CellParams::PERBX); - // getBgFieldsAndDerivativesFromFsGrid(BgBGrid, technicalGrid, mpiGrid, cells); + getFieldDataFromFsGrid(perBGrid, technicalGrid, mpiGrid, cells, CellParams::PERBX); + getBgFieldsAndDerivativesFromFsGrid(BgBGrid, technicalGrid, mpiGrid, cells); // WARNING this means moments and dt2 moments are the same here. feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,false); diff --git a/iowrite.cpp b/iowrite.cpp index 9bfd95a8c..b8dd249e7 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -1270,7 +1270,7 @@ bool writeRestart(dccrg::Dccrg& mpiGrid, phiprof::start("reduceddataIO"); //write out DROs we need for restarts DataReducer restartReducer; - //restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("background_B",CellParams::BGBX,3)); + restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("background_B",CellParams::BGBX,3)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("perturbed_B",CellParams::PERBX,3)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("moments",CellParams::RHOM,5)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("moments_dt2",CellParams::RHOM_DT2,5)); @@ -1284,7 +1284,7 @@ bool writeRestart(dccrg::Dccrg& mpiGrid, restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("max_v_dt",CellParams::MAXVDT,1)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("max_r_dt",CellParams::MAXRDT,1)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("max_fields_dt",CellParams::MAXFDT,1)); - // restartReducer.addOperator(new DRO::DataReductionOperatorDerivatives("derivatives",0,fieldsolver::N_SPATIAL_CELL_DERIVATIVES)); + restartReducer.addOperator(new DRO::DataReductionOperatorDerivatives("derivatives",0,fieldsolver::N_SPATIAL_CELL_DERIVATIVES)); restartReducer.addOperator(new DRO::DataReductionOperatorBVOLDerivatives("Bvolume_derivatives",0,bvolderivatives::N_BVOL_DERIVATIVES)); restartReducer.addOperator(new DRO::MPIrank); restartReducer.addOperator(new DRO::BoundaryType); diff --git a/spatial_cell.cpp b/spatial_cell.cpp index b54652cbe..632478ad0 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -48,10 +48,10 @@ namespace spatial_cell { this->parameters[i]=0.0; } - // // reset spatial cell derivatives - // for (unsigned int i = 0; i < fieldsolver::N_SPATIAL_CELL_DERIVATIVES; i++) { - // this->derivatives[i]=0; - // } + // reset spatial cell derivatives + for (unsigned int i = 0; i < fieldsolver::N_SPATIAL_CELL_DERIVATIVES; i++) { + this->derivatives[i]=0; + } // reset BVOL derivatives for (unsigned int i = 0; i < bvolderivatives::N_BVOL_DERIVATIVES; i++) { @@ -87,7 +87,7 @@ namespace spatial_cell { mpiTransferEnabled(other.mpiTransferEnabled), populations(other.populations), 
parameters(other.parameters), - // derivatives(other.derivatives), + derivatives(other.derivatives), derivativesBVOL(other.derivativesBVOL), null_block_data(std::array {}) { } @@ -735,11 +735,11 @@ namespace spatial_cell { block_lengths.push_back(sizeof(Real)); } - // // send spatial cell derivatives - // if ((SpatialCell::mpi_transfer_type & Transfer::CELL_DERIVATIVES)!=0){ - // displacements.push_back((uint8_t*) &(this->derivatives[0]) - (uint8_t*) this); - // block_lengths.push_back(sizeof(Real) * fieldsolver::N_SPATIAL_CELL_DERIVATIVES); - // } + // send spatial cell derivatives + if ((SpatialCell::mpi_transfer_type & Transfer::CELL_DERIVATIVES)!=0){ + displacements.push_back((uint8_t*) &(this->derivatives[0]) - (uint8_t*) this); + block_lengths.push_back(sizeof(Real) * fieldsolver::N_SPATIAL_CELL_DERIVATIVES); + } // send spatial cell BVOL derivatives if ((SpatialCell::mpi_transfer_type & Transfer::CELL_BVOL_DERIVATIVES)!=0){ @@ -752,11 +752,11 @@ namespace spatial_cell { block_lengths.push_back(sizeof(uint64_t)); } - // // send Hall term components - // if ((SpatialCell::mpi_transfer_type & Transfer::CELL_HALL_TERM)!=0){ - // displacements.push_back((uint8_t*) &(this->parameters[CellParams::EXHALL_000_100]) - (uint8_t*) this); - // block_lengths.push_back(sizeof(Real) * 12); - // } + // send Hall term components + if ((SpatialCell::mpi_transfer_type & Transfer::CELL_HALL_TERM)!=0){ + displacements.push_back((uint8_t*) &(this->parameters[CellParams::EXHALL_000_100]) - (uint8_t*) this); + block_lengths.push_back(sizeof(Real) * 12); + } // send electron pressure gradient term components if ((SpatialCell::mpi_transfer_type & Transfer::CELL_GRADPE_TERM)!=0){ displacements.push_back((uint8_t*) &(this->parameters[CellParams::EXGRADPE]) - (uint8_t*) this); diff --git a/spatial_cell.hpp b/spatial_cell.hpp index 388055601..6bc7e00f0 100644 --- a/spatial_cell.hpp +++ b/spatial_cell.hpp @@ -317,7 +317,7 @@ namespace spatial_cell { // Member variables // //Real derivatives[fieldsolver::N_SPATIAL_CELL_DERIVATIVES]; /**< Derivatives of bulk variables in this spatial cell.*/ - //std::array derivatives; /**< Derivatives of bulk variables in this spatial cell.*/ + std::array derivatives; /**< Derivatives of bulk variables in this spatial cell.*/ //Real derivativesBVOL[bvolderivatives::N_BVOL_DERIVATIVES]; /**< Derivatives of BVOL needed by the acceleration. // * Separate array because it does not need to be communicated.*/ std::array derivativesBVOL; /**< Derivatives of BVOL needed by the acceleration. 
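The displacement/block-length pairs re-enabled above are later fused into a single derived MPI datatype, so one transfer moves only the selected members of the cell. A minimal self-contained sketch of that pattern (the Cell struct and its members below are illustrative stand-ins, not Vlasiator's real types):

    #include <mpi.h>
    #include <cstdint>
    #include <vector>

    struct Cell {
       double parameters[8];    // stand-in for the full parameter block
       double derivatives[5];   // stand-in member selected for transfer
    };

    int main(int argc, char** argv) {
       MPI_Init(&argc, &argv);
       Cell cell{};
       // Collect (byte offset, byte count) pairs for the members to transfer,
       // exactly like the displacements/block_lengths vectors above.
       std::vector<MPI_Aint> displacements;
       std::vector<int> block_lengths;
       displacements.push_back((uint8_t*) &cell.derivatives[0] - (uint8_t*) &cell);
       block_lengths.push_back(sizeof(double) * 5);
       // Fuse the pairs into one datatype; MPI_Send(&cell, 1, cellType, ...) would
       // then move just those bytes.
       MPI_Datatype cellType;
       MPI_Type_create_hindexed((int) displacements.size(), block_lengths.data(),
                                displacements.data(), MPI_BYTE, &cellType);
       MPI_Type_commit(&cellType);
       MPI_Type_free(&cellType);
       MPI_Finalize();
       return 0;
    }

MPI_Type_create_hindexed takes byte displacements relative to the buffer address passed to the send, which is why the offsets above are computed against &cell.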
diff --git a/vlasiator.cpp b/vlasiator.cpp index 035df5fd2..4a3c80cec 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -547,11 +547,11 @@ int main(int argn,char* args[]) { if (P::writeInitialState) { phiprof::start("write-initial-state"); phiprof::start("fsgrid-coupling-out"); - // getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX); - // getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); - // getFieldDataFromFsGrid(EHallGrid,technicalGrid,mpiGrid,cells,CellParams::EXHALL_000_100); - // getFieldDataFromFsGrid(EGradPeGrid,technicalGrid,mpiGrid,cells,CellParams::EXGRADPE); - // getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, technicalGrid, mpiGrid, cells); + getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX); + getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); + getFieldDataFromFsGrid(EHallGrid,technicalGrid,mpiGrid,cells,CellParams::EXHALL_000_100); + getFieldDataFromFsGrid(EGradPeGrid,technicalGrid,mpiGrid,cells,CellParams::EXGRADPE); + getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, technicalGrid, mpiGrid, cells); phiprof::stop("fsgrid-coupling-out"); if (myRank == MASTER_RANK) @@ -735,16 +735,14 @@ int main(int argn,char* args[]) { it != P::diagnosticVariableList.end(); it++) { if (*it == "FluxB") { - if(myRank == MASTER_RANK) cerr << __FILE__ << " " << __LINE__ << "ERROR: Diagnostic output from FsGrid is no longer supported!" << endl; - // phiprof::start("fsgrid-coupling-out"); - // getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX); - // phiprof::stop("fsgrid-coupling-out"); - } + phiprof::start("fsgrid-coupling-out"); + getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX); + phiprof::stop("fsgrid-coupling-out"); + } if (*it == "FluxE") { - if(myRank == MASTER_RANK) cerr << __FILE__ << " " << __LINE__ << "ERROR: Diagnostic output from FsGrid is no longer supported!" 
<< endl; - // phiprof::start("fsgrid-coupling-out"); - // getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); - // phiprof::stop("fsgrid-coupling-out"); + phiprof::start("fsgrid-coupling-out"); + getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); + phiprof::stop("fsgrid-coupling-out"); } } @@ -760,42 +758,42 @@ int main(int argn,char* args[]) { // write system, loop through write classes for (uint i = 0; i < P::systemWriteTimeInterval.size(); i++) { if (P::systemWriteTimeInterval[i] >= 0.0 && - P::t >= P::systemWrites[i] * P::systemWriteTimeInterval[i] - DT_EPSILON) { - // if (extractFsGridFields) { - // vector::const_iterator it; - // for (it = P::outputVariableList.begin(); - // it != P::outputVariableList.end(); - // it++) { - // if (*it == "B" || - // *it == "PerturbedB" - // ) { - // phiprof::start("fsgrid-coupling-out"); - // getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX); - // phiprof::stop("fsgrid-coupling-out"); - // } - // if (*it == "E") { - // phiprof::start("fsgrid-coupling-out"); - // getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); - // phiprof::stop("fsgrid-coupling-out"); - // } - // if (*it == "HallE") { - // phiprof::start("fsgrid-coupling-out"); - // getFieldDataFromFsGrid(EHallGrid,technicalGrid,mpiGrid,cells,CellParams::EXHALL_000_100); - // phiprof::stop("fsgrid-coupling-out"); - // } - // if (*it == "GradPeE") { - // phiprof::start("fsgrid-coupling-out"); - // getFieldDataFromFsGrid(EGradPeGrid,technicalGrid,mpiGrid,cells,CellParams::EXGRADPE); - // phiprof::stop("fsgrid-coupling-out"); - // } - // if (*it == "derivs") { - // phiprof::start("fsgrid-coupling-out"); - // getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, technicalGrid, mpiGrid, cells); - // phiprof::stop("fsgrid-coupling-out"); - // } - // } - // extractFsGridFields = false; - // } + P::t >= P::systemWrites[i] * P::systemWriteTimeInterval[i] - DT_EPSILON) { + if (extractFsGridFields) { + vector::const_iterator it; + for (it = P::outputVariableList.begin(); + it != P::outputVariableList.end(); + it++) { + if (*it == "B" || + *it == "PerturbedB" + ) { + phiprof::start("fsgrid-coupling-out"); + getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX); + phiprof::stop("fsgrid-coupling-out"); + } + if (*it == "E") { + phiprof::start("fsgrid-coupling-out"); + getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); + phiprof::stop("fsgrid-coupling-out"); + } + if (*it == "HallE") { + phiprof::start("fsgrid-coupling-out"); + getFieldDataFromFsGrid(EHallGrid,technicalGrid,mpiGrid,cells,CellParams::EXHALL_000_100); + phiprof::stop("fsgrid-coupling-out"); + } + if (*it == "GradPeE") { + phiprof::start("fsgrid-coupling-out"); + getFieldDataFromFsGrid(EGradPeGrid,technicalGrid,mpiGrid,cells,CellParams::EXGRADPE); + phiprof::stop("fsgrid-coupling-out"); + } + if (*it == "derivs") { + phiprof::start("fsgrid-coupling-out"); + getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, technicalGrid, mpiGrid, cells); + phiprof::stop("fsgrid-coupling-out"); + } + } + extractFsGridFields = false; + } phiprof::start("write-system"); logFile << "(IO): Writing spatial cell and reduced system data to disk, tstep = " << P::tstep << " t = " << P::t << endl << writeVerbose; From 9c8ad10261079d23430ad2927efdc5ccf4c2597e Mon Sep 17 00:00:00 2001 From: ykempf Date: Tue, 14 May 2019 14:20:28 +0300 Subject: [PATCH 411/602] Removed unused code from ionosphere.cpp. 
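The removed block derived the local cell size from the cell's refinement level as dx = P::dx_ini * 2^(-refLvl) (and likewise dy, dz), i.e. every refinement level halves the cell edge, so e.g. refLvl = 2 gives dx_ini / 4. None of these values were used afterwards, hence the deletion.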
---
 sysboundary/ionosphere.cpp | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp
index 3463faf55..f0cb26e6f 100644
--- a/sysboundary/ionosphere.cpp
+++ b/sysboundary/ionosphere.cpp
@@ -219,15 +219,6 @@ namespace SBC {
             cellCenterCoords[0] += 0.5 * technicalGrid.DX;
             cellCenterCoords[1] += 0.5 * technicalGrid.DY;
             cellCenterCoords[2] += 0.5 * technicalGrid.DZ;
-            const auto refLvl = mpiGrid.get_refinement_level(mpiGrid.get_existing_cell(cellCenterCoords));
-
-            if(refLvl == -1) {
-               cerr << "Error, could not get refinement level of remote DCCRG cell " << __FILE__ << " " << __LINE__ << endl;
-            }
-
-            creal dx = P::dx_ini * pow(2,-refLvl);
-            creal dy = P::dy_ini * pow(2,-refLvl);
-            creal dz = P::dz_ini * pow(2,-refLvl);

             if(getR(cellCenterCoords[0],cellCenterCoords[1],cellCenterCoords[2],this->geometry,this->center) < this->radius) {
                technicalGrid.get(i,j,k)->sysBoundaryFlag = this->getIndex();

From 548aef774cbeb13cdd7e94d87069fccec246d3cf Mon Sep 17 00:00:00 2001
From: ykempf
Date: Tue, 14 May 2019 14:54:08 +0300
Subject: [PATCH 412/602] One more pass to set remaining layer 0 cells to DO_NOT_COMPUTE inside any boundary.

---
 sysboundary/sysboundary.cpp | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp
index 3c1603cb4..b6ea8e29a 100644
--- a/sysboundary/sysboundary.cpp
+++ b/sysboundary/sysboundary.cpp
@@ -552,6 +552,17 @@ bool SysBoundary::classifyCells(dccrg::Dccrg
+   const std::array<int, 3> localSize = technicalGrid.getLocalSize();
+   for (int x = 0; x < localSize[0]; ++x) {
+      for (int y = 0; y < localSize[1]; ++y) {
+         for (int z = 0; z < localSize[2]; ++z) {
+            if (technicalGrid.get(x,y,z)->sysBoundaryLayer == 0 && technicalGrid.get(x,y,z)->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) {
+               technicalGrid.get(x,y,z)->sysBoundaryFlag = sysboundarytype::DO_NOT_COMPUTE;
+            }
+         }
+      }
+   }
+   technicalGrid.updateGhostCells();
 
    return success;

From b03743eb5581f265b2b2b778fb9c3ad6b75c644b Mon Sep 17 00:00:00 2001
From: ykempf
Date: Tue, 14 May 2019 14:57:26 +0300
Subject: [PATCH 413/602] Commented the previous addition.

---
 sysboundary/sysboundary.cpp | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp
index b6ea8e29a..479536f44 100644
--- a/sysboundary/sysboundary.cpp
+++ b/sysboundary/sysboundary.cpp
@@ -552,6 +552,9 @@ bool SysBoundary::classifyCells(dccrg::Dccrg
+   // One more pass to set the remaining layer 0 cells to DO_NOT_COMPUTE:
+   // a cell that still has layer 0 but carries a boundary flag at this point
+   // lies inside a boundary and is not propagated by the solvers.

From ... Mon Sep 17 00:00:00 2001
From: ykempf
Date: Tue, 14 May 2019 15:03:42 +0300
Subject: [PATCH 414/602] Updating field variables in initializeGrids. Now diffs to 0 with dev branch.

---
 grid.cpp      |  9 +++++++++
 grid.h        |  2 ++
 vlasiator.cpp | 30 ++++++++++++++++--------------
 3 files changed, 27 insertions(+), 14 deletions(-)

diff --git a/grid.cpp b/grid.cpp
index f3ac02a8f..8cb324b47 100644
--- a/grid.cpp
+++ b/grid.cpp
@@ -91,6 +91,8 @@ void initializeGrids(
    FsGrid< std::array, 2>& BgBGrid,
    FsGrid< std::array, 2> & momentsGrid,
    FsGrid< std::array, 2> & momentsDt2Grid,
+   FsGrid< std::array, 2> & EGradPeGrid,
+   FsGrid< std::array, 2> & volGrid,
    FsGrid< fsgrids::technical, 2>& technicalGrid,
    SysBoundary& sysBoundaries,
    Project& project
@@ -313,6 +315,13 @@ void initializeGrids(
    BgBGrid.updateGhostCells();
    phiprof::stop("setProjectBField");

+   phiprof::start("getFieldsFromFsGrid");
+   // These should be done by initializeFieldPropagator() if the propagation is turned off.
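   // Refresh the fsgrid halos first so that getFieldsFromFsGrid below reads current
   // remote-neighbour data; this ordering is inferred from the call sequence.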
+ volGrid.updateGhostCells(); + technicalGrid.updateGhostCells(); + getFieldsFromFsGrid(volGrid, BgBGrid, EGradPeGrid, technicalGrid, mpiGrid, cells); + phiprof::stop("getFieldsFromFsGrid"); + phiprof::start("Finish fsgrid setup"); // getFieldDataFromFsGrid(perBGrid, technicalGrid, mpiGrid, cells, CellParams::PERBX); // getBgFieldsAndDerivativesFromFsGrid(BgBGrid, technicalGrid, mpiGrid, cells); diff --git a/grid.h b/grid.h index 316c4fb3e..e457d8154 100644 --- a/grid.h +++ b/grid.h @@ -41,6 +41,8 @@ void initializeGrids( FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2> & momentsGrid, FsGrid< std::array, 2> & momentsDt2Grid, + FsGrid< std::array, 2> & EGradPeGrid, + FsGrid< std::array, 2> & volGrid, FsGrid< fsgrids::technical, 2>& technicalGrid, SysBoundary& sysBoundaries, Project& project diff --git a/vlasiator.cpp b/vlasiator.cpp index 035df5fd2..ea3eaa4d3 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -452,6 +452,8 @@ int main(int argn,char* args[]) { BgBGrid, momentsGrid, momentsDt2Grid, + EGradPeGrid, + volGrid, technicalGrid, sysBoundaries, *project @@ -546,13 +548,13 @@ int main(int argn,char* args[]) { // Save restart data if (P::writeInitialState) { phiprof::start("write-initial-state"); - phiprof::start("fsgrid-coupling-out"); + // phiprof::start("fsgrid-coupling-out"); // getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX); // getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); // getFieldDataFromFsGrid(EHallGrid,technicalGrid,mpiGrid,cells,CellParams::EXHALL_000_100); // getFieldDataFromFsGrid(EGradPeGrid,technicalGrid,mpiGrid,cells,CellParams::EXGRADPE); // getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, technicalGrid, mpiGrid, cells); - phiprof::stop("fsgrid-coupling-out"); + // phiprof::stop("fsgrid-coupling-out"); if (myRank == MASTER_RANK) logFile << "(IO): Writing initial state to disk, tstep = " << endl << writeVerbose; @@ -916,22 +918,22 @@ int main(int argn,char* args[]) { // Re-couple fsgrids to updated grid situation phiprof::start("fsgrid-recouple-after-lb"); - const vector& cells = getLocalCells(); +// const vector& cells = getLocalCells(); - technicalGrid. setupForGridCoupling(cells.size()); +// technicalGrid. setupForGridCoupling(cells.size()); - // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. - // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. - for(auto& dccrgId : cells) { - const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); - for (auto& fsgridId : fsgridIds) { +// // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. +// // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. +// for(auto& dccrgId : cells) { +// const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); +// for (auto& fsgridId : fsgridIds) { - technicalGrid. setGridCoupling(fsgridId, myRank); - } - } - // cout << endl; +// technicalGrid. setGridCoupling(fsgridId, myRank); +// } +// } +// // cout << endl; - technicalGrid. finishGridCoupling(); +// technicalGrid. finishGridCoupling(); phiprof::stop("fsgrid-recouple-after-lb"); From 45eb654e6fd424d5c859618dc9a20f15906dd6de Mon Sep 17 00:00:00 2001 From: ykempf Date: Tue, 14 May 2019 15:16:57 +0300 Subject: [PATCH 415/602] vlsv_buffer_size default = 0 to avoid crashes on sisu in the testpackage. 
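Note that the buffer can still be enabled per run through the normal configuration mechanism; a hypothetical snippet using the standard section/option syntax read by Readparameters:

    [io]
    vlsv_buffer_size = 1073741824   # 1 GiB, the previous default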
---
 parameters.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/parameters.cpp b/parameters.cpp
index 1222e6485..0ec1d79ae 100644
--- a/parameters.cpp
+++ b/parameters.cpp
@@ -89,7 +89,7 @@ std::vector> P::systemWriteHints;
 Real P::saveRestartWalltimeInterval = -1.0;
 uint P::exitAfterRestarts = numeric_limits<uint>::max();
-uint64_t P::vlsvBufferSize;
+uint64_t P::vlsvBufferSize = 0;
 int P::restartStripeFactor = -1;
 string P::restartWritePath = string("");
@@ -162,7 +162,7 @@ bool Parameters::addParameters(){
    Readparameters::add("io.restart_walltime_interval","Save the complete simulation in given walltime intervals. Negative values disable writes.",-1.0);
    Readparameters::add("io.number_of_restarts","Exit the simulation after certain number of walltime-based restarts.",numeric_limits<uint>::max());
-   Readparameters::add("io.vlsv_buffer_size", "Buffer size passed to VLSV writer (bytes, up to uint64_t)", 1024*1024*1024);
+   Readparameters::add("io.vlsv_buffer_size", "Buffer size passed to VLSV writer (bytes, up to uint64_t), default 0 as this is sensible on sisu", 0);
    Readparameters::add("io.write_restart_stripe_factor","Stripe factor for restart writing.", -1);
    Readparameters::add("io.write_as_float","If true, write in floats instead of doubles", false);
    Readparameters::add("io.restart_write_path", "Path to the location where restart files should be written. Defaults to the local directory, also if the specified destination is not writeable.", string("./"));

From 99c11654654fc8dc1138317cb77c22bf94c7dd48 Mon Sep 17 00:00:00 2001
From: Tuomas Koskela
Date: Tue, 14 May 2019 15:22:46 +0300
Subject: [PATCH 416/602] Revert "Revert "Removed unused fsgrid variables from spatial_cell and the associated coupling function calls and datareducer calls. Compiles, pushing to test on sisu.""

This reverts commit 858edaaed7f01fa83c8b1295d63ec998434d4feb.

---
 common.h                                | 146 ++++----
 datareduction/datareducer.cpp           | 204 +++++------
 datareduction/datareductionoperator.cpp |  17 +-
 fieldsolver/gridGlue.cpp                | 428 ++++++++++++------------
 grid.cpp                                |  32 +-
 iowrite.cpp                             |   4 +-
 spatial_cell.cpp                        |  30 +-
 spatial_cell.hpp                        |   2 +-
 vlasiator.cpp                           |  95 +++---
 9 files changed, 476 insertions(+), 482 deletions(-)

diff --git a/common.h b/common.h
index 8ca859c52..ef3798ef9 100644
--- a/common.h
+++ b/common.h
@@ -133,9 +133,9 @@ namespace CellParams {
      EX,   /*!< Total electric field x-component, averaged over cell edge. Used to propagate BX,BY,BZ.*/
      EY,   /*!< Total electric field y-component, averaged over cell edge. Used to propagate BX,BY,BZ.*/
      EZ,   /*!< Total electric field z-component, averaged over cell edge. Used to propagate BX,BY,BZ.*/
-     BGBX, /*!< Background magnetic field x-component, averaged over cell x-face.*/
-     BGBY, /*!< Background magnetic field y-component, averaged over cell y-face.*/
-     BGBZ, /*!< Background magnetic field z-component, averaged over cell z-face.*/
+     /* BGBX, /\*!< Background magnetic field x-component, averaged over cell x-face.*\/ */
+     /* BGBY, /\*!< Background magnetic field y-component, averaged over cell y-face.*\/ */
+     /* BGBZ, /\*!< Background magnetic field z-component, averaged over cell z-face.*\/ */
      PERBX, /*!< Perturbed Magnetic field x-component, averaged over cell x-face. Propagated by field solver.*/
      PERBY, /*!< Perturbed Magnetic field y-component, averaged over cell y-face. Propagated by field solver.*/
      PERBZ, /*!< Perturbed Magnetic field z-component, averaged over cell z-face.
Propagated by field solver.*/ @@ -164,18 +164,18 @@ namespace CellParams { EXVOL, /*!< Ex averaged over spatial cell.*/ EYVOL, /*!< Ey averaged over spatial cell.*/ EZVOL, /*!< Ez averaged over spatial cell.*/ - EXHALL_000_100, /*!< Hall term x averaged along x on -y/-z edge of spatial cell.*/ - EYHALL_000_010, /*!< Hall term y averaged along y on -x/-z edge of spatial cell.*/ - EZHALL_000_001, /*!< Hall term z averaged along z on -x/-y edge of spatial cell.*/ - EYHALL_100_110, /*!< Hall term y averaged along y on +x/-z edge of spatial cell.*/ - EZHALL_100_101, /*!< Hall term z averaged along z on +x/-y edge of spatial cell.*/ - EXHALL_010_110, /*!< Hall term x averaged along x on +y/-z edge of spatial cell.*/ - EZHALL_010_011, /*!< Hall term z averaged along z on +y/-x edge of spatial cell.*/ - EZHALL_110_111, /*!< Hall term z averaged along z on +x/+y edge of spatial cell.*/ - EXHALL_001_101, /*!< Hall term x averaged along x on -y/+z edge of spatial cell.*/ - EYHALL_001_011, /*!< Hall term y averaged along y on -x/+z edge of spatial cell.*/ - EYHALL_101_111, /*!< Hall term y averaged along y on +x/+z edge of spatial cell.*/ - EXHALL_011_111, /*!< Hall term x averaged along x on +y/+z edge of spatial cell.*/ + /* EXHALL_000_100, /\*!< Hall term x averaged along x on -y/-z edge of spatial cell.*\/ */ + /* EYHALL_000_010, /\*!< Hall term y averaged along y on -x/-z edge of spatial cell.*\/ */ + /* EZHALL_000_001, /\*!< Hall term z averaged along z on -x/-y edge of spatial cell.*\/ */ + /* EYHALL_100_110, /\*!< Hall term y averaged along y on +x/-z edge of spatial cell.*\/ */ + /* EZHALL_100_101, /\*!< Hall term z averaged along z on +x/-y edge of spatial cell.*\/ */ + /* EXHALL_010_110, /\*!< Hall term x averaged along x on +y/-z edge of spatial cell.*\/ */ + /* EZHALL_010_011, /\*!< Hall term z averaged along z on +y/-x edge of spatial cell.*\/ */ + /* EZHALL_110_111, /\*!< Hall term z averaged along z on +x/+y edge of spatial cell.*\/ */ + /* EXHALL_001_101, /\*!< Hall term x averaged along x on -y/+z edge of spatial cell.*\/ */ + /* EYHALL_001_011, /\*!< Hall term y averaged along y on -x/+z edge of spatial cell.*\/ */ + /* EYHALL_101_111, /\*!< Hall term y averaged along y on +x/+z edge of spatial cell.*\/ */ + /* EXHALL_011_111, /\*!< Hall term x averaged along x on +y/+z edge of spatial cell.*\/ */ EXGRADPE, /*!< Electron pressure gradient term x.*/ EYGRADPE, /*!< Electron pressure gradient term y.*/ EZGRADPE, /*!< Electron pressure gradient term z.*/ @@ -232,64 +232,64 @@ namespace CellParams { */ namespace fieldsolver { enum { - drhomdx, /*!< Derivative of volume-averaged mass density to x-direction. */ - drhomdy, /*!< Derivative of volume-averaged mass density to y-direction. */ - drhomdz, /*!< Derivative of volume-averaged mass density to z-direction. */ - drhoqdx, /*!< Derivative of volume-averaged charge density to x-direction. */ - drhoqdy, /*!< Derivative of volume-averaged charge density to y-direction. */ - drhoqdz, /*!< Derivative of volume-averaged charge density to z-direction. */ - dBGBxdy, /*!< Derivative of face-averaged Bx to y-direction. */ - dBGBxdz, /*!< Derivative of face-averaged Bx to z-direction. */ - dBGBydx, /*!< Derivative of face-averaged By to x-direction. */ - dBGBydz, /*!< Derivative of face-averaged By to z-direction. */ - dBGBzdx, /*!< Derivative of face-averaged Bz to x-direction. */ - dBGBzdy, /*!< Derivative of face-averaged Bz to y-direction. */ - dPERBxdy, /*!< Derivative of face-averaged Bx to y-direction. 
*/ - dPERBxdz, /*!< Derivative of face-averaged Bx to z-direction. */ - dPERBydx, /*!< Derivative of face-averaged By to x-direction. */ - dPERBydz, /*!< Derivative of face-averaged By to z-direction. */ - dPERBzdx, /*!< Derivative of face-averaged Bz to x-direction. */ - dPERBzdy, /*!< Derivative of face-averaged Bz to y-direction. */ - // Insert for Hall term - // NOTE 2nd derivatives of BGBn are not needed as curl(dipole) = 0.0 - // will change if BGB is not curl-free -// dBGBxdyy, /*!< Second derivative of face-averaged Bx to yy-direction. */ -// dBGBxdzz, /*!< Second derivative of face-averaged Bx to zz-direction. */ -// dBGBxdyz, /*!< Second derivative of face-averaged Bx to yz-direction. */ -// dBGBydxx, /*!< Second derivative of face-averaged By to xx-direction. */ -// dBGBydzz, /*!< Second derivative of face-averaged By to zz-direction. */ -// dBGBydxz, /*!< Second derivative of face-averaged By to xz-direction. */ -// dBGBzdxx, /*!< Second derivative of face-averaged Bz to xx-direction. */ -// dBGBzdyy, /*!< Second derivative of face-averaged Bz to yy-direction. */ -// dBGBzdxy, /*!< Second derivative of face-averaged Bz to xy-direction. */ - dPERBxdyy, /*!< Second derivative of face-averaged Bx to yy-direction. */ - dPERBxdzz, /*!< Second derivative of face-averaged Bx to zz-direction. */ - dPERBxdyz, /*!< Second derivative of face-averaged Bx to yz-direction. */ - dPERBydxx, /*!< Second derivative of face-averaged By to xx-direction. */ - dPERBydzz, /*!< Second derivative of face-averaged By to zz-direction. */ - dPERBydxz, /*!< Second derivative of face-averaged By to xz-direction. */ - dPERBzdxx, /*!< Second derivative of face-averaged Bz to xx-direction. */ - dPERBzdyy, /*!< Second derivative of face-averaged Bz to yy-direction. */ - dPERBzdxy, /*!< Second derivative of face-averaged Bz to xy-direction. */ - dp11dx, /*!< Derivative of P_11 to x direction. */ - dp11dy, /*!< Derivative of P_11 to x direction. */ - dp11dz, /*!< Derivative of P_11 to x direction. */ - dp22dx, /*!< Derivative of P_22 to y direction. */ - dp22dy, /*!< Derivative of P_22 to y direction. */ - dp22dz, /*!< Derivative of P_22 to y direction. */ - dp33dx, /*!< Derivative of P_33 to z direction. */ - dp33dy, /*!< Derivative of P_33 to z direction. */ - dp33dz, /*!< Derivative of P_33 to z direction. */ - // End of insert for Hall term - dVxdx, /*!< Derivative of volume-averaged Vx to x-direction. */ - dVxdy, /*!< Derivative of volume-averaged Vx to y-direction. */ - dVxdz, /*!< Derivative of volume-averaged Vx to z-direction. */ - dVydx, /*!< Derivative of volume-averaged Vy to x-direction. */ - dVydy, /*!< Derivative of volume-averaged Vy to y-direction. */ - dVydz, /*!< Derivative of volume-averaged Vy to z-direction. */ - dVzdx, /*!< Derivative of volume-averaged Vz to x-direction. */ - dVzdy, /*!< Derivative of volume-averaged Vz to y-direction. */ - dVzdz, /*!< Derivative of volume-averaged Vz to z-direction. */ +/* drhomdx, /\*!< Derivative of volume-averaged mass density to x-direction. *\/ */ +/* drhomdy, /\*!< Derivative of volume-averaged mass density to y-direction. *\/ */ +/* drhomdz, /\*!< Derivative of volume-averaged mass density to z-direction. *\/ */ +/* drhoqdx, /\*!< Derivative of volume-averaged charge density to x-direction. *\/ */ +/* drhoqdy, /\*!< Derivative of volume-averaged charge density to y-direction. *\/ */ +/* drhoqdz, /\*!< Derivative of volume-averaged charge density to z-direction. *\/ */ +/* dBGBxdy, /\*!< Derivative of face-averaged Bx to y-direction. 
*\/ */ +/* dBGBxdz, /\*!< Derivative of face-averaged Bx to z-direction. *\/ */ +/* dBGBydx, /\*!< Derivative of face-averaged By to x-direction. *\/ */ +/* dBGBydz, /\*!< Derivative of face-averaged By to z-direction. *\/ */ +/* dBGBzdx, /\*!< Derivative of face-averaged Bz to x-direction. *\/ */ +/* dBGBzdy, /\*!< Derivative of face-averaged Bz to y-direction. *\/ */ +/* dPERBxdy, /\*!< Derivative of face-averaged Bx to y-direction. *\/ */ +/* dPERBxdz, /\*!< Derivative of face-averaged Bx to z-direction. *\/ */ +/* dPERBydx, /\*!< Derivative of face-averaged By to x-direction. *\/ */ +/* dPERBydz, /\*!< Derivative of face-averaged By to z-direction. *\/ */ +/* dPERBzdx, /\*!< Derivative of face-averaged Bz to x-direction. *\/ */ +/* dPERBzdy, /\*!< Derivative of face-averaged Bz to y-direction. *\/ */ +/* // Insert for Hall term */ +/* // NOTE 2nd derivatives of BGBn are not needed as curl(dipole) = 0.0 */ +/* // will change if BGB is not curl-free */ +/* // dBGBxdyy, /\*!< Second derivative of face-averaged Bx to yy-direction. *\/ */ +/* // dBGBxdzz, /\*!< Second derivative of face-averaged Bx to zz-direction. *\/ */ +/* // dBGBxdyz, /\*!< Second derivative of face-averaged Bx to yz-direction. *\/ */ +/* // dBGBydxx, /\*!< Second derivative of face-averaged By to xx-direction. *\/ */ +/* // dBGBydzz, /\*!< Second derivative of face-averaged By to zz-direction. *\/ */ +/* // dBGBydxz, /\*!< Second derivative of face-averaged By to xz-direction. *\/ */ +/* // dBGBzdxx, /\*!< Second derivative of face-averaged Bz to xx-direction. *\/ */ +/* // dBGBzdyy, /\*!< Second derivative of face-averaged Bz to yy-direction. *\/ */ +/* // dBGBzdxy, /\*!< Second derivative of face-averaged Bz to xy-direction. *\/ */ +/* dPERBxdyy, /\*!< Second derivative of face-averaged Bx to yy-direction. *\/ */ +/* dPERBxdzz, /\*!< Second derivative of face-averaged Bx to zz-direction. *\/ */ +/* dPERBxdyz, /\*!< Second derivative of face-averaged Bx to yz-direction. *\/ */ +/* dPERBydxx, /\*!< Second derivative of face-averaged By to xx-direction. *\/ */ +/* dPERBydzz, /\*!< Second derivative of face-averaged By to zz-direction. *\/ */ +/* dPERBydxz, /\*!< Second derivative of face-averaged By to xz-direction. *\/ */ +/* dPERBzdxx, /\*!< Second derivative of face-averaged Bz to xx-direction. *\/ */ +/* dPERBzdyy, /\*!< Second derivative of face-averaged Bz to yy-direction. *\/ */ +/* dPERBzdxy, /\*!< Second derivative of face-averaged Bz to xy-direction. *\/ */ +/* dp11dx, /\*!< Derivative of P_11 to x direction. *\/ */ +/* dp11dy, /\*!< Derivative of P_11 to x direction. *\/ */ +/* dp11dz, /\*!< Derivative of P_11 to x direction. *\/ */ +/* dp22dx, /\*!< Derivative of P_22 to y direction. *\/ */ +/* dp22dy, /\*!< Derivative of P_22 to y direction. *\/ */ +/* dp22dz, /\*!< Derivative of P_22 to y direction. *\/ */ +/* dp33dx, /\*!< Derivative of P_33 to z direction. *\/ */ +/* dp33dy, /\*!< Derivative of P_33 to z direction. *\/ */ +/* dp33dz, /\*!< Derivative of P_33 to z direction. *\/ */ +/* // End of insert for Hall term */ +/* dVxdx, /\*!< Derivative of volume-averaged Vx to x-direction. *\/ */ +/* dVxdy, /\*!< Derivative of volume-averaged Vx to y-direction. *\/ */ +/* dVxdz, /\*!< Derivative of volume-averaged Vx to z-direction. *\/ */ +/* dVydx, /\*!< Derivative of volume-averaged Vy to x-direction. *\/ */ +/* dVydy, /\*!< Derivative of volume-averaged Vy to y-direction. *\/ */ +/* dVydz, /\*!< Derivative of volume-averaged Vy to z-direction. 
*\/ */ +/* dVzdx, /\*!< Derivative of volume-averaged Vz to x-direction. *\/ */ +/* dVzdy, /\*!< Derivative of volume-averaged Vz to y-direction. *\/ */ +/* dVzdz, /\*!< Derivative of volume-averaged Vz to z-direction. *\/ */ N_SPATIAL_CELL_DERIVATIVES }; } diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 7aa7f67ba..488868830 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -101,10 +101,10 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); continue; } - if(*it == "vg_BackgroundB") { // Static (typically dipole) magnetic field part - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("background_B",CellParams::BGBX,3)); - continue; - } + // if(*it == "vg_BackgroundB") { // Static (typically dipole) magnetic field part + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("background_B",CellParams::BGBX,3)); + // continue; + // } if(*it == "fg_PerturbedB" || *it == "PerturbedB") { // Fluctuating magnetic field part outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_perturbed_B",[]( FsGrid< std::array, 2>& perBGrid, @@ -539,27 +539,27 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); continue; } - if(*it == "HallE") { - // 12 corner components of the hall-effect contribution to the electric field - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_000_100",CellParams::EXHALL_000_100,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_001_101",CellParams::EXHALL_001_101,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_010_110",CellParams::EXHALL_010_110,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_011_111",CellParams::EXHALL_011_111,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_000_010",CellParams::EYHALL_000_010,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_001_011",CellParams::EYHALL_001_011,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_100_110",CellParams::EYHALL_100_110,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_101_111",CellParams::EYHALL_101_111,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_000_001",CellParams::EZHALL_000_001,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_010_011",CellParams::EZHALL_010_011,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_100_101",CellParams::EZHALL_100_101,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_110_111",CellParams::EZHALL_110_111,1)); - continue; - } - if(*it =="GradPeE") { - // Electron pressure gradient contribution to the generalized ohm's law - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EGRADPE",CellParams::EXGRADPE,3)); - continue; - } + // if(*it == "HallE") { + // // 12 corner components of the hall-effect contribution to the electric field + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_000_100",CellParams::EXHALL_000_100,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_001_101",CellParams::EXHALL_001_101,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_010_110",CellParams::EXHALL_010_110,1)); + // 
outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_011_111",CellParams::EXHALL_011_111,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_000_010",CellParams::EYHALL_000_010,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_001_011",CellParams::EYHALL_001_011,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_100_110",CellParams::EYHALL_100_110,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_101_111",CellParams::EYHALL_101_111,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_000_001",CellParams::EZHALL_000_001,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_010_011",CellParams::EZHALL_010_011,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_100_101",CellParams::EZHALL_100_101,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_110_111",CellParams::EZHALL_110_111,1)); + // continue; + // } + // if(*it =="GradPeE") { + // // Electron pressure gradient contribution to the generalized ohm's law + // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EGRADPE",CellParams::EXGRADPE,3)); + // continue; + // } if(*it == "VolB" || *it == "vg_VolB") { // Volume-averaged magnetic field outputReducer->addOperator(new DRO::VariableBVol); @@ -651,73 +651,73 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - if(*it == "derivs") { - // Derivatives of all quantities that might be of interest - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdx",fieldsolver::drhomdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdy",fieldsolver::drhomdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdz",fieldsolver::drhomdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdx",fieldsolver::drhoqdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdy",fieldsolver::drhoqdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdz",fieldsolver::drhoqdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dx",fieldsolver::dp11dx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dx",fieldsolver::dp22dx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dx",fieldsolver::dp33dx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dy",fieldsolver::dp11dy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dy",fieldsolver::dp22dy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dy",fieldsolver::dp33dy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dz",fieldsolver::dp11dz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dz",fieldsolver::dp22dz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dz",fieldsolver::dp33dz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdy",fieldsolver::dPERBxdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBxdy",fieldsolver::dBGBxdy,1)); - outputReducer->addOperator(new 
DRO::DataReductionOperatorDerivatives("dPERBxdz",fieldsolver::dPERBxdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBxdz",fieldsolver::dBGBxdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydx",fieldsolver::dPERBydx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBydx",fieldsolver::dBGBydx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydz",fieldsolver::dPERBydz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBydz",fieldsolver::dBGBydz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdx",fieldsolver::dPERBzdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBzdx",fieldsolver::dBGBzdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdy",fieldsolver::dPERBzdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBzdy",fieldsolver::dBGBzdy,1)); - if(Parameters::ohmHallTerm == 2) { - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdyy",fieldsolver::dPERBxdyy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdzz",fieldsolver::dPERBxdzz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydxx",fieldsolver::dPERBydxx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydzz",fieldsolver::dPERBydzz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdxx",fieldsolver::dPERBzdxx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdyy",fieldsolver::dPERBzdyy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdyz",fieldsolver::dPERBxdyz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydxz",fieldsolver::dPERBydxz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdxy",fieldsolver::dPERBzdxy,1)); - } - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdx",fieldsolver::dVxdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdy",fieldsolver::dVxdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdz",fieldsolver::dVxdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydx",fieldsolver::dVydx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydy",fieldsolver::dVydy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydz",fieldsolver::dVydz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdx",fieldsolver::dVzdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdy",fieldsolver::dVzdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdz",fieldsolver::dVzdz,1)); - continue; - } - if(*it == "BVOLderivs") { - // Volume-averaged derivatives - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdy",bvolderivatives::dPERBXVOLdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBXVOLdy",bvolderivatives::dBGBXVOLdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdz",bvolderivatives::dPERBXVOLdz,1)); - outputReducer->addOperator(new 
DRO::DataReductionOperatorBVOLDerivatives("dBGBXVOLdz",bvolderivatives::dBGBXVOLdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdx",bvolderivatives::dPERBYVOLdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBYVOLdx",bvolderivatives::dBGBYVOLdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdz",bvolderivatives::dPERBYVOLdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBYVOLdz",bvolderivatives::dBGBYVOLdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdx",bvolderivatives::dPERBZVOLdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBZVOLdx",bvolderivatives::dBGBZVOLdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdy",bvolderivatives::dPERBZVOLdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBZVOLdy",bvolderivatives::dBGBZVOLdy,1)); - continue; - } + // if(*it == "derivs") { + // // Derivatives of all quantities that might be of interest + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdx",fieldsolver::drhomdx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdy",fieldsolver::drhomdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdz",fieldsolver::drhomdz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdx",fieldsolver::drhoqdx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdy",fieldsolver::drhoqdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdz",fieldsolver::drhoqdz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dx",fieldsolver::dp11dx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dx",fieldsolver::dp22dx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dx",fieldsolver::dp33dx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dy",fieldsolver::dp11dy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dy",fieldsolver::dp22dy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dy",fieldsolver::dp33dy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dz",fieldsolver::dp11dz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dz",fieldsolver::dp22dz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dz",fieldsolver::dp33dz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdy",fieldsolver::dPERBxdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBxdy",fieldsolver::dBGBxdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdz",fieldsolver::dPERBxdz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBxdz",fieldsolver::dBGBxdz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydx",fieldsolver::dPERBydx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBydx",fieldsolver::dBGBydx,1)); + // outputReducer->addOperator(new 
DRO::DataReductionOperatorDerivatives("dPERBydz",fieldsolver::dPERBydz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBydz",fieldsolver::dBGBydz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdx",fieldsolver::dPERBzdx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBzdx",fieldsolver::dBGBzdx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdy",fieldsolver::dPERBzdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBzdy",fieldsolver::dBGBzdy,1)); + // if(Parameters::ohmHallTerm == 2) { + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdyy",fieldsolver::dPERBxdyy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdzz",fieldsolver::dPERBxdzz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydxx",fieldsolver::dPERBydxx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydzz",fieldsolver::dPERBydzz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdxx",fieldsolver::dPERBzdxx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdyy",fieldsolver::dPERBzdyy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdyz",fieldsolver::dPERBxdyz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydxz",fieldsolver::dPERBydxz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdxy",fieldsolver::dPERBzdxy,1)); + // } + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdx",fieldsolver::dVxdx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdy",fieldsolver::dVxdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdz",fieldsolver::dVxdz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydx",fieldsolver::dVydx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydy",fieldsolver::dVydy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydz",fieldsolver::dVydz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdx",fieldsolver::dVzdx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdy",fieldsolver::dVzdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdz",fieldsolver::dVzdz,1)); + // continue; + // } + // if(*it == "BVOLderivs") { + // // Volume-averaged derivatives + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdy",bvolderivatives::dPERBXVOLdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBXVOLdy",bvolderivatives::dBGBXVOLdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdz",bvolderivatives::dPERBXVOLdz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBXVOLdz",bvolderivatives::dBGBXVOLdz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdx",bvolderivatives::dPERBYVOLdx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBYVOLdx",bvolderivatives::dBGBYVOLdx,1)); + // outputReducer->addOperator(new 
DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdz",bvolderivatives::dPERBYVOLdz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBYVOLdz",bvolderivatives::dBGBYVOLdz,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdx",bvolderivatives::dPERBZVOLdx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBZVOLdx",bvolderivatives::dBGBZVOLdx,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdy",bvolderivatives::dPERBZVOLdy,1)); + // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBZVOLdy",bvolderivatives::dBGBZVOLdy,1)); + // continue; + // } if(*it == "GridCoordinates") { // Spatial coordinates for each cell outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("X",CellParams::XCRD,1)); @@ -766,16 +766,16 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti for (it = P::diagnosticVariableList.begin(); it != P::diagnosticVariableList.end(); it++) { - if(*it == "FluxB") { - // Overall magnetic flux through the simulation plane - diagnosticReducer->addOperator(new DRO::DiagnosticFluxB); - continue; - } - if(*it == "FluxE") { - // Overall electric flux through the simulation plane - diagnosticReducer->addOperator(new DRO::DiagnosticFluxE); - continue; - } + // if(*it == "FluxB") { + // // Overall magnetic flux through the simulation plane + // diagnosticReducer->addOperator(new DRO::DiagnosticFluxB); + // continue; + // } + // if(*it == "FluxE") { + // // Overall electric flux through the simulation plane + // diagnosticReducer->addOperator(new DRO::DiagnosticFluxE); + // continue; + // } if (*it == "populations_Blocks") { // Per-population total block counts for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index ca5710877..074ec17e4 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -160,15 +160,14 @@ namespace DRO { return true; } - DataReductionOperatorDerivatives::DataReductionOperatorDerivatives(const std::string& name,const unsigned int parameterIndex,const unsigned int vectorSize): - DataReductionOperatorCellParams(name,parameterIndex,vectorSize) { - - } - //a version with derivatives, this is the only function that is different - bool DataReductionOperatorDerivatives::setSpatialCell(const SpatialCell* cell) { - data = &(cell->derivatives[_parameterIndex]); - return true; - } + // DataReductionOperatorDerivatives::DataReductionOperatorDerivatives(const std::string& name,const unsigned int parameterIndex,const unsigned int vectorSize): + // DataReductionOperatorCellParams(name,parameterIndex,vectorSize) { + // } + // //a version with derivatives, this is the only function that is different + // bool DataReductionOperatorDerivatives::setSpatialCell(const SpatialCell* cell) { + // data = &(cell->derivatives[_parameterIndex]); + // return true; + // } DataReductionOperatorBVOLDerivatives::DataReductionOperatorBVOLDerivatives(const std::string& name,const unsigned int parameterIndex,const unsigned int vectorSize): diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 41b91a78a..4c59f022a 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -370,247 +370,247 @@ void getFieldsFromFsGrid( } -void getBgFieldsAndDerivativesFromFsGrid( - FsGrid< std::array, 2>& 
BgBGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid, - dccrg::Dccrg& mpiGrid, - const std::vector& cells -) { - // Setup transfer buffers - cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); - std::vector< std::array > transferBufferBGB(nCellsOnMaxRefLvl); - std::vector< std::array*> transferBufferPointerBGB; - std::vector< fsgrids::technical > transferBufferTechnical(nCellsOnMaxRefLvl); - std::vector< fsgrids::technical*> transferBufferPointerTechnical; +// void getBgFieldsAndDerivativesFromFsGrid( +// FsGrid< std::array, 2>& BgBGrid, +// FsGrid< fsgrids::technical, 2>& technicalGrid, +// dccrg::Dccrg& mpiGrid, +// const std::vector& cells +// ) { +// // Setup transfer buffers +// cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); +// std::vector< std::array > transferBufferBGB(nCellsOnMaxRefLvl); +// std::vector< std::array*> transferBufferPointerBGB; +// std::vector< fsgrids::technical > transferBufferTechnical(nCellsOnMaxRefLvl); +// std::vector< fsgrids::technical*> transferBufferPointerTechnical; - // Setup transfer pointers - BgBGrid.setupForTransferOut(nCellsOnMaxRefLvl); - technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); - int k = 0; - for(auto dccrgId : cells) { - const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); - // Store a pointer to the first fsgrid cell that maps to each dccrg Id - transferBufferPointerBGB.push_back(&transferBufferBGB[k]); - transferBufferPointerTechnical.push_back(&transferBufferTechnical[k]); - for (auto fsgridId : fsgridIds) { - std::array* thisCellData = &transferBufferBGB[k]; - BgBGrid.transferDataOut(fsgridId, thisCellData); - fsgrids::technical* thisCellDataTechnical = &transferBufferTechnical[k]; - technicalGrid.transferDataOut(fsgridId, thisCellDataTechnical); - k++; - } - } - // Do the transfer - BgBGrid.finishTransfersOut(); - technicalGrid.finishTransfersOut(); +// // Setup transfer pointers +// BgBGrid.setupForTransferOut(nCellsOnMaxRefLvl); +// technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); +// int k = 0; +// for(auto dccrgId : cells) { +// const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); +// // Store a pointer to the first fsgrid cell that maps to each dccrg Id +// transferBufferPointerBGB.push_back(&transferBufferBGB[k]); +// transferBufferPointerTechnical.push_back(&transferBufferTechnical[k]); +// for (auto fsgridId : fsgridIds) { +// std::array* thisCellData = &transferBufferBGB[k]; +// BgBGrid.transferDataOut(fsgridId, thisCellData); +// fsgrids::technical* thisCellDataTechnical = &transferBufferTechnical[k]; +// technicalGrid.transferDataOut(fsgridId, thisCellDataTechnical); +// k++; +// } +// } +// // Do the transfer +// BgBGrid.finishTransfersOut(); +// technicalGrid.finishTransfersOut(); - // Build lists of index pairs to dccrg and fsgrid - std::vector> iCellParams; - iCellParams.reserve(6); - iCellParams.push_back(std::make_pair(CellParams::BGBX, fsgrids::bgbfield::BGBX)); - iCellParams.push_back(std::make_pair(CellParams::BGBY, fsgrids::bgbfield::BGBY)); - iCellParams.push_back(std::make_pair(CellParams::BGBZ, fsgrids::bgbfield::BGBZ)); - iCellParams.push_back(std::make_pair(CellParams::BGBXVOL, fsgrids::bgbfield::BGBXVOL)); - iCellParams.push_back(std::make_pair(CellParams::BGBYVOL, fsgrids::bgbfield::BGBYVOL)); - iCellParams.push_back(std::make_pair(CellParams::BGBZVOL, fsgrids::bgbfield::BGBZVOL)); - std::vector> iDerivatives; - iDerivatives.reserve(6); - iDerivatives.push_back(std::make_pair(fieldsolver::dBGBxdy, 
fsgrids::bgbfield::dBGBxdy)); - iDerivatives.push_back(std::make_pair(fieldsolver::dBGBxdz, fsgrids::bgbfield::dBGBxdz)); - iDerivatives.push_back(std::make_pair(fieldsolver::dBGBydx, fsgrids::bgbfield::dBGBydx)); - iDerivatives.push_back(std::make_pair(fieldsolver::dBGBydz, fsgrids::bgbfield::dBGBydz)); - iDerivatives.push_back(std::make_pair(fieldsolver::dBGBzdx, fsgrids::bgbfield::dBGBzdx)); - iDerivatives.push_back(std::make_pair(fieldsolver::dBGBzdy, fsgrids::bgbfield::dBGBzdy)); - std::vector> iDerivativesBVOL; - iDerivativesBVOL.reserve(6); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBXVOLdy, fsgrids::bgbfield::dBGBXVOLdy)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBXVOLdz, fsgrids::bgbfield::dBGBXVOLdz)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBYVOLdx, fsgrids::bgbfield::dBGBYVOLdx)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBYVOLdz, fsgrids::bgbfield::dBGBYVOLdz)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBZVOLdx, fsgrids::bgbfield::dBGBZVOLdx)); - iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBZVOLdy, fsgrids::bgbfield::dBGBZVOLdy)); +// // Build lists of index pairs to dccrg and fsgrid +// std::vector> iCellParams; +// iCellParams.reserve(6); +// iCellParams.push_back(std::make_pair(CellParams::BGBX, fsgrids::bgbfield::BGBX)); +// iCellParams.push_back(std::make_pair(CellParams::BGBY, fsgrids::bgbfield::BGBY)); +// iCellParams.push_back(std::make_pair(CellParams::BGBZ, fsgrids::bgbfield::BGBZ)); +// iCellParams.push_back(std::make_pair(CellParams::BGBXVOL, fsgrids::bgbfield::BGBXVOL)); +// iCellParams.push_back(std::make_pair(CellParams::BGBYVOL, fsgrids::bgbfield::BGBYVOL)); +// iCellParams.push_back(std::make_pair(CellParams::BGBZVOL, fsgrids::bgbfield::BGBZVOL)); +// std::vector> iDerivatives; +// iDerivatives.reserve(6); +// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBxdy, fsgrids::bgbfield::dBGBxdy)); +// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBxdz, fsgrids::bgbfield::dBGBxdz)); +// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBydx, fsgrids::bgbfield::dBGBydx)); +// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBydz, fsgrids::bgbfield::dBGBydz)); +// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBzdx, fsgrids::bgbfield::dBGBzdx)); +// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBzdy, fsgrids::bgbfield::dBGBzdy)); +// std::vector> iDerivativesBVOL; +// iDerivativesBVOL.reserve(6); +// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBXVOLdy, fsgrids::bgbfield::dBGBXVOLdy)); +// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBXVOLdz, fsgrids::bgbfield::dBGBXVOLdz)); +// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBYVOLdx, fsgrids::bgbfield::dBGBYVOLdx)); +// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBYVOLdz, fsgrids::bgbfield::dBGBYVOLdz)); +// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBZVOLdx, fsgrids::bgbfield::dBGBZVOLdx)); +// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBZVOLdy, fsgrids::bgbfield::dBGBZVOLdy)); - // Distribute data from the transfer buffer back into the appropriate mpiGrid places - // Disregard DO_NOT_COMPUTE cells - #pragma omp parallel for - for(uint i = 0; i < cells.size(); ++i) { +// // Distribute data from the transfer buffer back into the appropriate mpiGrid places +// // Disregard DO_NOT_COMPUTE cells +// #pragma omp parallel for +// for(uint i = 0; i < 
cells.size(); ++i) { - const CellID dccrgId = cells[i]; - auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); +// const CellID dccrgId = cells[i]; +// auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); - // Calculate the number of fsgrid cells we loop through - cint nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); - // Count the number of fsgrid cells we need to average into the current dccrg cell - int nCellsToSum = 0; +// // Calculate the number of fsgrid cells we loop through +// cint nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); +// // Count the number of fsgrid cells we need to average into the current dccrg cell +// int nCellsToSum = 0; - // TODO: Could optimize here by adding a separate branch for nCells == 1 with direct assignment of the value - // Could also do the average in a temporary value and only access grid structure once. +// // TODO: Could optimize here by adding a separate branch for nCells == 1 with direct assignment of the value +// // Could also do the average in a temporary value and only access grid structure once. - // Initialize values to 0 - for (auto j : iCellParams) cellParams[j.first] = 0.0; - for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; - for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] = 0.0; +// // Initialize values to 0 +// for (auto j : iCellParams) cellParams[j.first] = 0.0; +// for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; +// for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] = 0.0; - for(int iCell = 0; iCell < nCells; ++iCell) { - // The fsgrid cells that cover the i'th dccrg cell are pointed at by - // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. - // We want to average over those who are not DO_NOT_COMPUTE to get the value for the dccrg cell - if ((transferBufferPointerTechnical[i] + iCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { - continue; - } else { - nCellsToSum++; +// for(int iCell = 0; iCell < nCells; ++iCell) { +// // The fsgrid cells that cover the i'th dccrg cell are pointed at by +// // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. 
+// // We want to average over those who are not DO_NOT_COMPUTE to get the value for the dccrg cell +// if ((transferBufferPointerTechnical[i] + iCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { +// continue; +// } else { +// nCellsToSum++; - std::array* thisCellData = transferBufferPointerBGB[i] + iCell; +// std::array* thisCellData = transferBufferPointerBGB[i] + iCell; - for (auto j : iCellParams) cellParams[j.first] += thisCellData->at(j.second); - for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] += thisCellData->at(j.second); - for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] += thisCellData->at(j.second); - } - } +// for (auto j : iCellParams) cellParams[j.first] += thisCellData->at(j.second); +// for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] += thisCellData->at(j.second); +// for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] += thisCellData->at(j.second); +// } +// } - if (nCellsToSum > 0) { - // Divide by the number of cells to get the average - for (auto j : iCellParams) cellParams[j.first] /= nCellsToSum; - for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; - for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] /= nCellsToSum; - } - } -} - - -void getDerivativesFromFsGrid( - FsGrid< std::array, 2>& dperbGrid, - FsGrid< std::array, 2>& dmomentsGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid, - dccrg::Dccrg& mpiGrid, - const std::vector& cells -) { - - // Setup transfer buffers - cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); - std::vector< std::array > dperbTransferBuffer(nCellsOnMaxRefLvl); - std::vector< std::array > dmomentsTransferBuffer(nCellsOnMaxRefLvl); +// if (nCellsToSum > 0) { +// // Divide by the number of cells to get the average +// for (auto j : iCellParams) cellParams[j.first] /= nCellsToSum; +// for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; +// for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] /= nCellsToSum; +// } +// } +// } + + +// void getDerivativesFromFsGrid( +// FsGrid< std::array, 2>& dperbGrid, +// FsGrid< std::array, 2>& dmomentsGrid, +// FsGrid< fsgrids::technical, 2>& technicalGrid, +// dccrg::Dccrg& mpiGrid, +// const std::vector& cells +// ) { + +// // Setup transfer buffers +// cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); +// std::vector< std::array > dperbTransferBuffer(nCellsOnMaxRefLvl); +// std::vector< std::array > dmomentsTransferBuffer(nCellsOnMaxRefLvl); - std::vector< std::array*> dperbTransferBufferPointer; - std::vector< std::array*> dmomentsTransferBufferPointer; +// std::vector< std::array*> dperbTransferBufferPointer; +// std::vector< std::array*> dmomentsTransferBufferPointer; - std::vector< fsgrids::technical > transferBufferTechnical(nCellsOnMaxRefLvl); - std::vector< fsgrids::technical*> transferBufferPointerTechnical; +// std::vector< fsgrids::technical > transferBufferTechnical(nCellsOnMaxRefLvl); +// std::vector< fsgrids::technical*> transferBufferPointerTechnical; - dperbGrid.setupForTransferOut(nCellsOnMaxRefLvl); - dmomentsGrid.setupForTransferOut(nCellsOnMaxRefLvl); - technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); +// dperbGrid.setupForTransferOut(nCellsOnMaxRefLvl); +// dmomentsGrid.setupForTransferOut(nCellsOnMaxRefLvl); +// technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); - int k = 0; - for (auto dccrgId : cells) { +// int k = 0; +// for (auto 
dccrgId : cells) { - // Assuming same local size in all fsgrids - const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); - // Store a pointer to the first fsgrid cell that maps to each dccrg Id - dperbTransferBufferPointer.push_back(&dperbTransferBuffer[k]); - dmomentsTransferBufferPointer.push_back(&dmomentsTransferBuffer[k]); - transferBufferPointerTechnical.push_back(&transferBufferTechnical[k]); +// // Assuming same local size in all fsgrids +// const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); +// // Store a pointer to the first fsgrid cell that maps to each dccrg Id +// dperbTransferBufferPointer.push_back(&dperbTransferBuffer[k]); +// dmomentsTransferBufferPointer.push_back(&dmomentsTransferBuffer[k]); +// transferBufferPointerTechnical.push_back(&transferBufferTechnical[k]); - for (auto fsgridId : fsgridIds) { +// for (auto fsgridId : fsgridIds) { - std::array* dperbCellData = &dperbTransferBuffer[k]; - dperbGrid.transferDataOut(fsgridId, dperbCellData); - std::array* dmomentsCellData = &dmomentsTransferBuffer[k]; - dmomentsGrid.transferDataOut(fsgridId, dmomentsCellData); - fsgrids::technical* thisCellDataTechnical = &transferBufferTechnical[k]; - technicalGrid.transferDataOut(fsgridId, thisCellDataTechnical); - k++; - } - } +// std::array* dperbCellData = &dperbTransferBuffer[k]; +// dperbGrid.transferDataOut(fsgridId, dperbCellData); +// std::array* dmomentsCellData = &dmomentsTransferBuffer[k]; +// dmomentsGrid.transferDataOut(fsgridId, dmomentsCellData); +// fsgrids::technical* thisCellDataTechnical = &transferBufferTechnical[k]; +// technicalGrid.transferDataOut(fsgridId, thisCellDataTechnical); +// k++; +// } +// } - // Do the transfer - dperbGrid.finishTransfersOut(); - dmomentsGrid.finishTransfersOut(); - technicalGrid.finishTransfersOut(); - - std::vector> iDmoments; - std::vector> iDperb; - iDmoments.reserve(24); - iDmoments.push_back(std::make_pair(fieldsolver::drhomdx, fsgrids::dmoments::drhomdx)); - iDmoments.push_back(std::make_pair(fieldsolver::drhomdy, fsgrids::dmoments::drhomdy)); - iDmoments.push_back(std::make_pair(fieldsolver::drhomdz, fsgrids::dmoments::drhomdz)); - iDmoments.push_back(std::make_pair(fieldsolver::drhoqdx, fsgrids::dmoments::drhoqdx)); - iDmoments.push_back(std::make_pair(fieldsolver::drhoqdy, fsgrids::dmoments::drhoqdy)); - iDmoments.push_back(std::make_pair(fieldsolver::drhoqdz, fsgrids::dmoments::drhoqdz)); - iDmoments.push_back(std::make_pair(fieldsolver::dp11dx , fsgrids::dmoments::dp11dx )); - iDmoments.push_back(std::make_pair(fieldsolver::dp11dy , fsgrids::dmoments::dp11dy )); - iDmoments.push_back(std::make_pair(fieldsolver::dp11dz , fsgrids::dmoments::dp11dz )); - iDmoments.push_back(std::make_pair(fieldsolver::dp22dx , fsgrids::dmoments::dp22dx )); - iDmoments.push_back(std::make_pair(fieldsolver::dp22dy , fsgrids::dmoments::dp22dy )); - iDmoments.push_back(std::make_pair(fieldsolver::dp22dz , fsgrids::dmoments::dp22dz )); - iDmoments.push_back(std::make_pair(fieldsolver::dp33dx , fsgrids::dmoments::dp33dx )); - iDmoments.push_back(std::make_pair(fieldsolver::dp33dy , fsgrids::dmoments::dp33dy )); - iDmoments.push_back(std::make_pair(fieldsolver::dp33dz , fsgrids::dmoments::dp33dz )); - iDmoments.push_back(std::make_pair(fieldsolver::dVxdx , fsgrids::dmoments::dVxdx )); - iDmoments.push_back(std::make_pair(fieldsolver::dVxdy , fsgrids::dmoments::dVxdy )); - iDmoments.push_back(std::make_pair(fieldsolver::dVxdz , fsgrids::dmoments::dVxdz )); - iDmoments.push_back(std::make_pair(fieldsolver::dVydx , 
fsgrids::dmoments::dVydx )); - iDmoments.push_back(std::make_pair(fieldsolver::dVydy , fsgrids::dmoments::dVydy )); - iDmoments.push_back(std::make_pair(fieldsolver::dVydz , fsgrids::dmoments::dVydz )); - iDmoments.push_back(std::make_pair(fieldsolver::dVzdx , fsgrids::dmoments::dVzdx )); - iDmoments.push_back(std::make_pair(fieldsolver::dVzdy , fsgrids::dmoments::dVzdy )); - iDmoments.push_back(std::make_pair(fieldsolver::dVzdz , fsgrids::dmoments::dVzdz )); - - iDperb.reserve(15); - iDperb.push_back(std::make_pair(fieldsolver::dPERBxdy , fsgrids::dperb::dPERBxdy )); - iDperb.push_back(std::make_pair(fieldsolver::dPERBxdz , fsgrids::dperb::dPERBxdz )); - iDperb.push_back(std::make_pair(fieldsolver::dPERBydx , fsgrids::dperb::dPERBydx )); - iDperb.push_back(std::make_pair(fieldsolver::dPERBydz , fsgrids::dperb::dPERBydz )); - iDperb.push_back(std::make_pair(fieldsolver::dPERBzdx , fsgrids::dperb::dPERBzdx )); - iDperb.push_back(std::make_pair(fieldsolver::dPERBzdy , fsgrids::dperb::dPERBzdy )); - iDperb.push_back(std::make_pair(fieldsolver::dPERBxdyy, fsgrids::dperb::dPERBxdyy)); - iDperb.push_back(std::make_pair(fieldsolver::dPERBxdzz, fsgrids::dperb::dPERBxdzz)); - iDperb.push_back(std::make_pair(fieldsolver::dPERBydxx, fsgrids::dperb::dPERBydxx)); - iDperb.push_back(std::make_pair(fieldsolver::dPERBydzz, fsgrids::dperb::dPERBydzz)); - iDperb.push_back(std::make_pair(fieldsolver::dPERBzdxx, fsgrids::dperb::dPERBzdxx)); - iDperb.push_back(std::make_pair(fieldsolver::dPERBzdyy, fsgrids::dperb::dPERBzdyy)); - iDperb.push_back(std::make_pair(fieldsolver::dPERBxdyz, fsgrids::dperb::dPERBxdyz)); - iDperb.push_back(std::make_pair(fieldsolver::dPERBydxz, fsgrids::dperb::dPERBydxz)); - iDperb.push_back(std::make_pair(fieldsolver::dPERBzdxy, fsgrids::dperb::dPERBzdxy)); +// // Do the transfer +// dperbGrid.finishTransfersOut(); +// dmomentsGrid.finishTransfersOut(); +// technicalGrid.finishTransfersOut(); + +// std::vector> iDmoments; +// std::vector> iDperb; +// iDmoments.reserve(24); +// iDmoments.push_back(std::make_pair(fieldsolver::drhomdx, fsgrids::dmoments::drhomdx)); +// iDmoments.push_back(std::make_pair(fieldsolver::drhomdy, fsgrids::dmoments::drhomdy)); +// iDmoments.push_back(std::make_pair(fieldsolver::drhomdz, fsgrids::dmoments::drhomdz)); +// iDmoments.push_back(std::make_pair(fieldsolver::drhoqdx, fsgrids::dmoments::drhoqdx)); +// iDmoments.push_back(std::make_pair(fieldsolver::drhoqdy, fsgrids::dmoments::drhoqdy)); +// iDmoments.push_back(std::make_pair(fieldsolver::drhoqdz, fsgrids::dmoments::drhoqdz)); +// iDmoments.push_back(std::make_pair(fieldsolver::dp11dx , fsgrids::dmoments::dp11dx )); +// iDmoments.push_back(std::make_pair(fieldsolver::dp11dy , fsgrids::dmoments::dp11dy )); +// iDmoments.push_back(std::make_pair(fieldsolver::dp11dz , fsgrids::dmoments::dp11dz )); +// iDmoments.push_back(std::make_pair(fieldsolver::dp22dx , fsgrids::dmoments::dp22dx )); +// iDmoments.push_back(std::make_pair(fieldsolver::dp22dy , fsgrids::dmoments::dp22dy )); +// iDmoments.push_back(std::make_pair(fieldsolver::dp22dz , fsgrids::dmoments::dp22dz )); +// iDmoments.push_back(std::make_pair(fieldsolver::dp33dx , fsgrids::dmoments::dp33dx )); +// iDmoments.push_back(std::make_pair(fieldsolver::dp33dy , fsgrids::dmoments::dp33dy )); +// iDmoments.push_back(std::make_pair(fieldsolver::dp33dz , fsgrids::dmoments::dp33dz )); +// iDmoments.push_back(std::make_pair(fieldsolver::dVxdx , fsgrids::dmoments::dVxdx )); +// iDmoments.push_back(std::make_pair(fieldsolver::dVxdy , fsgrids::dmoments::dVxdy 
)); +// iDmoments.push_back(std::make_pair(fieldsolver::dVxdz , fsgrids::dmoments::dVxdz )); +// iDmoments.push_back(std::make_pair(fieldsolver::dVydx , fsgrids::dmoments::dVydx )); +// iDmoments.push_back(std::make_pair(fieldsolver::dVydy , fsgrids::dmoments::dVydy )); +// iDmoments.push_back(std::make_pair(fieldsolver::dVydz , fsgrids::dmoments::dVydz )); +// iDmoments.push_back(std::make_pair(fieldsolver::dVzdx , fsgrids::dmoments::dVzdx )); +// iDmoments.push_back(std::make_pair(fieldsolver::dVzdy , fsgrids::dmoments::dVzdy )); +// iDmoments.push_back(std::make_pair(fieldsolver::dVzdz , fsgrids::dmoments::dVzdz )); + +// iDperb.reserve(15); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdy , fsgrids::dperb::dPERBxdy )); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdz , fsgrids::dperb::dPERBxdz )); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBydx , fsgrids::dperb::dPERBydx )); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBydz , fsgrids::dperb::dPERBydz )); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdx , fsgrids::dperb::dPERBzdx )); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdy , fsgrids::dperb::dPERBzdy )); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdyy, fsgrids::dperb::dPERBxdyy)); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdzz, fsgrids::dperb::dPERBxdzz)); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBydxx, fsgrids::dperb::dPERBydxx)); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBydzz, fsgrids::dperb::dPERBydzz)); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdxx, fsgrids::dperb::dPERBzdxx)); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdyy, fsgrids::dperb::dPERBzdyy)); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdyz, fsgrids::dperb::dPERBxdyz)); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBydxz, fsgrids::dperb::dPERBydxz)); +// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdxy, fsgrids::dperb::dPERBzdxy)); - // Distribute data from the transfer buffers back into the appropriate mpiGrid places - // Disregard DO_NOT_COMPUTE cells - #pragma omp parallel for - for(uint i = 0; i < cells.size(); ++i) { +// // Distribute data from the transfer buffers back into the appropriate mpiGrid places +// // Disregard DO_NOT_COMPUTE cells +// #pragma omp parallel for +// for(uint i = 0; i < cells.size(); ++i) { - const CellID dccrgId = cells[i]; +// const CellID dccrgId = cells[i]; - // Calculate the number of fsgrid cells we loop through - cint nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); - // Count the number of fsgrid cells we need to average into the current dccrg cell - int nCellsToSum = 0; +// // Calculate the number of fsgrid cells we loop through +// cint nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); +// // Count the number of fsgrid cells we need to average into the current dccrg cell +// int nCellsToSum = 0; - for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; - for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; +// for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; +// for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; - for(int iCell = 0; iCell < nCells; ++iCell) { - // The fsgrid cells that cover the i'th dccrg cell are pointed at by - // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. 
- // We want to average over those who are not DO_NOT_COMPUTE to get the value for the dccrg cell - if ((transferBufferPointerTechnical[i] + iCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { - continue; - } else { - nCellsToSum++; +// for(int iCell = 0; iCell < nCells; ++iCell) { +// // The fsgrid cells that cover the i'th dccrg cell are pointed at by +// // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. +// // We want to average over those who are not DO_NOT_COMPUTE to get the value for the dccrg cell +// if ((transferBufferPointerTechnical[i] + iCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { +// continue; +// } else { +// nCellsToSum++; - std::array* dperb = dperbTransferBufferPointer[i] + iCell; - std::array* dmoments = dmomentsTransferBufferPointer[i] + iCell; +// std::array* dperb = dperbTransferBufferPointer[i] + iCell; +// std::array* dmoments = dmomentsTransferBufferPointer[i] + iCell; - for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] += dmoments->at(j.second); - for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] += dperb ->at(j.second); - } - } +// for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] += dmoments->at(j.second); +// for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] += dperb ->at(j.second); +// } +// } - if (nCellsToSum > 0) { - for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; - for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; - } - } -} +// if (nCellsToSum > 0) { +// for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; +// for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; +// } +// } +// } /* diff --git a/grid.cpp b/grid.cpp index e6a8b470d..8cb324b47 100644 --- a/grid.cpp +++ b/grid.cpp @@ -290,24 +290,24 @@ void initializeGrids( phiprof::stop("Init moments"); } - phiprof::start("Initial fsgrid coupling"); - // Couple FSGrids to mpiGrid. Note that the coupling information is shared - // between them. - technicalGrid.setupForGridCoupling(cells.size()); + // phiprof::start("Initial fsgrid coupling"); + // // Couple FSGrids to mpiGrid. Note that the coupling information is shared + // // between them. + // technicalGrid.setupForGridCoupling(cells.size()); - // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. - // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. - for(auto& dccrgId : cells) { - const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); + // // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. + // // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. 
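// [Editorial sketch, not part of the patch.] The coupling loop commented out here
// hinges on one fact: a dccrg cell at refinement level r is covered by a
// 2^d x 2^d x 2^d block of fsgrid cells, where d = maxRefLvl - r. Below is a
// minimal standalone illustration of that mapping under the assumption of plain
// (ix,iy,iz) index triples; the hypothetical name coveredFsGridIndices is not from
// the codebase, and the real mapDccrgIdToFsGridGlobalID additionally decodes
// dccrg's cell ID encoding, which is omitted.
#include <array>
#include <cstddef>
#include <vector>

std::vector< std::array<int,3> > coveredFsGridIndices(
      const std::array<int,3>& coarse, int refLvl, int maxRefLvl) {
   const int w = 1 << (maxRefLvl - refLvl);              // 2^d fine cells per edge
   std::vector< std::array<int,3> > fine;
   fine.reserve(static_cast<std::size_t>(w) * w * w);    // 8^d fine cells in total
   for (int kz = 0; kz < w; ++kz)
      for (int ky = 0; ky < w; ++ky)
         for (int kx = 0; kx < w; ++kx)
            fine.push_back({coarse[0]*w + kx, coarse[1]*w + ky, coarse[2]*w + kz});
   return fine;
}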
+ // for(auto& dccrgId : cells) { + // const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); - for (auto fsgridId : fsgridIds) { + // for (auto fsgridId : fsgridIds) { - technicalGrid.setGridCoupling(fsgridId, myRank); - } - } + // technicalGrid.setGridCoupling(fsgridId, myRank); + // } + // } - technicalGrid.finishGridCoupling(); - phiprof::stop("Initial fsgrid coupling"); + // technicalGrid.finishGridCoupling(); + // phiprof::stop("Initial fsgrid coupling"); phiprof::start("setProjectBField"); project.setProjectBField(perBGrid, BgBGrid, technicalGrid); @@ -323,8 +323,8 @@ void initializeGrids( phiprof::stop("getFieldsFromFsGrid"); phiprof::start("Finish fsgrid setup"); - getFieldDataFromFsGrid(perBGrid, technicalGrid, mpiGrid, cells, CellParams::PERBX); - getBgFieldsAndDerivativesFromFsGrid(BgBGrid, technicalGrid, mpiGrid, cells); + // getFieldDataFromFsGrid(perBGrid, technicalGrid, mpiGrid, cells, CellParams::PERBX); + // getBgFieldsAndDerivativesFromFsGrid(BgBGrid, technicalGrid, mpiGrid, cells); // WARNING this means moments and dt2 moments are the same here. feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,false); diff --git a/iowrite.cpp b/iowrite.cpp index b8dd249e7..9bfd95a8c 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -1270,7 +1270,7 @@ bool writeRestart(dccrg::Dccrg& mpiGrid, phiprof::start("reduceddataIO"); //write out DROs we need for restarts DataReducer restartReducer; - restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("background_B",CellParams::BGBX,3)); + //restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("background_B",CellParams::BGBX,3)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("perturbed_B",CellParams::PERBX,3)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("moments",CellParams::RHOM,5)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("moments_dt2",CellParams::RHOM_DT2,5)); @@ -1284,7 +1284,7 @@ bool writeRestart(dccrg::Dccrg& mpiGrid, restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("max_v_dt",CellParams::MAXVDT,1)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("max_r_dt",CellParams::MAXRDT,1)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("max_fields_dt",CellParams::MAXFDT,1)); - restartReducer.addOperator(new DRO::DataReductionOperatorDerivatives("derivatives",0,fieldsolver::N_SPATIAL_CELL_DERIVATIVES)); + // restartReducer.addOperator(new DRO::DataReductionOperatorDerivatives("derivatives",0,fieldsolver::N_SPATIAL_CELL_DERIVATIVES)); restartReducer.addOperator(new DRO::DataReductionOperatorBVOLDerivatives("Bvolume_derivatives",0,bvolderivatives::N_BVOL_DERIVATIVES)); restartReducer.addOperator(new DRO::MPIrank); restartReducer.addOperator(new DRO::BoundaryType); diff --git a/spatial_cell.cpp b/spatial_cell.cpp index 632478ad0..b54652cbe 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -48,10 +48,10 @@ namespace spatial_cell { this->parameters[i]=0.0; } - // reset spatial cell derivatives - for (unsigned int i = 0; i < fieldsolver::N_SPATIAL_CELL_DERIVATIVES; i++) { - this->derivatives[i]=0; - } + // // reset spatial cell derivatives + // for (unsigned int i = 0; i < fieldsolver::N_SPATIAL_CELL_DERIVATIVES; i++) { + // this->derivatives[i]=0; + // } // reset BVOL derivatives for (unsigned int i = 0; i < bvolderivatives::N_BVOL_DERIVATIVES; i++) { @@ -87,7 +87,7 @@ namespace spatial_cell { mpiTransferEnabled(other.mpiTransferEnabled), populations(other.populations), 
parameters(other.parameters), - derivatives(other.derivatives), + // derivatives(other.derivatives), derivativesBVOL(other.derivativesBVOL), null_block_data(std::array {}) { } @@ -735,11 +735,11 @@ namespace spatial_cell { block_lengths.push_back(sizeof(Real)); } - // send spatial cell derivatives - if ((SpatialCell::mpi_transfer_type & Transfer::CELL_DERIVATIVES)!=0){ - displacements.push_back((uint8_t*) &(this->derivatives[0]) - (uint8_t*) this); - block_lengths.push_back(sizeof(Real) * fieldsolver::N_SPATIAL_CELL_DERIVATIVES); - } + // // send spatial cell derivatives + // if ((SpatialCell::mpi_transfer_type & Transfer::CELL_DERIVATIVES)!=0){ + // displacements.push_back((uint8_t*) &(this->derivatives[0]) - (uint8_t*) this); + // block_lengths.push_back(sizeof(Real) * fieldsolver::N_SPATIAL_CELL_DERIVATIVES); + // } // send spatial cell BVOL derivatives if ((SpatialCell::mpi_transfer_type & Transfer::CELL_BVOL_DERIVATIVES)!=0){ @@ -752,11 +752,11 @@ namespace spatial_cell { block_lengths.push_back(sizeof(uint64_t)); } - // send Hall term components - if ((SpatialCell::mpi_transfer_type & Transfer::CELL_HALL_TERM)!=0){ - displacements.push_back((uint8_t*) &(this->parameters[CellParams::EXHALL_000_100]) - (uint8_t*) this); - block_lengths.push_back(sizeof(Real) * 12); - } + // // send Hall term components + // if ((SpatialCell::mpi_transfer_type & Transfer::CELL_HALL_TERM)!=0){ + // displacements.push_back((uint8_t*) &(this->parameters[CellParams::EXHALL_000_100]) - (uint8_t*) this); + // block_lengths.push_back(sizeof(Real) * 12); + // } // send electron pressure gradient term components if ((SpatialCell::mpi_transfer_type & Transfer::CELL_GRADPE_TERM)!=0){ displacements.push_back((uint8_t*) &(this->parameters[CellParams::EXGRADPE]) - (uint8_t*) this); diff --git a/spatial_cell.hpp b/spatial_cell.hpp index 6bc7e00f0..388055601 100644 --- a/spatial_cell.hpp +++ b/spatial_cell.hpp @@ -317,7 +317,7 @@ namespace spatial_cell { // Member variables // //Real derivatives[fieldsolver::N_SPATIAL_CELL_DERIVATIVES]; /**< Derivatives of bulk variables in this spatial cell.*/ - std::array derivatives; /**< Derivatives of bulk variables in this spatial cell.*/ + //std::array derivatives; /**< Derivatives of bulk variables in this spatial cell.*/ //Real derivativesBVOL[bvolderivatives::N_BVOL_DERIVATIVES]; /**< Derivatives of BVOL needed by the acceleration. // * Separate array because it does not need to be communicated.*/ std::array derivativesBVOL; /**< Derivatives of BVOL needed by the acceleration. 
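The Transfer::CELL_DERIVATIVES and Transfer::CELL_HALL_TERM blocks commented out above feed dccrg's user-defined MPI transfer: each enabled flag appends one byte offset from the cell's base address and one block length. The following is a minimal sketch, under stated assumptions, of how such displacement/length lists become a committed MPI datatype; the two-member Cell struct and makeCellDatatype are hypothetical stand-ins, while the real SpatialCell::get_mpi_datatype assembles analogous vectors from the enabled transfer flags.

#include <mpi.h>
#include <array>
#include <cstdint>

// Hypothetical stand-in for SpatialCell: two fixed-size payload arrays.
struct Cell {
   std::array<double, 4>  parameters;
   std::array<double, 12> derivativesBVOL;
};

// Turn per-member byte offsets and lengths into one committed MPI datatype.
MPI_Datatype makeCellDatatype(const Cell* cell) {
   const std::uint8_t* base = reinterpret_cast<const std::uint8_t*>(cell);
   MPI_Aint displacements[2] = {
      reinterpret_cast<const std::uint8_t*>(cell->parameters.data()) - base,
      reinterpret_cast<const std::uint8_t*>(cell->derivativesBVOL.data()) - base
   };
   int blockLengths[2] = {
      int(sizeof(double) * cell->parameters.size()),      // lengths in bytes,
      int(sizeof(double) * cell->derivativesBVOL.size())  // since the type is MPI_BYTE
   };
   MPI_Datatype types[2] = { MPI_BYTE, MPI_BYTE };
   MPI_Datatype cellType;
   MPI_Type_create_struct(2, blockLengths, displacements, types, &cellType);
   MPI_Type_commit(&cellType);
   return cellType;   // send/receive as (cell, 1, cellType); MPI_Type_free when done
}

The same pattern scales to any number of optional blocks: each transfer flag contributes one displacement/length pair before the type is created, which is why disabling the derivatives and Hall-term transfers above only removes their push_back sections.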
diff --git a/vlasiator.cpp b/vlasiator.cpp index cab0b9b37..1158918dc 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -548,13 +548,6 @@ int main(int argn,char* args[]) { // Save restart data if (P::writeInitialState) { phiprof::start("write-initial-state"); - // phiprof::start("fsgrid-coupling-out"); - // getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX); - // getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); - // getFieldDataFromFsGrid(EHallGrid,technicalGrid,mpiGrid,cells,CellParams::EXHALL_000_100); - // getFieldDataFromFsGrid(EGradPeGrid,technicalGrid,mpiGrid,cells,CellParams::EXGRADPE); - // getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, technicalGrid, mpiGrid, cells); - // phiprof::stop("fsgrid-coupling-out"); if (myRank == MASTER_RANK) logFile << "(IO): Writing initial state to disk, tstep = " << endl << writeVerbose; @@ -737,14 +730,16 @@ int main(int argn,char* args[]) { it != P::diagnosticVariableList.end(); it++) { if (*it == "FluxB") { - phiprof::start("fsgrid-coupling-out"); - getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX); - phiprof::stop("fsgrid-coupling-out"); - } + if(myRank == MASTER_RANK) cerr << __FILE__ << " " << __LINE__ << "ERROR: Diagnostic output from FsGrid is no longer supported!" << endl; + // phiprof::start("fsgrid-coupling-out"); + // getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX); + // phiprof::stop("fsgrid-coupling-out"); + } if (*it == "FluxE") { - phiprof::start("fsgrid-coupling-out"); - getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); - phiprof::stop("fsgrid-coupling-out"); + if(myRank == MASTER_RANK) cerr << __FILE__ << " " << __LINE__ << "ERROR: Diagnostic output from FsGrid is no longer supported!" 
<< endl; + // phiprof::start("fsgrid-coupling-out"); + // getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); + // phiprof::stop("fsgrid-coupling-out"); } } @@ -760,42 +755,42 @@ int main(int argn,char* args[]) { // write system, loop through write classes for (uint i = 0; i < P::systemWriteTimeInterval.size(); i++) { if (P::systemWriteTimeInterval[i] >= 0.0 && - P::t >= P::systemWrites[i] * P::systemWriteTimeInterval[i] - DT_EPSILON) { - if (extractFsGridFields) { - vector::const_iterator it; - for (it = P::outputVariableList.begin(); - it != P::outputVariableList.end(); - it++) { - if (*it == "B" || - *it == "PerturbedB" - ) { - phiprof::start("fsgrid-coupling-out"); - getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX); - phiprof::stop("fsgrid-coupling-out"); - } - if (*it == "E") { - phiprof::start("fsgrid-coupling-out"); - getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); - phiprof::stop("fsgrid-coupling-out"); - } - if (*it == "HallE") { - phiprof::start("fsgrid-coupling-out"); - getFieldDataFromFsGrid(EHallGrid,technicalGrid,mpiGrid,cells,CellParams::EXHALL_000_100); - phiprof::stop("fsgrid-coupling-out"); - } - if (*it == "GradPeE") { - phiprof::start("fsgrid-coupling-out"); - getFieldDataFromFsGrid(EGradPeGrid,technicalGrid,mpiGrid,cells,CellParams::EXGRADPE); - phiprof::stop("fsgrid-coupling-out"); - } - if (*it == "derivs") { - phiprof::start("fsgrid-coupling-out"); - getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, technicalGrid, mpiGrid, cells); - phiprof::stop("fsgrid-coupling-out"); - } - } - extractFsGridFields = false; - } + P::t >= P::systemWrites[i] * P::systemWriteTimeInterval[i] - DT_EPSILON) { + // if (extractFsGridFields) { + // vector::const_iterator it; + // for (it = P::outputVariableList.begin(); + // it != P::outputVariableList.end(); + // it++) { + // if (*it == "B" || + // *it == "PerturbedB" + // ) { + // phiprof::start("fsgrid-coupling-out"); + // getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX); + // phiprof::stop("fsgrid-coupling-out"); + // } + // if (*it == "E") { + // phiprof::start("fsgrid-coupling-out"); + // getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); + // phiprof::stop("fsgrid-coupling-out"); + // } + // if (*it == "HallE") { + // phiprof::start("fsgrid-coupling-out"); + // getFieldDataFromFsGrid(EHallGrid,technicalGrid,mpiGrid,cells,CellParams::EXHALL_000_100); + // phiprof::stop("fsgrid-coupling-out"); + // } + // if (*it == "GradPeE") { + // phiprof::start("fsgrid-coupling-out"); + // getFieldDataFromFsGrid(EGradPeGrid,technicalGrid,mpiGrid,cells,CellParams::EXGRADPE); + // phiprof::stop("fsgrid-coupling-out"); + // } + // if (*it == "derivs") { + // phiprof::start("fsgrid-coupling-out"); + // getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, technicalGrid, mpiGrid, cells); + // phiprof::stop("fsgrid-coupling-out"); + // } + // } + // extractFsGridFields = false; + // } phiprof::start("write-system"); logFile << "(IO): Writing spatial cell and reduced system data to disk, tstep = " << P::tstep << " t = " << P::t << endl << writeVerbose; From 947a23ee24697853e1a55b19111f4aa7b28d1d06 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Tue, 14 May 2019 15:29:17 +0300 Subject: [PATCH 417/602] Deleted commented out code. 
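Among the code removed for good in this commit are the gridGlue.cpp routines that downsampled fsgrid data back onto dccrg cells: each coarse cell summed its 8^(maxRefLvl - refLvl) covering fine cells, skipped those flagged DO_NOT_COMPUTE, and divided by the number actually summed. A minimal sketch of that reduction follows, with hypothetical stand-ins (Technical, averageValidCells) in place of the real fsgrid types.

#include <cstddef>
#include <vector>

// Hypothetical stand-ins for fsgrids::technical and its boundary flag.
enum SysBoundaryType { NOT_SYSBOUNDARY = 0, DO_NOT_COMPUTE = 1 };
struct Technical { int sysBoundaryFlag; };

// Average fine-grid samples into one coarse value, ignoring DO_NOT_COMPUTE
// cells; if no valid cells exist the coarse value stays at its initialized 0.
double averageValidCells(const std::vector<double>& fine,
                         const std::vector<Technical>& tech) {
   double sum = 0.0;
   int nCellsToSum = 0;
   for (std::size_t i = 0; i < fine.size(); ++i) {
      if (tech[i].sysBoundaryFlag == DO_NOT_COMPUTE) continue;
      sum += fine[i];
      ++nCellsToSum;
   }
   return nCellsToSum > 0 ? sum / nCellsToSum : 0.0;
}

In the deleted functions this loop ran once per dccrg cell and per field index pair, accumulating directly into cellParams and the derivative arrays via the transfer-buffer pointers.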
--- common.h | 86 --------- datareduction/datareducer.cpp | 103 +--------- datareduction/datareductionoperator.cpp | 10 - fieldsolver/gridGlue.cpp | 243 ------------------------ grid.cpp | 21 -- iowrite.cpp | 2 - spatial_cell.cpp | 19 +- spatial_cell.hpp | 4 - vlasiator.cpp | 48 ----- 9 files changed, 6 insertions(+), 530 deletions(-) diff --git a/common.h b/common.h index ef3798ef9..c0245bde6 100644 --- a/common.h +++ b/common.h @@ -133,9 +133,6 @@ namespace CellParams { EX, /*!< Total electric field x-component, averaged over cell edge. Used to propagate BX,BY,BZ.*/ EY, /*!< Total wlectric field y-component, averaged over cell edge. Used to propagate BX,BY,BZ.*/ EZ, /*!< Total electric field z-component, averaged over cell edge. Used to propagate BX,BY,BZ.*/ - /* BGBX, /\*!< Background magnetic field x-component, averaged over cell x-face.*\/ */ - /* BGBY, /\*!< Background magnetic field x-component, averaged over cell x-face.*\/ */ - /* BGBZ, /\*!< Background magnetic field x-component, averaged over cell x-face.*\/ */ PERBX, /*!< Perturbed Magnetic field x-component, averaged over cell x-face. Propagated by field solver.*/ PERBY, /*!< Perturbed Magnetic field y-component, averaged over cell y-face. Propagated by field solver.*/ PERBZ, /*!< Perturbed Magnetic field z-component, averaged over cell z-face. Propagated by field solver.*/ @@ -164,18 +161,6 @@ namespace CellParams { EXVOL, /*!< Ex averaged over spatial cell.*/ EYVOL, /*!< Ey averaged over spatial cell.*/ EZVOL, /*!< Ez averaged over spatial cell.*/ - /* EXHALL_000_100, /\*!< Hall term x averaged along x on -y/-z edge of spatial cell.*\/ */ - /* EYHALL_000_010, /\*!< Hall term y averaged along y on -x/-z edge of spatial cell.*\/ */ - /* EZHALL_000_001, /\*!< Hall term z averaged along z on -x/-y edge of spatial cell.*\/ */ - /* EYHALL_100_110, /\*!< Hall term y averaged along y on +x/-z edge of spatial cell.*\/ */ - /* EZHALL_100_101, /\*!< Hall term z averaged along z on +x/-y edge of spatial cell.*\/ */ - /* EXHALL_010_110, /\*!< Hall term x averaged along x on +y/-z edge of spatial cell.*\/ */ - /* EZHALL_010_011, /\*!< Hall term z averaged along z on +y/-x edge of spatial cell.*\/ */ - /* EZHALL_110_111, /\*!< Hall term z averaged along z on +x/+y edge of spatial cell.*\/ */ - /* EXHALL_001_101, /\*!< Hall term x averaged along x on -y/+z edge of spatial cell.*\/ */ - /* EYHALL_001_011, /\*!< Hall term y averaged along y on -x/+z edge of spatial cell.*\/ */ - /* EYHALL_101_111, /\*!< Hall term y averaged along y on +x/+z edge of spatial cell.*\/ */ - /* EXHALL_011_111, /\*!< Hall term x averaged along x on +y/+z edge of spatial cell.*\/ */ EXGRADPE, /*!< Electron pressure gradient term x.*/ EYGRADPE, /*!< Electron pressure gradient term y.*/ EZGRADPE, /*!< Electron pressure gradient term z.*/ @@ -223,77 +208,6 @@ namespace CellParams { }; } -/*! Namespace fieldsolver contains indices into arrays which store - * variables required by the field solver. These quantities are derivatives - * of variables described in namespace CellParams. - * Do not change the order of variables unless you know what you are doing: - * in several places the size of cpu_derivatives array in cell_spatial is calculated - * as fieldsolver::dVzdz+1. - */ -namespace fieldsolver { - enum { -/* drhomdx, /\*!< Derivative of volume-averaged mass density to x-direction. *\/ */ -/* drhomdy, /\*!< Derivative of volume-averaged mass density to y-direction. *\/ */ -/* drhomdz, /\*!< Derivative of volume-averaged mass density to z-direction. 
*\/ */ -/* drhoqdx, /\*!< Derivative of volume-averaged charge density to x-direction. *\/ */ -/* drhoqdy, /\*!< Derivative of volume-averaged charge density to y-direction. *\/ */ -/* drhoqdz, /\*!< Derivative of volume-averaged charge density to z-direction. *\/ */ -/* dBGBxdy, /\*!< Derivative of face-averaged Bx to y-direction. *\/ */ -/* dBGBxdz, /\*!< Derivative of face-averaged Bx to z-direction. *\/ */ -/* dBGBydx, /\*!< Derivative of face-averaged By to x-direction. *\/ */ -/* dBGBydz, /\*!< Derivative of face-averaged By to z-direction. *\/ */ -/* dBGBzdx, /\*!< Derivative of face-averaged Bz to x-direction. *\/ */ -/* dBGBzdy, /\*!< Derivative of face-averaged Bz to y-direction. *\/ */ -/* dPERBxdy, /\*!< Derivative of face-averaged Bx to y-direction. *\/ */ -/* dPERBxdz, /\*!< Derivative of face-averaged Bx to z-direction. *\/ */ -/* dPERBydx, /\*!< Derivative of face-averaged By to x-direction. *\/ */ -/* dPERBydz, /\*!< Derivative of face-averaged By to z-direction. *\/ */ -/* dPERBzdx, /\*!< Derivative of face-averaged Bz to x-direction. *\/ */ -/* dPERBzdy, /\*!< Derivative of face-averaged Bz to y-direction. *\/ */ -/* // Insert for Hall term */ -/* // NOTE 2nd derivatives of BGBn are not needed as curl(dipole) = 0.0 */ -/* // will change if BGB is not curl-free */ -/* // dBGBxdyy, /\*!< Second derivative of face-averaged Bx to yy-direction. *\/ */ -/* // dBGBxdzz, /\*!< Second derivative of face-averaged Bx to zz-direction. *\/ */ -/* // dBGBxdyz, /\*!< Second derivative of face-averaged Bx to yz-direction. *\/ */ -/* // dBGBydxx, /\*!< Second derivative of face-averaged By to xx-direction. *\/ */ -/* // dBGBydzz, /\*!< Second derivative of face-averaged By to zz-direction. *\/ */ -/* // dBGBydxz, /\*!< Second derivative of face-averaged By to xz-direction. *\/ */ -/* // dBGBzdxx, /\*!< Second derivative of face-averaged Bz to xx-direction. *\/ */ -/* // dBGBzdyy, /\*!< Second derivative of face-averaged Bz to yy-direction. *\/ */ -/* // dBGBzdxy, /\*!< Second derivative of face-averaged Bz to xy-direction. *\/ */ -/* dPERBxdyy, /\*!< Second derivative of face-averaged Bx to yy-direction. *\/ */ -/* dPERBxdzz, /\*!< Second derivative of face-averaged Bx to zz-direction. *\/ */ -/* dPERBxdyz, /\*!< Second derivative of face-averaged Bx to yz-direction. *\/ */ -/* dPERBydxx, /\*!< Second derivative of face-averaged By to xx-direction. *\/ */ -/* dPERBydzz, /\*!< Second derivative of face-averaged By to zz-direction. *\/ */ -/* dPERBydxz, /\*!< Second derivative of face-averaged By to xz-direction. *\/ */ -/* dPERBzdxx, /\*!< Second derivative of face-averaged Bz to xx-direction. *\/ */ -/* dPERBzdyy, /\*!< Second derivative of face-averaged Bz to yy-direction. *\/ */ -/* dPERBzdxy, /\*!< Second derivative of face-averaged Bz to xy-direction. *\/ */ -/* dp11dx, /\*!< Derivative of P_11 to x direction. *\/ */ -/* dp11dy, /\*!< Derivative of P_11 to x direction. *\/ */ -/* dp11dz, /\*!< Derivative of P_11 to x direction. *\/ */ -/* dp22dx, /\*!< Derivative of P_22 to y direction. *\/ */ -/* dp22dy, /\*!< Derivative of P_22 to y direction. *\/ */ -/* dp22dz, /\*!< Derivative of P_22 to y direction. *\/ */ -/* dp33dx, /\*!< Derivative of P_33 to z direction. *\/ */ -/* dp33dy, /\*!< Derivative of P_33 to z direction. *\/ */ -/* dp33dz, /\*!< Derivative of P_33 to z direction. *\/ */ -/* // End of insert for Hall term */ -/* dVxdx, /\*!< Derivative of volume-averaged Vx to x-direction. *\/ */ -/* dVxdy, /\*!< Derivative of volume-averaged Vx to y-direction. 
*\/ */ -/* dVxdz, /\*!< Derivative of volume-averaged Vx to z-direction. *\/ */ -/* dVydx, /\*!< Derivative of volume-averaged Vy to x-direction. *\/ */ -/* dVydy, /\*!< Derivative of volume-averaged Vy to y-direction. *\/ */ -/* dVydz, /\*!< Derivative of volume-averaged Vy to z-direction. *\/ */ -/* dVzdx, /\*!< Derivative of volume-averaged Vz to x-direction. *\/ */ -/* dVzdy, /\*!< Derivative of volume-averaged Vz to y-direction. *\/ */ -/* dVzdz, /\*!< Derivative of volume-averaged Vz to z-direction. *\/ */ - N_SPATIAL_CELL_DERIVATIVES - }; -} - /*! The namespace bvolderivatives contains the indices to an array which stores the spatial * derivatives of the volume-averaged magnetic field, needed for Lorentz force. * TODO: Vol values may be removed if background field is curlfree diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 488868830..9f7cb5189 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -539,27 +539,11 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); continue; } - // if(*it == "HallE") { - // // 12 corner components of the hall-effect contribution to the electric field - // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_000_100",CellParams::EXHALL_000_100,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_001_101",CellParams::EXHALL_001_101,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_010_110",CellParams::EXHALL_010_110,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EXHALL_011_111",CellParams::EXHALL_011_111,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_000_010",CellParams::EYHALL_000_010,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_001_011",CellParams::EYHALL_001_011,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_100_110",CellParams::EYHALL_100_110,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EYHALL_101_111",CellParams::EYHALL_101_111,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_000_001",CellParams::EZHALL_000_001,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_010_011",CellParams::EZHALL_010_011,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_100_101",CellParams::EZHALL_100_101,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EZHALL_110_111",CellParams::EZHALL_110_111,1)); - // continue; - // } - // if(*it =="GradPeE") { - // // Electron pressure gradient contribution to the generalized ohm's law - // outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EGRADPE",CellParams::EXGRADPE,3)); - // continue; - // } + if(*it =="GradPeE") { + // Electron pressure gradient contribution to the generalized ohm's law + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EGRADPE",CellParams::EXGRADPE,3)); + continue; + } if(*it == "VolB" || *it == "vg_VolB") { // Volume-averaged magnetic field outputReducer->addOperator(new DRO::VariableBVol); @@ -651,73 +635,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - // if(*it == "derivs") { - // // Derivatives of all quantities that might be of interest - // outputReducer->addOperator(new 
DRO::DataReductionOperatorDerivatives("drhomdx",fieldsolver::drhomdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdy",fieldsolver::drhomdy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdz",fieldsolver::drhomdz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdx",fieldsolver::drhoqdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdy",fieldsolver::drhoqdy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhoqdz",fieldsolver::drhoqdz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dx",fieldsolver::dp11dx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dx",fieldsolver::dp22dx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dx",fieldsolver::dp33dx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dy",fieldsolver::dp11dy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dy",fieldsolver::dp22dy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dy",fieldsolver::dp33dy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp11dz",fieldsolver::dp11dz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp22dz",fieldsolver::dp22dz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dp33dz",fieldsolver::dp33dz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdy",fieldsolver::dPERBxdy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBxdy",fieldsolver::dBGBxdy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdz",fieldsolver::dPERBxdz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBxdz",fieldsolver::dBGBxdz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydx",fieldsolver::dPERBydx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBydx",fieldsolver::dBGBydx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydz",fieldsolver::dPERBydz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBydz",fieldsolver::dBGBydz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdx",fieldsolver::dPERBzdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBzdx",fieldsolver::dBGBzdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdy",fieldsolver::dPERBzdy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dBGBzdy",fieldsolver::dBGBzdy,1)); - // if(Parameters::ohmHallTerm == 2) { - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdyy",fieldsolver::dPERBxdyy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdzz",fieldsolver::dPERBxdzz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydxx",fieldsolver::dPERBydxx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydzz",fieldsolver::dPERBydzz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdxx",fieldsolver::dPERBzdxx,1)); - // 
outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdyy",fieldsolver::dPERBzdyy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBxdyz",fieldsolver::dPERBxdyz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBydxz",fieldsolver::dPERBydxz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dPERBzdxy",fieldsolver::dPERBzdxy,1)); - // } - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdx",fieldsolver::dVxdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdy",fieldsolver::dVxdy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVxdz",fieldsolver::dVxdz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydx",fieldsolver::dVydx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydy",fieldsolver::dVydy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVydz",fieldsolver::dVydz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdx",fieldsolver::dVzdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdy",fieldsolver::dVzdy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("dVzdz",fieldsolver::dVzdz,1)); - // continue; - // } - // if(*it == "BVOLderivs") { - // // Volume-averaged derivatives - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdy",bvolderivatives::dPERBXVOLdy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBXVOLdy",bvolderivatives::dBGBXVOLdy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdz",bvolderivatives::dPERBXVOLdz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBXVOLdz",bvolderivatives::dBGBXVOLdz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdx",bvolderivatives::dPERBYVOLdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBYVOLdx",bvolderivatives::dBGBYVOLdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdz",bvolderivatives::dPERBYVOLdz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBYVOLdz",bvolderivatives::dBGBYVOLdz,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdx",bvolderivatives::dPERBZVOLdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBZVOLdx",bvolderivatives::dBGBZVOLdx,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdy",bvolderivatives::dPERBZVOLdy,1)); - // outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBZVOLdy",bvolderivatives::dBGBZVOLdy,1)); - // continue; - // } if(*it == "GridCoordinates") { // Spatial coordinates for each cell outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("X",CellParams::XCRD,1)); @@ -766,16 +683,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti for (it = P::diagnosticVariableList.begin(); it != P::diagnosticVariableList.end(); it++) { - // if(*it == "FluxB") { - // // Overall magnetic flux through the simulation plane - // diagnosticReducer->addOperator(new DRO::DiagnosticFluxB); - // continue; - // } - // 
if(*it == "FluxE") { - // // Overall electric flux through the simulation plane - // diagnosticReducer->addOperator(new DRO::DiagnosticFluxE); - // continue; - // } if (*it == "populations_Blocks") { // Per-population total block counts for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index 074ec17e4..f7b5cca15 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -159,16 +159,6 @@ namespace DRO { return true; } - - // DataReductionOperatorDerivatives::DataReductionOperatorDerivatives(const std::string& name,const unsigned int parameterIndex,const unsigned int vectorSize): - // DataReductionOperatorCellParams(name,parameterIndex,vectorSize) { - // } - // //a version with derivatives, this is the only function that is different - // bool DataReductionOperatorDerivatives::setSpatialCell(const SpatialCell* cell) { - // data = &(cell->derivatives[_parameterIndex]); - // return true; - // } - DataReductionOperatorBVOLDerivatives::DataReductionOperatorBVOLDerivatives(const std::string& name,const unsigned int parameterIndex,const unsigned int vectorSize): DataReductionOperatorCellParams(name,parameterIndex,vectorSize) { diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 4c59f022a..9e06f4f53 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -370,249 +370,6 @@ void getFieldsFromFsGrid( } -// void getBgFieldsAndDerivativesFromFsGrid( -// FsGrid< std::array, 2>& BgBGrid, -// FsGrid< fsgrids::technical, 2>& technicalGrid, -// dccrg::Dccrg& mpiGrid, -// const std::vector& cells -// ) { -// // Setup transfer buffers -// cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); -// std::vector< std::array > transferBufferBGB(nCellsOnMaxRefLvl); -// std::vector< std::array*> transferBufferPointerBGB; -// std::vector< fsgrids::technical > transferBufferTechnical(nCellsOnMaxRefLvl); -// std::vector< fsgrids::technical*> transferBufferPointerTechnical; - -// // Setup transfer pointers -// BgBGrid.setupForTransferOut(nCellsOnMaxRefLvl); -// technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); -// int k = 0; -// for(auto dccrgId : cells) { -// const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); -// // Store a pointer to the first fsgrid cell that maps to each dccrg Id -// transferBufferPointerBGB.push_back(&transferBufferBGB[k]); -// transferBufferPointerTechnical.push_back(&transferBufferTechnical[k]); -// for (auto fsgridId : fsgridIds) { -// std::array* thisCellData = &transferBufferBGB[k]; -// BgBGrid.transferDataOut(fsgridId, thisCellData); -// fsgrids::technical* thisCellDataTechnical = &transferBufferTechnical[k]; -// technicalGrid.transferDataOut(fsgridId, thisCellDataTechnical); -// k++; -// } -// } -// // Do the transfer -// BgBGrid.finishTransfersOut(); -// technicalGrid.finishTransfersOut(); - -// // Build lists of index pairs to dccrg and fsgrid -// std::vector> iCellParams; -// iCellParams.reserve(6); -// iCellParams.push_back(std::make_pair(CellParams::BGBX, fsgrids::bgbfield::BGBX)); -// iCellParams.push_back(std::make_pair(CellParams::BGBY, fsgrids::bgbfield::BGBY)); -// iCellParams.push_back(std::make_pair(CellParams::BGBZ, fsgrids::bgbfield::BGBZ)); -// iCellParams.push_back(std::make_pair(CellParams::BGBXVOL, fsgrids::bgbfield::BGBXVOL)); -// iCellParams.push_back(std::make_pair(CellParams::BGBYVOL, fsgrids::bgbfield::BGBYVOL)); -// 
iCellParams.push_back(std::make_pair(CellParams::BGBZVOL, fsgrids::bgbfield::BGBZVOL)); -// std::vector> iDerivatives; -// iDerivatives.reserve(6); -// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBxdy, fsgrids::bgbfield::dBGBxdy)); -// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBxdz, fsgrids::bgbfield::dBGBxdz)); -// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBydx, fsgrids::bgbfield::dBGBydx)); -// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBydz, fsgrids::bgbfield::dBGBydz)); -// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBzdx, fsgrids::bgbfield::dBGBzdx)); -// iDerivatives.push_back(std::make_pair(fieldsolver::dBGBzdy, fsgrids::bgbfield::dBGBzdy)); -// std::vector> iDerivativesBVOL; -// iDerivativesBVOL.reserve(6); -// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBXVOLdy, fsgrids::bgbfield::dBGBXVOLdy)); -// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBXVOLdz, fsgrids::bgbfield::dBGBXVOLdz)); -// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBYVOLdx, fsgrids::bgbfield::dBGBYVOLdx)); -// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBYVOLdz, fsgrids::bgbfield::dBGBYVOLdz)); -// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBZVOLdx, fsgrids::bgbfield::dBGBZVOLdx)); -// iDerivativesBVOL.push_back(std::make_pair(bvolderivatives::dBGBZVOLdy, fsgrids::bgbfield::dBGBZVOLdy)); - -// // Distribute data from the transfer buffer back into the appropriate mpiGrid places -// // Disregard DO_NOT_COMPUTE cells -// #pragma omp parallel for -// for(uint i = 0; i < cells.size(); ++i) { - -// const CellID dccrgId = cells[i]; -// auto cellParams = mpiGrid[dccrgId]->get_cell_parameters(); - -// // Calculate the number of fsgrid cells we loop through -// cint nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); -// // Count the number of fsgrid cells we need to average into the current dccrg cell -// int nCellsToSum = 0; - -// // TODO: Could optimize here by adding a separate branch for nCells == 1 with direct assignment of the value -// // Could also do the average in a temporary value and only access grid structure once. - -// // Initialize values to 0 -// for (auto j : iCellParams) cellParams[j.first] = 0.0; -// for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; -// for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] = 0.0; - -// for(int iCell = 0; iCell < nCells; ++iCell) { -// // The fsgrid cells that cover the i'th dccrg cell are pointed at by -// // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. 
-// // We want to average over those who are not DO_NOT_COMPUTE to get the value for the dccrg cell -// if ((transferBufferPointerTechnical[i] + iCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { -// continue; -// } else { -// nCellsToSum++; - -// std::array* thisCellData = transferBufferPointerBGB[i] + iCell; - -// for (auto j : iCellParams) cellParams[j.first] += thisCellData->at(j.second); -// for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] += thisCellData->at(j.second); -// for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] += thisCellData->at(j.second); -// } -// } - -// if (nCellsToSum > 0) { -// // Divide by the number of cells to get the average -// for (auto j : iCellParams) cellParams[j.first] /= nCellsToSum; -// for (auto j : iDerivatives) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; -// for (auto j : iDerivativesBVOL) mpiGrid[dccrgId]->derivativesBVOL[j.first] /= nCellsToSum; -// } -// } -// } - - -// void getDerivativesFromFsGrid( -// FsGrid< std::array, 2>& dperbGrid, -// FsGrid< std::array, 2>& dmomentsGrid, -// FsGrid< fsgrids::technical, 2>& technicalGrid, -// dccrg::Dccrg& mpiGrid, -// const std::vector& cells -// ) { - -// // Setup transfer buffers -// cint nCellsOnMaxRefLvl = getNumberOfCellsOnMaxRefLvl(mpiGrid, cells); -// std::vector< std::array > dperbTransferBuffer(nCellsOnMaxRefLvl); -// std::vector< std::array > dmomentsTransferBuffer(nCellsOnMaxRefLvl); - -// std::vector< std::array*> dperbTransferBufferPointer; -// std::vector< std::array*> dmomentsTransferBufferPointer; - -// std::vector< fsgrids::technical > transferBufferTechnical(nCellsOnMaxRefLvl); -// std::vector< fsgrids::technical*> transferBufferPointerTechnical; - -// dperbGrid.setupForTransferOut(nCellsOnMaxRefLvl); -// dmomentsGrid.setupForTransferOut(nCellsOnMaxRefLvl); -// technicalGrid.setupForTransferOut(nCellsOnMaxRefLvl); - -// int k = 0; -// for (auto dccrgId : cells) { - -// // Assuming same local size in all fsgrids -// const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); -// // Store a pointer to the first fsgrid cell that maps to each dccrg Id -// dperbTransferBufferPointer.push_back(&dperbTransferBuffer[k]); -// dmomentsTransferBufferPointer.push_back(&dmomentsTransferBuffer[k]); -// transferBufferPointerTechnical.push_back(&transferBufferTechnical[k]); - -// for (auto fsgridId : fsgridIds) { - -// std::array* dperbCellData = &dperbTransferBuffer[k]; -// dperbGrid.transferDataOut(fsgridId, dperbCellData); -// std::array* dmomentsCellData = &dmomentsTransferBuffer[k]; -// dmomentsGrid.transferDataOut(fsgridId, dmomentsCellData); -// fsgrids::technical* thisCellDataTechnical = &transferBufferTechnical[k]; -// technicalGrid.transferDataOut(fsgridId, thisCellDataTechnical); -// k++; -// } -// } - -// // Do the transfer -// dperbGrid.finishTransfersOut(); -// dmomentsGrid.finishTransfersOut(); -// technicalGrid.finishTransfersOut(); - -// std::vector> iDmoments; -// std::vector> iDperb; -// iDmoments.reserve(24); -// iDmoments.push_back(std::make_pair(fieldsolver::drhomdx, fsgrids::dmoments::drhomdx)); -// iDmoments.push_back(std::make_pair(fieldsolver::drhomdy, fsgrids::dmoments::drhomdy)); -// iDmoments.push_back(std::make_pair(fieldsolver::drhomdz, fsgrids::dmoments::drhomdz)); -// iDmoments.push_back(std::make_pair(fieldsolver::drhoqdx, fsgrids::dmoments::drhoqdx)); -// iDmoments.push_back(std::make_pair(fieldsolver::drhoqdy, fsgrids::dmoments::drhoqdy)); -// 
iDmoments.push_back(std::make_pair(fieldsolver::drhoqdz, fsgrids::dmoments::drhoqdz)); -// iDmoments.push_back(std::make_pair(fieldsolver::dp11dx , fsgrids::dmoments::dp11dx )); -// iDmoments.push_back(std::make_pair(fieldsolver::dp11dy , fsgrids::dmoments::dp11dy )); -// iDmoments.push_back(std::make_pair(fieldsolver::dp11dz , fsgrids::dmoments::dp11dz )); -// iDmoments.push_back(std::make_pair(fieldsolver::dp22dx , fsgrids::dmoments::dp22dx )); -// iDmoments.push_back(std::make_pair(fieldsolver::dp22dy , fsgrids::dmoments::dp22dy )); -// iDmoments.push_back(std::make_pair(fieldsolver::dp22dz , fsgrids::dmoments::dp22dz )); -// iDmoments.push_back(std::make_pair(fieldsolver::dp33dx , fsgrids::dmoments::dp33dx )); -// iDmoments.push_back(std::make_pair(fieldsolver::dp33dy , fsgrids::dmoments::dp33dy )); -// iDmoments.push_back(std::make_pair(fieldsolver::dp33dz , fsgrids::dmoments::dp33dz )); -// iDmoments.push_back(std::make_pair(fieldsolver::dVxdx , fsgrids::dmoments::dVxdx )); -// iDmoments.push_back(std::make_pair(fieldsolver::dVxdy , fsgrids::dmoments::dVxdy )); -// iDmoments.push_back(std::make_pair(fieldsolver::dVxdz , fsgrids::dmoments::dVxdz )); -// iDmoments.push_back(std::make_pair(fieldsolver::dVydx , fsgrids::dmoments::dVydx )); -// iDmoments.push_back(std::make_pair(fieldsolver::dVydy , fsgrids::dmoments::dVydy )); -// iDmoments.push_back(std::make_pair(fieldsolver::dVydz , fsgrids::dmoments::dVydz )); -// iDmoments.push_back(std::make_pair(fieldsolver::dVzdx , fsgrids::dmoments::dVzdx )); -// iDmoments.push_back(std::make_pair(fieldsolver::dVzdy , fsgrids::dmoments::dVzdy )); -// iDmoments.push_back(std::make_pair(fieldsolver::dVzdz , fsgrids::dmoments::dVzdz )); - -// iDperb.reserve(15); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdy , fsgrids::dperb::dPERBxdy )); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdz , fsgrids::dperb::dPERBxdz )); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBydx , fsgrids::dperb::dPERBydx )); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBydz , fsgrids::dperb::dPERBydz )); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdx , fsgrids::dperb::dPERBzdx )); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdy , fsgrids::dperb::dPERBzdy )); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdyy, fsgrids::dperb::dPERBxdyy)); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdzz, fsgrids::dperb::dPERBxdzz)); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBydxx, fsgrids::dperb::dPERBydxx)); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBydzz, fsgrids::dperb::dPERBydzz)); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdxx, fsgrids::dperb::dPERBzdxx)); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdyy, fsgrids::dperb::dPERBzdyy)); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBxdyz, fsgrids::dperb::dPERBxdyz)); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBydxz, fsgrids::dperb::dPERBydxz)); -// iDperb.push_back(std::make_pair(fieldsolver::dPERBzdxy, fsgrids::dperb::dPERBzdxy)); - -// // Distribute data from the transfer buffers back into the appropriate mpiGrid places -// // Disregard DO_NOT_COMPUTE cells -// #pragma omp parallel for -// for(uint i = 0; i < cells.size(); ++i) { - -// const CellID dccrgId = cells[i]; - -// // Calculate the number of fsgrid cells we loop through -// cint nCells = pow(pow(2,mpiGrid.mapping.get_maximum_refinement_level() - mpiGrid.mapping.get_refinement_level(dccrgId)),3); -// // Count the number of fsgrid cells we need to 
average into the current dccrg cell -// int nCellsToSum = 0; - -// for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; -// for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] = 0.0; - -// for(int iCell = 0; iCell < nCells; ++iCell) { -// // The fsgrid cells that cover the i'th dccrg cell are pointed at by -// // transferBufferPointer[i] ... transferBufferPointer[i] + nCell. -// // We want to average over those who are not DO_NOT_COMPUTE to get the value for the dccrg cell -// if ((transferBufferPointerTechnical[i] + iCell)->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { -// continue; -// } else { -// nCellsToSum++; - -// std::array* dperb = dperbTransferBufferPointer[i] + iCell; -// std::array* dmoments = dmomentsTransferBufferPointer[i] + iCell; - -// for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] += dmoments->at(j.second); -// for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] += dperb ->at(j.second); -// } -// } - -// if (nCellsToSum > 0) { -// for (auto j : iDmoments) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; -// for (auto j : iDperb ) mpiGrid[dccrgId]->derivatives[j.first] /= nCellsToSum; -// } -// } -// } - - /* Map from dccrg cell id to fsgrid global cell ids when they aren't identical (ie. when dccrg has refinement). */ diff --git a/grid.cpp b/grid.cpp index 8cb324b47..1c073b1f4 100644 --- a/grid.cpp +++ b/grid.cpp @@ -289,26 +289,7 @@ void initializeGrids( calculateInitialVelocityMoments(mpiGrid); phiprof::stop("Init moments"); } - - // phiprof::start("Initial fsgrid coupling"); - // // Couple FSGrids to mpiGrid. Note that the coupling information is shared - // // between them. - // technicalGrid.setupForGridCoupling(cells.size()); - - // // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. - // // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. - // for(auto& dccrgId : cells) { - // const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); - // for (auto fsgridId : fsgridIds) { - - // technicalGrid.setGridCoupling(fsgridId, myRank); - // } - // } - - // technicalGrid.finishGridCoupling(); - // phiprof::stop("Initial fsgrid coupling"); - phiprof::start("setProjectBField"); project.setProjectBField(perBGrid, BgBGrid, technicalGrid); perBGrid.updateGhostCells(); @@ -323,8 +304,6 @@ void initializeGrids( phiprof::stop("getFieldsFromFsGrid"); phiprof::start("Finish fsgrid setup"); - // getFieldDataFromFsGrid(perBGrid, technicalGrid, mpiGrid, cells, CellParams::PERBX); - // getBgFieldsAndDerivativesFromFsGrid(BgBGrid, technicalGrid, mpiGrid, cells); // WARNING this means moments and dt2 moments are the same here. 
feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,false); diff --git a/iowrite.cpp b/iowrite.cpp index 9bfd95a8c..94ceb8671 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -1270,7 +1270,6 @@ bool writeRestart(dccrg::Dccrg& mpiGrid, phiprof::start("reduceddataIO"); //write out DROs we need for restarts DataReducer restartReducer; - //restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("background_B",CellParams::BGBX,3)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("perturbed_B",CellParams::PERBX,3)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("moments",CellParams::RHOM,5)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("moments_dt2",CellParams::RHOM_DT2,5)); @@ -1284,7 +1283,6 @@ bool writeRestart(dccrg::Dccrg& mpiGrid, restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("max_v_dt",CellParams::MAXVDT,1)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("max_r_dt",CellParams::MAXRDT,1)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("max_fields_dt",CellParams::MAXFDT,1)); - // restartReducer.addOperator(new DRO::DataReductionOperatorDerivatives("derivatives",0,fieldsolver::N_SPATIAL_CELL_DERIVATIVES)); restartReducer.addOperator(new DRO::DataReductionOperatorBVOLDerivatives("Bvolume_derivatives",0,bvolderivatives::N_BVOL_DERIVATIVES)); restartReducer.addOperator(new DRO::MPIrank); restartReducer.addOperator(new DRO::BoundaryType); diff --git a/spatial_cell.cpp b/spatial_cell.cpp index b54652cbe..8f179e984 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -674,13 +674,7 @@ namespace spatial_cell { displacements.push_back((uint8_t*) &(this->parameters[CellParams::DX]) - (uint8_t*) this); block_lengths.push_back(sizeof(Real) * 3); } - -// // send BGBX BGBY BGBZ -// if ((SpatialCell::mpi_transfer_type & Transfer::CELL_BGB)!=0){ -// displacements.push_back((uint8_t*) &(this->parameters[CellParams::BGBX]) - (uint8_t*) this); -// block_lengths.push_back(sizeof(Real) * 3); -// } - + // send BGBXVOL BGBYVOL BGBZVOL PERBXVOL PERBYVOL PERBZVOL if ((SpatialCell::mpi_transfer_type & Transfer::CELL_BVOL)!=0){ displacements.push_back((uint8_t*) &(this->parameters[CellParams::BGBXVOL]) - (uint8_t*) this); @@ -735,12 +729,6 @@ namespace spatial_cell { block_lengths.push_back(sizeof(Real)); } - // // send spatial cell derivatives - // if ((SpatialCell::mpi_transfer_type & Transfer::CELL_DERIVATIVES)!=0){ - // displacements.push_back((uint8_t*) &(this->derivatives[0]) - (uint8_t*) this); - // block_lengths.push_back(sizeof(Real) * fieldsolver::N_SPATIAL_CELL_DERIVATIVES); - // } - // send spatial cell BVOL derivatives if ((SpatialCell::mpi_transfer_type & Transfer::CELL_BVOL_DERIVATIVES)!=0){ displacements.push_back((uint8_t*) &(this->derivativesBVOL[0]) - (uint8_t*) this); @@ -752,11 +740,6 @@ namespace spatial_cell { block_lengths.push_back(sizeof(uint64_t)); } - // // send Hall term components - // if ((SpatialCell::mpi_transfer_type & Transfer::CELL_HALL_TERM)!=0){ - // displacements.push_back((uint8_t*) &(this->parameters[CellParams::EXHALL_000_100]) - (uint8_t*) this); - // block_lengths.push_back(sizeof(Real) * 12); - // } // send electron pressure gradient term components if ((SpatialCell::mpi_transfer_type & Transfer::CELL_GRADPE_TERM)!=0){ displacements.push_back((uint8_t*) &(this->parameters[CellParams::EXGRADPE]) - (uint8_t*) this); diff --git a/spatial_cell.hpp b/spatial_cell.hpp index 388055601..a75721a09 100644 --- a/spatial_cell.hpp +++ 
b/spatial_cell.hpp @@ -316,10 +316,6 @@ namespace spatial_cell { //random_data* get_rng_data_buffer(); // Member variables // - //Real derivatives[fieldsolver::N_SPATIAL_CELL_DERIVATIVES]; /**< Derivatives of bulk variables in this spatial cell.*/ - //std::array derivatives; /**< Derivatives of bulk variables in this spatial cell.*/ - //Real derivativesBVOL[bvolderivatives::N_BVOL_DERIVATIVES]; /**< Derivatives of BVOL needed by the acceleration. - // * Separate array because it does not need to be communicated.*/ std::array derivativesBVOL; /**< Derivatives of BVOL needed by the acceleration. * Separate array because it does not need to be communicated.*/ //Real parameters[CellParams::N_SPATIAL_CELL_PARAMS]; /**< Bulk variables in this spatial cell.*/ diff --git a/vlasiator.cpp b/vlasiator.cpp index 1158918dc..24cf104f3 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -538,13 +538,6 @@ int main(int argn,char* args[]) { phiprof::stop("compute-dt"); } - phiprof::start("getFieldsFromFsGrid"); - // These should be done by initializeFieldPropagator() if the propagation is turned off. - volGrid.updateGhostCells(); - technicalGrid.updateGhostCells(); - getFieldsFromFsGrid(volGrid, BgBGrid, EGradPeGrid, technicalGrid, mpiGrid, cells); - phiprof::stop("getFieldsFromFsGrid"); - // Save restart data if (P::writeInitialState) { phiprof::start("write-initial-state"); @@ -731,15 +724,9 @@ int main(int argn,char* args[]) { it++) { if (*it == "FluxB") { if(myRank == MASTER_RANK) cerr << __FILE__ << " " << __LINE__ << "ERROR: Diagnostic output from FsGrid is no longer supported!" << endl; - // phiprof::start("fsgrid-coupling-out"); - // getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX); - // phiprof::stop("fsgrid-coupling-out"); } if (*it == "FluxE") { if(myRank == MASTER_RANK) cerr << __FILE__ << " " << __LINE__ << "ERROR: Diagnostic output from FsGrid is no longer supported!" 
<< endl; - // phiprof::start("fsgrid-coupling-out"); - // getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); - // phiprof::stop("fsgrid-coupling-out"); } } @@ -756,41 +743,6 @@ int main(int argn,char* args[]) { for (uint i = 0; i < P::systemWriteTimeInterval.size(); i++) { if (P::systemWriteTimeInterval[i] >= 0.0 && P::t >= P::systemWrites[i] * P::systemWriteTimeInterval[i] - DT_EPSILON) { - // if (extractFsGridFields) { - // vector::const_iterator it; - // for (it = P::outputVariableList.begin(); - // it != P::outputVariableList.end(); - // it++) { - // if (*it == "B" || - // *it == "PerturbedB" - // ) { - // phiprof::start("fsgrid-coupling-out"); - // getFieldDataFromFsGrid(perBGrid,technicalGrid,mpiGrid,cells,CellParams::PERBX); - // phiprof::stop("fsgrid-coupling-out"); - // } - // if (*it == "E") { - // phiprof::start("fsgrid-coupling-out"); - // getFieldDataFromFsGrid(EGrid,technicalGrid,mpiGrid,cells,CellParams::EX); - // phiprof::stop("fsgrid-coupling-out"); - // } - // if (*it == "HallE") { - // phiprof::start("fsgrid-coupling-out"); - // getFieldDataFromFsGrid(EHallGrid,technicalGrid,mpiGrid,cells,CellParams::EXHALL_000_100); - // phiprof::stop("fsgrid-coupling-out"); - // } - // if (*it == "GradPeE") { - // phiprof::start("fsgrid-coupling-out"); - // getFieldDataFromFsGrid(EGradPeGrid,technicalGrid,mpiGrid,cells,CellParams::EXGRADPE); - // phiprof::stop("fsgrid-coupling-out"); - // } - // if (*it == "derivs") { - // phiprof::start("fsgrid-coupling-out"); - // getDerivativesFromFsGrid(dPerBGrid, dMomentsGrid, technicalGrid, mpiGrid, cells); - // phiprof::stop("fsgrid-coupling-out"); - // } - // } - // extractFsGridFields = false; - // } phiprof::start("write-system"); logFile << "(IO): Writing spatial cell and reduced system data to disk, tstep = " << P::tstep << " t = " << P::t << endl << writeVerbose; From 9db4f427173bad8464dc5a5cb7e365177264ebfa Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Tue, 14 May 2019 15:37:28 +0300 Subject: [PATCH 418/602] Removed references to fieldsolver namespace --- fieldsolver/fs_common.h | 1 - fieldsolver/ldz_electric_field.cpp | 1 - spatial_cell.hpp | 2 -- 3 files changed, 4 deletions(-) diff --git a/fieldsolver/fs_common.h b/fieldsolver/fs_common.h index 236be28c8..be37fe226 100644 --- a/fieldsolver/fs_common.h +++ b/fieldsolver/fs_common.h @@ -60,7 +60,6 @@ const Real ZERO = 0.0; static creal EPS = 1.0e-30; using namespace std; -using namespace fieldsolver; bool initializeFieldPropagator( FsGrid< std::array, 2> & perBGrid, diff --git a/fieldsolver/ldz_electric_field.cpp b/fieldsolver/ldz_electric_field.cpp index ad6a85471..88ead4faf 100644 --- a/fieldsolver/ldz_electric_field.cpp +++ b/fieldsolver/ldz_electric_field.cpp @@ -29,7 +29,6 @@ #define DEBUG_FSOLVER #endif -namespace fs = fieldsolver; namespace pc = physicalconstants; using namespace std; diff --git a/spatial_cell.hpp b/spatial_cell.hpp index a75721a09..96d8d5148 100644 --- a/spatial_cell.hpp +++ b/spatial_cell.hpp @@ -1611,7 +1611,6 @@ namespace spatial_cell { size += velocity_block_with_content_list.size() * sizeof(vmesh::GlobalID); size += velocity_block_with_no_content_list.size() * sizeof(vmesh::GlobalID); size += CellParams::N_SPATIAL_CELL_PARAMS * sizeof(Real); - size += fieldsolver::N_SPATIAL_CELL_DERIVATIVES * sizeof(Real); size += bvolderivatives::N_BVOL_DERIVATIVES * sizeof(Real); for (size_t p=0; p Date: Tue, 14 May 2019 15:46:32 +0300 Subject: [PATCH 419/602] fixed list of output variables --- parameters.cpp | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/parameters.cpp b/parameters.cpp index f8fe889a7..c93257d5e 100644 --- a/parameters.cpp +++ b/parameters.cpp @@ -217,7 +217,7 @@ bool Parameters::addParameters(){ // Output variable parameters // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh - Readparameters::addComposing("variables.output", "List of data reduction operators (DROs) to add to the grid file output. Each variable to be added has to be on a new line output = XXX. Available (20190508): B fg_B BackgroundB vg_BackgroundB fg_BackgroundB PerturbedB vg_PerturbedB fg_PerturbedB E fg_E Rhom vg_Rhom fg_Rhom Rhoq vg_Rhoq fg_Rhoq populations_Rho V vg_V fg_V populations_V populations_moments populations_EnergyDensity_Backstream populations_moments_NonBackstream populations_EffectiveSparsityThreshold populations_RhoLossAdjust LBweight MaxVdt MaxRdt populations_MaxVdt populations_MaxRdt MaxFieldsdt MPIrank vg_rank FsGridRank fg_rank FsGridBoundaryType BoundaryType vg_BoundaryType fg_BoundaryType BoundaryLayer vg_BoundaryLayer fg_BoundaryLayer populations_Blocks fSaved populations_accSubcycles VolE vg_VolE fg_VolE HallE GradPeE VolB vg_VolB fg_VolB BackgroundVolB PerturbedVolB Pressure vg_Pressure fg_Pressure populations_PTensor derivs BVOLderivs GridCoordinates Potential BackgroundVolE ChargeDensity PotentialError MeshData"); + Readparameters::addComposing("variables.output", "List of data reduction operators (DROs) to add to the grid file output. Each variable to be added has to be on a new line output = XXX. Available (20190514): B fg_B BackgroundB vg_BackgroundB fg_BackgroundB PerturbedB vg_PerturbedB fg_PerturbedB E fg_E Rhom vg_Rhom fg_Rhom Rhoq vg_Rhoq fg_Rhoq populations_Rho V vg_V fg_V populations_V populations_moments_Backstream populations_moments_NonBackstream populations_EffectiveSparsityThreshold populations_RhoLossAdjust populations_EnergyDensity LBweight MaxVdt MaxRdt populations_MaxVdt populations_MaxRdt MaxFieldsdt MPIrank vg_rank FsGridRank fg_rank FsGridBoundaryType BoundaryType vg_BoundaryType fg_BoundaryType BoundaryLayer vg_BoundaryLayer fg_BoundaryLayer populations_Blocks fSaved populations_accSubcycles VolE vg_VolE fg_VolE HallE GradPeE VolB vg_VolB fg_VolB BackgroundVolB PerturbedVolB Pressure vg_Pressure fg_Pressure populations_PTensor derivs BVOLderivs GridCoordinates Potential BackgroundVolE ChargeDensity PotentialError MeshData"); // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh Readparameters::addComposing("variables.diagnostic", "List of data reduction operators (DROs) to add to the diagnostic runtime output. Each variable to be added has to be on a new line diagnostic = XXX. 
Available (20190320): FluxB FluxE populations_Blocks Rhom populations_RhoLossAdjust LBweight populations_MaxVdt MaxVdt populations_MaxRdt MaxRdt MaxFieldsdt populations_MaxDistributionFunction populations_MinDistributionFunction"); From bbd78c6c0adf7aafc9f6095dc774f63e1967a13a Mon Sep 17 00:00:00 2001 From: Grandin Maxime T J Date: Tue, 14 May 2019 15:50:44 +0300 Subject: [PATCH 420/602] WIP: adding parameters from config file to DRO --- datareduction/datareductionoperator.cpp | 6 ++++++ datareduction/datareductionoperator.h | 3 ++- object_wrapper.cpp | 5 +++++ particle_species.h | 9 +++++++++ 4 files changed, 22 insertions(+), 1 deletion(-) diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index d51257915..a7e29388c 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -1597,4 +1597,10 @@ namespace DRO { bool VariablePrecipitationDiffFlux::setSpatialCell(const SpatialCell* cell) { return true; } + + bool VariablePrecipitation::writeParameters(vlsv::Writer& vlsvWriter) { + for (int i=0; i backstreamV; /*!< Centre of sphere to split the distribution into backstreaming and non-backstreaming. 0 (default in cfg) disables the DRO. */ + Real EnergyDensityLimit1; /*!< Lower bound for second energy density bin in units of solar wind ram energy. Default 5. */ + Real EnergyDensityLimit2; /*!< Lower bound for third energy density bin in units of solar wind ram energy. Default 10. */ + Real SolarWindEnergy; /*!< Solar wind ram energy, used for calculating energy density bins. Default value of 0 attempts to use SolarWindSpeed instead. */ + Real SolarWindSpeed; /*!< Solar wind speed, used for calculating energy density bins if solar wind ram energy wasn't given. Default 0. */ + + int nChannels; /*!< Number of energy channels for precipitation differential flux evaluation. Default 16. */ + Real emin; /*!< Lowest energy channel (in keV) for precipitation differential flux evaluation. Default 0.1. */ + Real emax; /*!< Highest energy channel (in keV) for precipitation differential flux evaluation. Default 100. */ + Species(); Species(const Species& other); ~Species(); From 4e5a51b5d91e5122c5a73906f575e617d58f344c Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Tue, 14 May 2019 22:25:10 +0300 Subject: [PATCH 421/602] Bug fix: only set DO_NOT_COMPUTE for ionosphere cells whose layer has not been set.
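Before this change, any boundary cell whose sysBoundaryLayer was still
unset got disabled, which could also catch other boundary types. In
outline (the actual one-line change is in the hunk below):

    // before: any unclassified boundary cell was disabled
    if (technicalGrid.get(x,y,z)->sysBoundaryLayer == 0 &&
        technicalGrid.get(x,y,z)->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { ... }
    // after: only unclassified ionosphere cells are disabled
    if (technicalGrid.get(x,y,z)->sysBoundaryLayer == 0 &&
        technicalGrid.get(x,y,z)->sysBoundaryFlag == sysboundarytype::IONOSPHERE) {
       technicalGrid.get(x,y,z)->sysBoundaryFlag = sysboundarytype::DO_NOT_COMPUTE;
    }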
--- sysboundary/sysboundary.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp index 479536f44..ae6ceab9b 100644 --- a/sysboundary/sysboundary.cpp +++ b/sysboundary/sysboundary.cpp @@ -559,7 +559,7 @@ bool SysBoundary::classifyCells(dccrg::DccrgsysBoundaryLayer == 0 && technicalGrid.get(x,y,z)->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { + if (technicalGrid.get(x,y,z)->sysBoundaryLayer == 0 && technicalGrid.get(x,y,z)->sysBoundaryFlag == sysboundarytype::IONOSPHERE) { technicalGrid.get(x,y,z)->sysBoundaryFlag = sysboundarytype::DO_NOT_COMPUTE; } } From e94229ee8a98ffb47b3920fc92e192f20aafa668 Mon Sep 17 00:00:00 2001 From: ykempf Date: Tue, 14 May 2019 23:43:19 +0300 Subject: [PATCH 422/602] fix fsgrid x y z and dx dy dz DROs --- datareduction/datareducer.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index c4ce1f639..59e5648be 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -742,7 +742,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); // Iterate through fsgrid cells and extract total BVOL for(int z=0; z& technicalGrid)->std::vector { std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); // Iterate through fsgrid cells and extract total BVOL for(int z=0; z& technicalGrid)->std::vector { std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); // Iterate through fsgrid cells and extract total BVOL for(int z=0; z& technicalGrid)->std::vector { std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); // Iterate through fsgrid cells and extract total BVOL for(int z=0; z& technicalGrid)->std::vector { std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); // Iterate through fsgrid cells and extract total BVOL for(int z=0; z& technicalGrid)->std::vector { std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); // Iterate through fsgrid cells and extract total BVOL for(int z=0; z Date: Wed, 15 May 2019 08:47:21 +0300 Subject: [PATCH 423/602] Fixed initialisation of layers/boundaries, technical grid needs updating. 
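classifyCells writes sysBoundaryFlag only into the locally owned fsgrid
cells, so the ghost copies have to be refreshed before anything reads a
neighbor's flag. A minimal sketch of the intended call order (loop
bodies elided here):

    // 1. classify local cells: sets technicalGrid.get(x,y,z)->sysBoundaryFlag
    // 2. exchange the freshly set flags with neighboring ranks:
    technicalGrid.updateGhostCells();
    // 3. compute sysBoundaryLayer, which looks at the flags of neighboring cells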
--- sysboundary/sysboundary.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp index 07686f39c..89825b4e8 100644 --- a/sysboundary/sysboundary.cpp +++ b/sysboundary/sysboundary.cpp @@ -409,6 +409,7 @@ bool SysBoundary::classifyCells(dccrg::Dccrg 2 && technicalGrid.get(x,y,z)->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { technicalGrid.get(x,y,z)->sysBoundaryFlag = sysboundarytype::DO_NOT_COMPUTE; } + } } } } } + technicalGrid.updateGhostCells(); } // One more pass to make sure, in particular if the ionosphere is wide enough From af0e268002a9a5bc753b2f3edc11bcc0ac271b26 Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 15 May 2019 09:02:21 +0300 Subject: [PATCH 424/602] Moved technicalGrid.updateGhostCells to where technicalGrid things are done. --- sysboundary/sysboundary.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp index 89825b4e8..fc12e00a4 100644 --- a/sysboundary/sysboundary.cpp +++ b/sysboundary/sysboundary.cpp @@ -409,7 +409,6 @@ bool SysBoundary::classifyCells(dccrg::Dccrg Date: Wed, 15 May 2019 09:33:26 +0300 Subject: [PATCH 425/602] reordered energydensity dro contents, reinstated ionosphere warnings, added population name to output parameters --- datareduction/datareductionoperator.cpp | 21 ++++++++++++--------- sysboundary/ionosphere.cpp | 6 ++---- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index 7b1b665e0..60064d3e3 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -1438,6 +1438,9 @@ namespace DRO { VariableEnergyDensity::VariableEnergyDensity(cuint _popID): DataReductionOperatorHasParameters(),popID(_popID) { popName = getObjectWrapper().particleSpecies[popID].name; + solarwindenergy = getObjectWrapper().particleSpecies[popID].SolarWindEnergy; + E1limit = solarwindenergy * getObjectWrapper().particleSpecies[popID].EnergyDensityLimit1; + E2limit = solarwindenergy * getObjectWrapper().particleSpecies[popID].EnergyDensityLimit2; } VariableEnergyDensity::~VariableEnergyDensity() { } @@ -1452,6 +1455,11 @@ namespace DRO { bool VariableEnergyDensity::reduceData(const SpatialCell* cell,char* buffer) { const Real HALF = 0.5; + + for(int i = 0; i < 3; i++) { + EDensity[i] = 0.0; + } + # pragma omp parallel { Real thread_E0_sum = 0.0; @@ -1508,12 +1516,6 @@ namespace DRO { } bool VariableEnergyDensity::setSpatialCell(const SpatialCell* cell) { - for(int i = 0; i < 3; i++) { - EDensity[i] = 0.0; - } - solarwindenergy = getObjectWrapper().particleSpecies[popID].SolarWindEnergy; - E1limit = solarwindenergy * getObjectWrapper().particleSpecies[popID].EnergyDensityLimit1; - E2limit = solarwindenergy * getObjectWrapper().particleSpecies[popID].EnergyDensityLimit2; return true; } @@ -1522,9 +1524,10 @@ namespace DRO { Real swe = solarwindenergy/physicalconstants::CHARGE; Real e1l = E1limit/physicalconstants::CHARGE; Real e2l = E2limit/physicalconstants::CHARGE; - if( vlsvWriter.writeParameter("EnergyDensityESW", &swe) == false ) { return false; } - if( vlsvWriter.writeParameter("EnergyDensityELimit1", &e1l) == false ) { return false; } - if( vlsvWriter.writeParameter("EnergyDensityELimit2", &e2l) == false ) { return false; } + + if( vlsvWriter.writeParameter(popName+"_EnergyDensityESW", &swe) == false ) { return false; } + if( vlsvWriter.writeParameter(popName+"_EnergyDensityELimit1", 
&e1l) == false ) { return false; } + if( vlsvWriter.writeParameter(popName+"_EnergyDensityELimit2", &e2l) == false ) { return false; } return true; } diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index 5647a920f..3463faf55 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -550,10 +550,8 @@ namespace SBC { ) { std::vector< std::array > closestCells = getAllClosestNonsysboundaryCells(technicalGrid, i,j,k); if (closestCells.size() == 1 && closestCells[0][0] == std::numeric_limits::min() ) { - //mismatch on fsgrid and mpigrid? - //std::cerr << __FILE__ << ":" << __LINE__ << ":" << "No closest cells found!" << std::endl; - //abort(); - return 0; + std::cerr << __FILE__ << ":" << __LINE__ << ":" << "No closest cells found!" << std::endl; + abort(); } FsGrid< std::array, 2> * bGrid; From c11d6b636e1f5ce79c3df1674b9dc1e92158cd8a Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 9 May 2019 14:17:42 +0300 Subject: [PATCH 426/602] Added doxygen documentation to all functions. TODO: add details to update_remote_mapping_contribution_amr. --- vlasovsolver/cpu_trans_map_amr.cpp | 160 ++++++++++++++++++++++++----- 1 file changed, 137 insertions(+), 23 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 1976016d2..b85c25255 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -24,6 +24,13 @@ using namespace spatial_cell; #define i_trans_ps_blockv_pencil(planeVectorIndex, planeIndex, blockIndex, lengthOfPencil) ( (blockIndex) + VLASOV_STENCIL_WIDTH + ( (planeVectorIndex) + (planeIndex) * VEC_PER_PLANE ) * ( lengthOfPencil + 2 * VLASOV_STENCIL_WIDTH) ) + +/* Get the one-dimensional neighborhood index for a given direction and neighborhood size. + * + * @param dimension spatial dimension of neighborhood + * @param stencil neighborhood size in cells + * @return neighborhood index that can be passed to DCCRG functions + */ int getNeighborhood(const uint dimension, const uint stencil) { int neighborhood = 0; @@ -60,8 +67,21 @@ int getNeighborhood(const uint dimension, const uint stencil) { } +/* Get pointers to spatial cells that are considered source cells for a pencil. + * Source cells are cells that the pencil reads data from to compute polynomial + * fits that are used for propagation in the vlasov solver. All cells included + * in the pencil + VLASOV_STENCIL_WIDTH cells on both ends are source cells. + * Invalid cells are replaced by closest good cells. + * Boundary cells are included. 
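+ * As an illustration (numbers chosen only for the example): with VLASOV_STENCIL_WIDTH = 2,
+ * a pencil of 4 cells reads from 4 + 2*2 = 8 source cells, laid out along the pencil as
+ * [2 cells before | the 4 pencil cells | 2 cells after].
+ *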
+ * @param [in] mpiGrid DCCRG grid object + * @param [in] pencils pencil data struct + * @param [in] iPencil index of a pencil in the pencils data struct + * @param [in] dimension spatial dimension + * @param [out] sourceCells pointer to an array of pointers to SpatialCell objects for the source cells + */ void computeSpatialSourceCellsForPencil(const dccrg::Dccrg& mpiGrid, - setOfPencils pencils, + setOfPencils& pencils, const uint iPencil, const uint dimension, SpatialCell **sourceCells){ @@ -162,7 +182,17 @@ void computeSpatialSourceCellsForPencil(const dccrg::Dccrg& mpiGrid, setOfPencils& pencils, const uint dimension, @@ -228,6 +258,7 @@ void computeSpatialTargetCellsForPencils(const dccrg::DccrgsysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY ) { targetCells[i] = NULL; @@ -236,6 +267,16 @@ void computeSpatialTargetCellsForPencils(const dccrg::Dccrg &grid, CellID id, int dimension = 0, uint path = 0) { @@ -270,30 +311,47 @@ CellID selectNeighbor(const dccrg::Dccrg return neighbor; } +/* Recursive function for building one-dimensional pencils to cover local DCCRG cells. + * Starts from a given seedId and proceeds by finding the nearest neighbor in the given dimension + * and adding it to the pencil until no neighbors are found or an endId is met. When a higher + * refinement level (i.e. multiple nearest neighbors) is met, the pencil splits into four + * copies to remain at a width of 1 cell. This is done by the function calling itself recursively + * and passing as inputs the cells added so far. The cell selected by each copy of the function + * at a split is stored in the path variable; the same path has to be followed if a refinement + * level is encountered multiple times. + * + * @param [in] grid DCCRG grid object + * @param [out] pencils Pencil data struct + * @param [in] seedId DCCRG cell id where we start building the pencil. + * The pencil will continue in the + direction in the given dimension until an end condition is met + * @param [in] dimension Spatial dimension + * @param [in] path Integer value that determines which neighbor is added to the pencil when a higher refinement level is met + * @param [in] endIds Prescribed end conditions for the pencil. If any of these cell ids is about to be added to the pencil, + * the builder terminates. + */ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg &grid, - setOfPencils &pencils, const CellID startingId, + setOfPencils &pencils, const CellID seedId, vector ids, const uint dimension, vector path, const vector &endIds) { const bool debug = false; CellID nextNeighbor; - CellID id = startingId; + CellID id = seedId; int startingRefLvl = grid.get_refinement_level(id); bool periodic = false; if( ids.size() == 0 ) - ids.push_back(startingId); + ids.push_back(seedId); // If the cell where we start is refined, we need to figure out which path // to follow in future refined cells. This is a bit hacky but we have to // use the order of the children of the parent cell to figure out which // corner we are in. - // Maybe you could use physical coordinates here?
int startingPathSize = path.size(); auto it = path.end(); if( startingRefLvl > startingPathSize ) { - CellID myId = startingId; + CellID myId = seedId; for ( int i = path.size(); i < startingRefLvl; ++i) { @@ -436,6 +494,16 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg &vmesh, const uint lengthOfPencil) { @@ -542,6 +610,16 @@ void propagatePencil(Vec* dz, Vec* values, const uint dimension, } } +/* Determine which cells in the local DCCRG mesh should be starting points for pencils. + * If a neighbor cell is non-local, across a periodic boundary, or in non-periodic boundary layer 1, + * then we use this cell as a seed for pencils. + * + * @param [in] mpiGrid DCCRG grid object + * @param [in] localPropagatedCells List of local cells that get propagated + * i.e. not boundary or DO_NOT_COMPUTE + * @param [in] dimension Spatial dimension + * @param [out] seedIds List of cell ids that will be starting points for pencils + */ void getSeedIds(const dccrg::Dccrg& mpiGrid, const vector &localPropagatedCells, const uint dimension, @@ -573,7 +651,6 @@ void getSeedIds(const dccrg::Dccrg& mpiGr pow(2,mpiGrid.get_maximum_refinement_level()) || !mpiGrid.is_local(nbrPair.first) || !do_translate_cell(mpiGrid[nbrPair.first]) ) { - addToSeedIds = true; } } @@ -587,7 +664,6 @@ void getSeedIds(const dccrg::Dccrg& mpiGr } if(debug) { - //cout << "Number of seed ids is " << seedIds.size() << endl; cout << "Rank " << myRank << ", Seed ids are: "; for (const auto seedId : seedIds) { cout << seedId << " "; @@ -603,7 +679,9 @@ void getSeedIds(const dccrg::Dccrg& mpiGr * dimensions are correctly swapped. Also, copy the same block for * the neighboring spatial cells (in the dimension). neighbors * generated with compute_spatial_neighbors_wboundcond). - * + * Adapted from copy_trans_block_data to be suitable for use with + * AMR and pencils. + * * This function must be thread-safe. * * @param source_neighbors Array containing the VLASOV_STENCIL_WIDTH closest @@ -686,11 +764,14 @@ void copy_trans_block_data_amr( } } -/* -Check whether the ghost cells around the pencil contain higher refinement than the pencil does. -If they do, the pencil must be split to match the finest refined ghost cell. This function checks -One neighbor pair, but takes as an argument the offset from the pencil. Call multiple times for -Multiple ghost cells. +/* Check whether the ghost cells around the pencil contain higher refinement than the pencil does. + * If they do, the pencil must be split to match the finest refined ghost cell. This function checks + * one neighbor pair, but takes as an argument the offset from the pencil. Call multiple times for + * multiple ghost cells. + * + * @param mpiGrid DCCRG grid object + * @param pencils Pencil data struct + * @param dimension Spatial dimension */ void check_ghost_cells(const dccrg::Dccrg& mpiGrid, setOfPencils& pencils, @@ -768,6 +849,12 @@ void check_ghost_cells(const dccrg::Dccrg } } +/* Checks that each local spatial cell appears in pencils at least once.
+ * @param mpiGrid DCCRG grid object + * @param cells Local spatial cells + * @param pencils Pencil data struct + */ bool checkPencils(const dccrg::Dccrg& mpiGrid, const std::vector &cells, const setOfPencils& pencils) { @@ -793,6 +880,12 @@ bool checkPencils(const dccrg::Dccrg& mpi } +/* Debugging function that prints the list of cells in each pencil + * + * @param pencils Pencil data struct + * @param dimension Spatial dimension + * @param myRank MPI rank + */ void printPencilsFunc(const setOfPencils& pencils, const uint dimension, const int myRank) { // Print out ids of pencils (if needed for debugging) @@ -827,6 +920,22 @@ void printPencilsFunc(const setOfPencils& pencils, const uint dimension, const i MPI_Barrier(MPI_COMM_WORLD); } +/* Map velocity blocks in all local cells forward by one time step in one spatial dimension. + * This function uses 1-cell wide pencils to update cells in-place to avoid allocating large + * temporary buffers. + * + * This function can, and should be, safely called in a parallel + * OpenMP region (as long as it does only one dimension per parallel + * region). It is safe as each thread only computes certain blocks (blockID % num_threads == thread_num) + * + * @param [in] mpiGrid DCCRG grid object + * @param [in] localPropagatedCells List of local cells that get propagated + * i.e. not boundary or DO_NOT_COMPUTE + * @param [in] remoteTargetCells List of non-local target cells + * @param dimension Spatial dimension + * @param [in] dt Time step + * @param [in] popId Particle population ID + */ bool trans_map_1d_amr(const dccrg::Dccrg& mpiGrid, const vector& localPropagatedCells, const vector& remoteTargetCells, @@ -1179,6 +1288,12 @@ bool trans_map_1d_amr(const dccrg::Dccrg& return true; } + +/* Get an index that identifies which cell in the list of sibling cells this cell is. + * + * @param mpiGrid DCCRG grid object + * @param cellid DCCRG id of this cell + */ int get_sibling_index(dccrg::Dccrg& mpiGr } -/*! - - This function communicates the mapping on process boundaries, and then updates the data to their correct values. - TODO, this could be inside an openmp region, in which case some m ore barriers and masters should be added - - \par dimension: 0,1,2 for x,y,z - \par direction: 1 for + dir, -1 for - dir -*/ +/* This function communicates the mapping on process boundaries, and then updates the data to their correct values.
+ * When sending data between neighbors of different refinement levels, special care has to be taken to ensure that + * The sending and receiving ranks allocate the correct size arrays for neighbor_block_data. + * This is partially due to DCCRG defining neighborhood size relative to the host cell. For details, see + * https://github.com/fmihpc/dccrg/issues/12 * * @param mpiGrid DCCRG grid object * @param dimension Spatial dimension - * @param direction Direction of communication (+-) + * @param direction Direction of communication (+ or -) * @param popId Particle population ID */ void update_remote_mapping_contribution_amr( From c71475d63283194c5b8ea17b53607ba5768775cd Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Wed, 15 May 2019 10:55:49 +0300 Subject: [PATCH 428/602] fixed string construction for output parameters --- parameters.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/parameters.cpp b/parameters.cpp index 720972e8f..0c022970f 100644 --- a/parameters.cpp +++ b/parameters.cpp @@ -215,7 +215,7 @@ bool Parameters::addParameters(){ // Output variable parameters // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh - Readparameters::addComposing("variables.output", "List of data reduction operators (DROs) to add to the grid file output. Each variable to be added has to be on a new line output = XXX. "+ + Readparameters::addComposing("variables.output", std::string()+"List of data reduction operators (DROs) to add to the grid file output. Each variable to be added has to be on a new line output = XXX. "+ "Available (20190514): "+ "B fg_B BackgroundB vg_BackgroundB fg_BackgroundB PerturbedB vg_PerturbedB fg_PerturbedB "+ "E fg_E "+ @@ -235,7 +235,7 @@ bool Parameters::addParameters(){ "GridCoordinates BackgroundVolE MeshData"); // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh - Readparameters::addComposing("variables.diagnostic", "List of data reduction operators (DROs) to add to the diagnostic runtime output. Each variable to be added has to be on a new line diagnostic = XXX. "+ + Readparameters::addComposing("variables.diagnostic", std::string()+"List of data reduction operators (DROs) to add to the diagnostic runtime output. Each variable to be added has to be on a new line diagnostic = XXX. 
"+ "Available (20190320): "+ "FluxB FluxE "+ "populations_Blocks "+ From bc2307187756d796e0437a91aa4577ff07eae6bd Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Wed, 15 May 2019 15:19:50 +0300 Subject: [PATCH 429/602] Removed commented code --- spatial_cell.cpp | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/spatial_cell.cpp b/spatial_cell.cpp index 8f179e984..c891c51a3 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -47,12 +47,7 @@ namespace spatial_cell { for (unsigned int i = 0; i < CellParams::N_SPATIAL_CELL_PARAMS; i++) { this->parameters[i]=0.0; } - - // // reset spatial cell derivatives - // for (unsigned int i = 0; i < fieldsolver::N_SPATIAL_CELL_DERIVATIVES; i++) { - // this->derivatives[i]=0; - // } - + // reset BVOL derivatives for (unsigned int i = 0; i < bvolderivatives::N_BVOL_DERIVATIVES; i++) { this->derivativesBVOL[i]=0; @@ -87,7 +82,6 @@ namespace spatial_cell { mpiTransferEnabled(other.mpiTransferEnabled), populations(other.populations), parameters(other.parameters), - // derivatives(other.derivatives), derivativesBVOL(other.derivativesBVOL), null_block_data(std::array {}) { } From b225e10bd0ee15d70f2093231924fc3bad0a908a Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Wed, 15 May 2019 15:20:53 +0300 Subject: [PATCH 430/602] Removed commented code --- datareduction/datareductionoperator.cpp | 102 +----------------------- 1 file changed, 1 insertion(+), 101 deletions(-) diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index f7b5cca15..58710d4ed 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -488,107 +488,7 @@ namespace DRO { averageVZ = cell-> parameters[CellParams::VZ]; for(int i = 0; i < 3; i++) PTensor[i] = 0.0; return true; - } - - // Integrated divergence of magnetic field - // Integral of div B over the simulation volume = - // Integral of flux of B on simulation volume surface - DiagnosticFluxB::DiagnosticFluxB(): DataReductionOperator() { } - DiagnosticFluxB::~DiagnosticFluxB() { } - - std::string DiagnosticFluxB::getName() const {return "FluxB";} - - bool DiagnosticFluxB::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { - dataType = "float"; - dataSize = sizeof(Real); - vectorSize = 1; - return true; - } - - bool DiagnosticFluxB::reduceDiagnostic(const SpatialCell* cell,Real * result) { - creal x = cell->parameters[CellParams::XCRD]; - creal dx = cell->parameters[CellParams::DX]; - creal y = cell->parameters[CellParams::YCRD]; - creal dy = cell->parameters[CellParams::DY]; - creal z = cell->parameters[CellParams::ZCRD]; - creal dz = cell->parameters[CellParams::DZ]; - creal cx = x + 0.5 * dx; - creal cy = y + 0.5 * dy; - creal cz = z + 0.5 * dz; - - Real value = 0.0; - if(cx > Parameters::xmax - 2.0 * dx && cx < Parameters::xmax - dx) { - value += cell->parameters[CellParams::PERBX]; - } else if (cx < Parameters::xmin + 2.0 * dx && cx > Parameters::xmin + dx) { - value += -1.0*cell->parameters[CellParams::PERBX]; - } - if(cy > Parameters::ymax - 2.0 * dy && cy < Parameters::ymax - dy) { - value += cell->parameters[CellParams::PERBY]; - } else if (cy < Parameters::ymin + 2.0 * dy && cy > Parameters::ymin + dy) { - value += -1.0*cell->parameters[CellParams::PERBY]; - } - if(cz > Parameters::zmax - 2.0 * dz && cz < Parameters::zmax - dz) { - value += cell->parameters[CellParams::PERBZ]; - } else if (cz < Parameters::zmin + 2.0 * dz && cz > Parameters::zmin + dz) { - value += 
-1.0*cell->parameters[CellParams::PERBZ]; - } - *result = value; - - return true; - } - - bool DiagnosticFluxB::setSpatialCell(const SpatialCell* cell) {return true;} - - - - // YK Integrated divergence of electric field - // Integral of div E over the simulation volume = - // Integral of flux of E on simulation volume surface - DiagnosticFluxE::DiagnosticFluxE(): DataReductionOperator() { } - DiagnosticFluxE::~DiagnosticFluxE() { } - - std::string DiagnosticFluxE::getName() const {return "FluxE";} - - bool DiagnosticFluxE::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { - dataType = "float"; - dataSize = sizeof(Real); - vectorSize = 1; - return true; - } - - bool DiagnosticFluxE::reduceDiagnostic(const SpatialCell* cell,Real * result) { - creal x = cell->parameters[CellParams::XCRD]; - creal dx = cell->parameters[CellParams::DX]; - creal y = cell->parameters[CellParams::YCRD]; - creal dy = cell->parameters[CellParams::DY]; - creal z = cell->parameters[CellParams::ZCRD]; - creal dz = cell->parameters[CellParams::DZ]; - creal cx = x + 0.5 * dx; - creal cy = y + 0.5 * dy; - creal cz = z + 0.5 * dz; - - Real value = 0.0; - if(cx > Parameters::xmax - 2.0 * dx && cx < Parameters::xmax - dx) { - value += cell->parameters[CellParams::EX]; - } else if (cx < Parameters::xmin + 2.0 * dx && cx > Parameters::xmin + dx) { - value += -1.0*cell->parameters[CellParams::EX]; - } - if(cy > Parameters::ymax - 2.0 * dy && cy < Parameters::ymax - dy) { - value += cell->parameters[CellParams::EY]; - } else if (cy < Parameters::ymin + 2.0 * dy && cy > Parameters::ymin + dy) { - value += -1.0*cell->parameters[CellParams::EY]; - } - if(cz > Parameters::zmax - 2.0 * dz && cz < Parameters::zmax - dz) { - value += cell->parameters[CellParams::EZ]; - } else if (cz < Parameters::zmin + 2.0 * dz && cz > Parameters::zmin + dz) { - value += -1.0*cell->parameters[CellParams::EZ]; - } - *result = value; - - return true; - } - - bool DiagnosticFluxE::setSpatialCell(const SpatialCell* cell) {return true;} + } // YK maximum value of the distribution function MaxDistributionFunction::MaxDistributionFunction(cuint _popID): DataReductionOperator(),popID(_popID) { From a2f8f0eced813018362f6cd1bbee0a0b7868b36c Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Wed, 15 May 2019 15:23:40 +0300 Subject: [PATCH 431/602] Removed warnings for diagnostic flux variables. --- vlasiator.cpp | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 24cf104f3..c227e3780 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -718,17 +718,6 @@ int main(int argn,char* args[]) { // Check whether diagnostic output has to be produced if (P::diagnosticInterval != 0 && P::tstep % P::diagnosticInterval == 0) { - vector::const_iterator it; - for (it = P::diagnosticVariableList.begin(); - it != P::diagnosticVariableList.end(); - it++) { - if (*it == "FluxB") { - if(myRank == MASTER_RANK) cerr << __FILE__ << " " << __LINE__ << "ERROR: Diagnostic output from FsGrid is no longer supported!" << endl; - } - if (*it == "FluxE") { - if(myRank == MASTER_RANK) cerr << __FILE__ << " " << __LINE__ << "ERROR: Diagnostic output from FsGrid is no longer supported!" 
<< endl; - } - } phiprof::start("diagnostic-io"); if (writeDiagnostic(mpiGrid, diagnosticReducer) == false) { From a044f0f88fca3fa89e94eed7b732cdfc9b5ca7b6 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Wed, 15 May 2019 15:24:33 +0300 Subject: [PATCH 432/602] Removed commented code --- vlasiator.cpp | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index c227e3780..0ca1eb2ee 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -849,28 +849,6 @@ int main(int argn,char* args[]) { logFile << "(LB): ... done!" << endl << writeVerbose; P::prepareForRebalance = false; - // Re-couple fsgrids to updated grid situation - phiprof::start("fsgrid-recouple-after-lb"); - -// const vector& cells = getLocalCells(); - -// technicalGrid. setupForGridCoupling(cells.size()); - -// // Each dccrg cell may have to communicate with multiple fsgrid cells, if they are on a lower refinement level. -// // Calculate the corresponding fsgrid ids for each dccrg cell and set coupling for each fsgrid id. -// for(auto& dccrgId : cells) { -// const auto fsgridIds = mapDccrgIdToFsGridGlobalID(mpiGrid, dccrgId); -// for (auto& fsgridId : fsgridIds) { - -// technicalGrid. setGridCoupling(fsgridId, myRank); -// } -// } -// // cout << endl; - -// technicalGrid. finishGridCoupling(); - - phiprof::stop("fsgrid-recouple-after-lb"); - overrideRebalanceNow = false; } From 09f4de71672b5e9f2f1d4c959f7c307b34a012f9 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Wed, 15 May 2019 15:47:01 +0300 Subject: [PATCH 433/602] Vector dipole compiles --- backgroundfield/vectordipole.cpp | 98 ++++++++++++++++++++---- backgroundfield/vectordipole.hpp | 5 +- projects/Magnetosphere/Magnetosphere.cpp | 26 +++++-- projects/Magnetosphere/Magnetosphere.h | 3 +- 4 files changed, 110 insertions(+), 22 deletions(-) diff --git a/backgroundfield/vectordipole.cpp b/backgroundfield/vectordipole.cpp index 25b9112c3..d5b25bfd5 100644 --- a/backgroundfield/vectordipole.cpp +++ b/backgroundfield/vectordipole.cpp @@ -31,7 +31,7 @@ Background magnetic field class of Vlasiator. // tilt_angle_phi is from the z-axis in radians // tilt_angle_theta is from the Sun-Earth-line in radians -void VectorDipole::initialize(const double moment,const double center_x, const double center_y, const double center_z, const double tilt_angle_phi=0, const double tilt_angle_theta=0, const double xlimit_f, const double xlimit_z){ +void VectorDipole::initialize(const double moment,const double center_x, const double center_y, const double center_z, const double tilt_angle_phi, const double tilt_angle_theta, const double xlimit_f, const double xlimit_z, const double IMF_Bx, const double IMF_By, const double IMF_Bz){ this->initialized = true; q[0]=-sin(tilt_angle_phi)*cos(tilt_angle_theta)*moment; @@ -46,6 +46,10 @@ void VectorDipole::initialize(const double moment,const double center_x, const d xlimit[0]=xlimit_f; // Full dipole when x < xlimit_f xlimit[1]=xlimit_z; // Zero field when x > xlimit_z + IMF[0]=IMF_Bx; + IMF[1]=IMF_By; + IMF[2]=IMF_Bz; + // TODO: If values for xlimit are zero, instead place them as 15 RE and Xmax-2*cellsize? 
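+  // In summary, the scaling scheme implemented in call() below is: for x < xlimit[0] the
+  // pure dipole field is returned; between xlimit[0] and xlimit[1] the dipole is scaled
+  // down by the smootherstep S2(s) = 6s^5 - 15s^4 + 10s^3 while the IMF is ramped up by
+  // the mirrored profile, each with a (del S2) cross A correction term that keeps the
+  // blended field divergence-free; for x > xlimit[1] only the IMF remains.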
} @@ -67,10 +71,14 @@ double VectorDipole::call( double x, double y, double z) const if(r2=xlimit[1]) - return 0.0; //set zero field and derivatives outside "zero x limit" - + + if(r[0]>=xlimit[1]){ + //set zero or IMF field and derivatives outside "zero x limit" + if(_derivative == 0) + return IMF[_fComponent]; + else + return 0.0; + } /* This function is called from within other calls, one component at a time. The component in question is defined using the _fComponent index. If a derivative is requested, the direction of the derivative is defined using _dComponent. */ @@ -80,11 +88,11 @@ double VectorDipole::call( double x, double y, double z) const const double rdotq=q[0]*r[0] + q[1]*r[1] +q[2]*r[2]; const double B=( 3*r[_fComponent]*rdotq-q[_fComponent]*r2)/r5; - if(_derivative == 0) && (r[0] <= xlimit[0]) + if((_derivative == 0) && (r[0] <= xlimit[0])) // Full dipole field within full xlimit return B; - if(_derivative == 1) && (r[0] <= xlimit[0]){ + if((_derivative == 1) && (r[0] <= xlimit[0])){ //first derivatives of full field unsigned int sameComponent; if(_dComponent==_fComponent) @@ -109,6 +117,13 @@ double VectorDipole::call( double x, double y, double z) const A[0] = (q[1]*r[2]-q[2]*r[1]) / (r2*r1); A[1] = (q[2]*r[0]-q[0]*r[2]) / (r2*r1); A[2] = (q[0]*r[1]-q[1]*r[0]) / (r2*r1); + // Calculate vector potential for IMF scaling + double IMFA[3]; + IMFA[0] = 0.5*(IMF[1]*r[2] - IMF[2]*r[1]); + IMFA[1] = 0.5*(IMF[2]*r[0] - IMF[0]*r[2]); + IMFA[2] = 0.5*(IMF[0]*r[1] - IMF[1]*r[0]); + const double IMFB = IMF[_fComponent]; + // Coordinate within smootherstep function (x-coordinate only) const double s = -(r[0]-xlimit[1])/(xlimit[1]-xlimit[0]); const double ss = s*s; @@ -116,13 +131,26 @@ double VectorDipole::call( double x, double y, double z) const const double S2 = 6.*ss*ss*s - 15.*ss*ss + 10.*ss*s; const double dS2dx = -(30.*ss*ss - 60.*ss*s + 30.*ss)/(xlimit[1]-xlimit[0]); + // Smootherstep for IMF + const double IMFs = (r[0]-xlimit[0])/(xlimit[1]-xlimit[0]); + const double IMFss = IMFs*IMFs; + // Smootherstep and its x-directional derivative + const double IMFS2 = 6.*IMFss*IMFss*IMFs - 15.*IMFss*IMFss + 10.*IMFss*IMFs; + const double IMFdS2dx = (30.*IMFss*IMFss - 60.*IMFss*IMFs + 30.*IMFss)/(xlimit[1]-xlimit[0]); + // Cartesian derivatives of S2 double dS2cart[3]; dS2cart[0] = dS2dx; //(r[0]/r1)*dS2dr; dS2cart[1] = 0; //(r[1]/r1)*dS2dr; dS2cart[2] = 0; //(r[2]/r1)*dS2dr; - if(_derivative == 0) && (r1 > xlimit[0]) { + // Cartesian derivatives of S2 + double IMFdS2cart[3]; + IMFdS2cart[0] = IMFdS2dx; //(r[0]/r1)*dS2dr; + IMFdS2cart[1] = 0; //(r[1]/r1)*dS2dr; + IMFdS2cart[2] = 0; //(r[2]/r1)*dS2dr; + + if((_derivative == 0) && (r1 > xlimit[0])) { /* Within transition range (between xlimit[0] and xlimit[1]) we multiply the magnetic field with the S2 smootherstep function and add an additional corrective term to remove divergence. 
This @@ -163,11 +191,18 @@ double VectorDipole::call( double x, double y, double z) const delS2crossA[0] = 0; delS2crossA[1] = -dS2cart[0]*A[2]; delS2crossA[2] = dS2cart[0]*A[1]; - - return S2*B + delS2crossA[_fComponent]; + + double IMFdelS2crossA[3]; + // Don't calculate zero terms + IMFdelS2crossA[0] = 0; + IMFdelS2crossA[1] = -IMFdS2cart[0]*IMFA[2]; + IMFdelS2crossA[2] = IMFdS2cart[0]*IMFA[1]; + + //return S2*B + delS2crossA[_fComponent]; + return S2*B + delS2crossA[_fComponent] + IMFS2*IMFB + IMFdelS2crossA[_fComponent]; } - else if(_derivative == 1) && (r1 > xlimit[0]) { + else if((_derivative == 1) && (r1 > xlimit[0])) { /* first derivatives of field calculated from diminishing vector potential del B'(r) = S2(s) del B(r) + B(r) del S2(s) + del (del S2(s) cross A(r)) @@ -204,6 +239,9 @@ double VectorDipole::call( double x, double y, double z) const 2*q[_fComponent]*r[_dComponent] + 3*rdotq*sameComponent)/r5; + // IMF field is constant + const double IMFdelB = 0.; + // Calculate del Ax, del Ay, del Az double delAy[3]; double delAz[3]; @@ -219,9 +257,25 @@ double VectorDipole::call( double x, double y, double z) const //delAx[1] = (-3./(r2*r2*r1))*(q[1]*r[2]-q[2]*r[1])*r[1] -q[2]/(r2*r1); //delAx[2] = (-3./(r2*r2*r1))*(q[1]*r[2]-q[2]*r[1])*r[2] +q[1]/(r2*r1); + // Calculate del IMFAx, del IMFAy, del IMFAz + double IMFdelAy[3]; + double IMFdelAx[3]; + double IMFdelAz[3]; + IMFdelAx[0] = 0.; + IMFdelAx[1] = -0.5*IMF[2]; + IMFdelAx[2] = 0.5*IMF[1]; + IMFdelAy[0] = 0.5*IMF[2]; + IMFdelAy[1] = 0.0; + IMFdelAy[2] = -0.5*IMF[0]; + IMFdelAz[0] = -0.5*IMF[1]; + IMFdelAz[1] = 0.5*IMF[0]; + IMFdelAz[2] = 0.0; + // Calculate del (dS2/dx), del (dS2/dy), del (dS2/dz) // Of course now only del (dS2/dx) is non-zero - ddidS2dx = 60.*(2.*ss*s - 3.*ss + s)/((xlimit[1]-xlimit[0])*(xlimit[1]-xlimit[0])); + const double ddidS2dx = 60.*(2.*ss*s - 3.*ss + s)/((xlimit[1]-xlimit[0])*(xlimit[1]-xlimit[0])); + // This is the same for IMF field scaling as well + double deldS2dx[3]; //double deldS2dy[3]; //double deldS2dz[3]; @@ -271,7 +325,25 @@ double VectorDipole::call( double x, double y, double z) const ddS2crossA[2][1] = deldS2dx[1]*A[1] + dS2cart[0]*delAy[1]; ddS2crossA[2][2] = deldS2dx[2]*A[1] + dS2cart[0]*delAy[2]; - return S2*delB + dS2cart[_dComponent]*B + ddS2crossA[_fComponent][_dComponent]; + // Now for IMF portion + // Only include components which are nonzero + double IMFddS2crossA[3][3]; + // derivatives of X-directional field + IMFddS2crossA[0][0] = 0; + IMFddS2crossA[0][1] = 0; + IMFddS2crossA[0][2] = 0; + // derivatives of Y-directional field + IMFddS2crossA[1][0] = - deldS2dx[0]*IMFA[2] - IMFdS2cart[0]*IMFdelAz[0]; + IMFddS2crossA[1][1] = - deldS2dx[1]*IMFA[2] - IMFdS2cart[0]*IMFdelAz[1]; + IMFddS2crossA[1][2] = - deldS2dx[2]*IMFA[2] - IMFdS2cart[0]*IMFdelAz[2]; + // derivatives of Z-directional field + IMFddS2crossA[2][0] = deldS2dx[0]*IMFA[1] + IMFdS2cart[0]*IMFdelAy[0]; + IMFddS2crossA[2][1] = deldS2dx[1]*IMFA[1] + IMFdS2cart[0]*IMFdelAy[1]; + IMFddS2crossA[2][2] = deldS2dx[2]*IMFA[1] + IMFdS2cart[0]*IMFdelAy[2]; + + //return S2*delB + dS2cart[_dComponent]*B + ddS2crossA[_fComponent][_dComponent]; + return S2*delB + dS2cart[_dComponent]*B + ddS2crossA[_fComponent][_dComponent] + + IMFS2*IMFdelB + IMFdS2cart[_dComponent]*IMFB + IMFddS2crossA[_fComponent][_dComponent]; } return 0; // dummy, but prevents gcc from yelling diff --git a/backgroundfield/vectordipole.hpp b/backgroundfield/vectordipole.hpp index ce2d81a69..2db065ea1 100644 --- a/backgroundfield/vectordipole.hpp +++ 
b/backgroundfield/vectordipole.hpp @@ -36,14 +36,15 @@ class VectorDipole: public FieldFunction { double q[3]; // Dipole moment; set to (0,0,moment) for z-aligned double center[3]; // Coordinates where the dipole sits; set to (0,0,0) double xlimit[2]; // X-coodrinate extents of full and zero dipole + double IMF[3]; // IMF value to scale to, starting at xlimit[0] and finishing at xlimit[1] public: VectorDipole(){ this->initialized = false; } - void initialize(const double moment,const double center_x, const double center_y, const double center_z, const double tilt_angle_phi, const double tilt_angle_theta, const double xlimit_f, const double xlimit_z); + void initialize(const double moment,const double center_x, const double center_y, const double center_z, const double tilt_angle_phi, const double tilt_angle_theta, const double xlimit_f, const double xlimit_z, const double IMF_Bx, const double IMF_By, const double IMF_Bz); virtual double call(double x, double y, double z) const; - virtual ~Dipole() {} + virtual ~VectorDipole() {} }; #endif diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index 14fbcaaf7..66bd25410 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -31,6 +31,7 @@ #include "../../backgroundfield/constantfield.hpp" #include "../../backgroundfield/dipole.hpp" #include "../../backgroundfield/linedipole.hpp" +#include "../../backgroundfield/vectordipole.hpp" #include "../../object_wrapper.h" #include "Magnetosphere.h" @@ -61,7 +62,10 @@ namespace projects { RP::add("Magnetosphere.dipoleTiltPhi","Magnitude of dipole tilt in radians", 0.0); RP::add("Magnetosphere.dipoleTiltTheta","Direction of dipole tilt from Sun-Earth-line in radians", 0.0); RP::add("Magnetosphere.dipoleXFull","X-coordinate up to which dipole is at full strength, in metres", 9.5565e7); // 15 RE - RP::add("Magnetosphere.dipoleXFull","X-coordinate after which dipole is at zero strength, in metres", 1.9113e8); // 30 RE + RP::add("Magnetosphere.dipoleXZero","X-coordinate after which dipole is at zero strength, in metres", 1.9113e8); // 30 RE + RP::add("Magnetosphere.dipoleInflowBX","Inflow magnetic field Bx component to which the vector potential dipole converges. Default is none.", 0.0); + RP::add("Magnetosphere.dipoleInflowBY","Inflow magnetic field By component to which the vector potential dipole converges. Default is none.", 0.0); + RP::add("Magnetosphere.dipoleInflowBZ","Inflow magnetic field Bz component to which the vector potential dipole converges. Default is none.", 0.0); // Per-population parameters for(uint i=0; i< getObjectWrapper().particleSpecies.size(); i++) { @@ -170,6 +174,18 @@ namespace projects { if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; exit(1); } + if(!Readparameters::get("Magnetosphere.dipoleInflowBX", this->dipoleInflowB[0])) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; + exit(1); + } + if(!Readparameters::get("Magnetosphere.dipoleInflowBY", this->dipoleInflowB[1])) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; + exit(1); + } + if(!Readparameters::get("Magnetosphere.dipoleInflowBZ", this->dipoleInflowB[2])) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" 
<< endl; + exit(1); + } // Per-population parameters for(uint i=0; i< getObjectWrapper().particleSpecies.size(); i++) { @@ -309,14 +325,12 @@ namespace projects { bgFieldDipole.initialize(8e15 *this->dipoleScalingFactor, this->dipoleMirrorLocationX, 0.0, 0.0, 0.0 );//mirror setBackgroundField(bgFieldDipole, BgBGrid, true); break; - case 4: // Vector potential dipole, vanishes after a given x-coordinate - bgVectorDipole.initialize(126.2e6 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, this->dipoleTiltPhi, this->dipoleTiltTheta, this->dipoleXFull, this->dipoleXZero ); + case 4: // Vector potential dipole, vanishes or optionally scales to static inflow value after a given x-coordinate + bgVectorDipole.initialize(126.2e6 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, this->dipoleTiltPhi, this->dipoleTiltTheta, this->dipoleXFull, this->dipoleXZero, this->dipoleInflowB[0], this->dipoleInflowB[1], this->dipoleInflowB[2]); setBackgroundField(bgVectorDipole, BgBGrid); - break; - + break; default: setBackgroundFieldToZero(BgBGrid); - } const auto localSize = BgBGrid.getLocalSize(); diff --git a/projects/Magnetosphere/Magnetosphere.h b/projects/Magnetosphere/Magnetosphere.h index 827a4d587..715df6b7b 100644 --- a/projects/Magnetosphere/Magnetosphere.h +++ b/projects/Magnetosphere/Magnetosphere.h @@ -93,7 +93,8 @@ namespace projects { Real dipoleTiltPhi; Real dipoleTiltTheta; Real dipoleXFull; - Real dipoleXNone; + Real dipoleXZero; + Real dipoleInflowB[3]; std::vector speciesParams; }; // class Magnetosphere From 3a64662569f44de2d854df2aa1d8151f17e30176 Mon Sep 17 00:00:00 2001 From: Grandin Maxime T J Date: Thu, 16 May 2019 09:30:14 +0300 Subject: [PATCH 434/602] Fixed a couple of bugs --- datareduction/datareductionoperator.cpp | 4 ++-- datareduction/datareductionoperator.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index 5b10bec5a..e5e7df728 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -1556,7 +1556,7 @@ namespace DRO { return true; } - bool VariablePrecipitation::writeParameters(vlsv::Writer& vlsvWriter) { + bool VariablePrecipitationDiffFlux::writeParameters(vlsv::Writer& vlsvWriter) { for (int i=0; i E1limit) thread_E1_sum += block_data[n * SIZE_VELBLOCK+cellIndex(i,j,k)] * ENERGY * DV3; if (ENERGY > E2limit) thread_E2_sum += block_data[n * SIZE_VELBLOCK+cellIndex(i,j,k)] * ENERGY * DV3; } - } + EDensity[0] += thread_E0_sum; EDensity[1] += thread_E1_sum; EDensity[2] += thread_E2_sum; diff --git a/datareduction/datareductionoperator.h b/datareduction/datareductionoperator.h index 2b0959a91..61c6d4292 100644 --- a/datareduction/datareductionoperator.h +++ b/datareduction/datareductionoperator.h @@ -491,7 +491,7 @@ namespace DRO { bool doSkip; }; - class VariableEffectiveSparsityThreshold: public DataReductionOperatorHasParameters { + class VariableEffectiveSparsityThreshold: public DataReductionOperator { public: VariableEffectiveSparsityThreshold(cuint popID); virtual ~VariableEffectiveSparsityThreshold(); From bdaf90151c057301a470f8fd2868bf06c5df3a38 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Thu, 16 May 2019 10:51:55 +0300 Subject: [PATCH 435/602] Removed CellParams::PERB* and CellParams::E* references from projects --- projects/Alfven/Alfven.cpp | 7 ------- projects/Dispersion/Dispersion.cpp | 16 ++++++++++------ 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/projects/Alfven/Alfven.cpp 
b/projects/Alfven/Alfven.cpp index e73022d27..1e0e4b9f5 100644 --- a/projects/Alfven/Alfven.cpp +++ b/projects/Alfven/Alfven.cpp @@ -160,13 +160,6 @@ namespace projects { } cuint nPts = pow(this->nSpaceSamples, 3.0); - cellParams[CellParams::EX ] = 0.0; - cellParams[CellParams::EY ] = 0.0; - cellParams[CellParams::EZ ] = 0.0; - //Field below could also be set as background field - cellParams[CellParams::PERBX ] = this->B0 * cos(this->ALPHA) - this->A_MAG * this->B0 * sin(this->ALPHA) * dBxavg / nPts; - cellParams[CellParams::PERBY ] = this->B0 * sin(this->ALPHA) + this->A_MAG * this->B0 * cos(this->ALPHA) * dByavg / nPts; - cellParams[CellParams::PERBZ ] = this->B0 * this->A_MAG * dBzavg / nPts; } void Alfven::setProjectBField( diff --git a/projects/Dispersion/Dispersion.cpp b/projects/Dispersion/Dispersion.cpp index e3cbe9535..0bd572475 100644 --- a/projects/Dispersion/Dispersion.cpp +++ b/projects/Dispersion/Dispersion.cpp @@ -106,6 +106,10 @@ namespace projects { if(hook::END_OF_TIME_STEP == stage) { int myRank; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + + cerr << "Warning, this function does virtually nothing, since B and E are not updated in DCCRG cell params since May 2019" << endl; + cerr << "If this is undesirable to you, please implement writing the fields out of fsgrid objects" << endl; + vector<Real> localRhom(P::xcells_ini, 0.0), outputRhom(P::xcells_ini, 0.0), localPerBx(P::xcells_ini, 0.0), outputPerBx(P::xcells_ini, 0.0), localPerBy(P::xcells_ini, 0.0), outputPerBy(P::xcells_ini, 0.0), localPerBz(P::xcells_ini, 0.0), outputPerBz(P::xcells_ini, 0.0), localEx(P::xcells_ini, 0.0), outputEx(P::xcells_ini, 0.0), localEy(P::xcells_ini, 0.0), outputEy(P::xcells_ini, 0.0), localEz(P::xcells_ini, 0.0), outputEz(P::xcells_ini, 0.0); for(uint i=0; i<Parameters::localCells.size(); i++) { - localPerBx[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::PERBX]; - localPerBy[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::PERBY]; - localPerBz[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::PERBZ]; +// localPerBx[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::PERBX]; +// localPerBy[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::PERBY]; +// localPerBz[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::PERBZ]; localRhom[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::RHOM]; - localEx[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::EX]; - localEy[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::EY]; - localEz[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::EZ]; +// localEx[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::EX]; +// localEy[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::EY]; +// localEz[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::EZ]; } } From f98818a3bfb185212be4ea0f3eefb2e54d24adeb Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Thu, 16 May 2019 10:53:44 +0300 Subject: [PATCH 436/602] Removed obsolete datareducers --- datareduction/datareducer.cpp | 8 -------- 1 file changed, 8 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index cb02571de..29d5635f7 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -101,10 +101,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); continue; } - // if(*it == "vg_BackgroundB") { // Static (typically dipole) magnetic field part - // outputReducer->addOperator(new
DRO::DataReductionOperatorCellParams("background_B",CellParams::BGBX,3)); - // continue; - // } if(*it == "fg_PerturbedB" || *it == "PerturbedB") { // Fluctuating magnetic field part outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_perturbed_B",[]( FsGrid< std::array, 2>& perBGrid, @@ -136,10 +132,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); continue; } - if(*it == "vg_PerturbedB") { // Fluctuating magnetic field part - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("perturbed_B",CellParams::PERBX,3)); - continue; - } if(*it == "fg_E" || *it== "E") { // Bulk electric field at Yee-lattice locations outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_E",[]( FsGrid< std::array, 2>& perBGrid, From 341d4a14ea03a9f9a6a20afd76fe3b4e62d6da7b Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Thu, 16 May 2019 11:27:53 +0300 Subject: [PATCH 437/602] corrected scaling factor for vector dipole --- projects/Magnetosphere/Magnetosphere.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index 66bd25410..13052bdf3 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -326,7 +326,7 @@ namespace projects { setBackgroundField(bgFieldDipole, BgBGrid, true); break; case 4: // Vector potential dipole, vanishes or optionally scales to static inflow value after a given x-coordinate - bgVectorDipole.initialize(126.2e6 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, this->dipoleTiltPhi, this->dipoleTiltTheta, this->dipoleXFull, this->dipoleXZero, this->dipoleInflowB[0], this->dipoleInflowB[1], this->dipoleInflowB[2]); + bgVectorDipole.initialize(8e15 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, this->dipoleTiltPhi, this->dipoleTiltTheta, this->dipoleXFull, this->dipoleXZero, this->dipoleInflowB[0], this->dipoleInflowB[1], this->dipoleInflowB[2]); setBackgroundField(bgVectorDipole, BgBGrid); break; default: From 2267590caa055a44aea37096c6f56b5553c3c33f Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Thu, 16 May 2019 11:48:04 +0300 Subject: [PATCH 438/602] Large commit, had to modify all sysboundaries. Removed CellParams::PERB* and CellParams::E*. This caused a change in sysboundary/setByUser.cpp to set the template B on fsgrid instead of mpiGrid. It is set in applyInitialCondition, and passing in perBGrid had to be added to each class that inherits it from sysboundarycondition. 
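For reference, the shape of the interface change (assembled from the diffs below; the angle-bracketed template arguments of dccrg and FsGrid are assumptions taken from the grid types used elsewhere in the code base, not from this patch text):

   virtual bool applyInitialState(
      const dccrg::Dccrg<spatial_cell::SpatialCell, dccrg::Cartesian_Geometry>& mpiGrid,
      FsGrid< std::array<Real, fsgrids::bfield::N_BFIELD>, 2>& perBGrid,
      Project& project
   ) = 0;

Every class deriving from SysBoundaryCondition overrides this, and SysBoundary::applyInitialState simply forwards perBGrid to each registered boundary condition in turn.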
Other minor changes: - Removal of useless datareducers / io reads - Removal of communications in spatial_cell --- common.h | 12 ------ grid.cpp | 4 +- ioread.cpp | 2 +- iowrite.cpp | 2 +- spatial_cell.cpp | 26 +------------ sysboundary/antisymmetric.cpp | 1 + sysboundary/antisymmetric.h | 1 + sysboundary/donotcompute.cpp | 22 ++++++----- sysboundary/donotcompute.h | 1 + sysboundary/ionosphere.cpp | 1 + sysboundary/ionosphere.h | 1 + sysboundary/outflow.cpp | 1 + sysboundary/outflow.h | 1 + sysboundary/project_boundary.cpp | 1 + sysboundary/project_boundary.h | 1 + sysboundary/setbyuser.cpp | 62 +++++++++++++++++++++++++----- sysboundary/setbyuser.h | 6 ++- sysboundary/setmaxwellian.cpp | 21 ++-------- sysboundary/setmaxwellian.h | 2 +- sysboundary/sysboundary.cpp | 3 +- sysboundary/sysboundary.h | 1 + sysboundary/sysboundarycondition.h | 1 + 22 files changed, 92 insertions(+), 81 deletions(-) diff --git a/common.h b/common.h index c0245bde6..811053f89 100644 --- a/common.h +++ b/common.h @@ -130,23 +130,11 @@ namespace CellParams { DX, /*!< Grid separation in x-coordinate.*/ DY, /*!< Grid separation in y-coordinate.*/ DZ, /*!< Grid separation in z-coordinate.*/ - EX, /*!< Total electric field x-component, averaged over cell edge. Used to propagate BX,BY,BZ.*/ - EY, /*!< Total wlectric field y-component, averaged over cell edge. Used to propagate BX,BY,BZ.*/ - EZ, /*!< Total electric field z-component, averaged over cell edge. Used to propagate BX,BY,BZ.*/ - PERBX, /*!< Perturbed Magnetic field x-component, averaged over cell x-face. Propagated by field solver.*/ - PERBY, /*!< Perturbed Magnetic field y-component, averaged over cell y-face. Propagated by field solver.*/ - PERBZ, /*!< Perturbed Magnetic field z-component, averaged over cell z-face. Propagated by field solver.*/ RHOM, /*!< Total mass density. Calculated by Vlasov propagator, used to propagate BX,BY,BZ.*/ VX, /*!< Vx. Calculated by Vlasov propagator, used to propagate BX,BY,BZ.*/ VY, /*!< Vy. Calculated by Vlasov propagator, used to propagate BX,BY,BZ.*/ VZ, /*!< Vz. Calculated by Vlasov propagator, used to propagate BX,BY,BZ.*/ RHOQ, /*!< Total charge density. Calculated by Vlasov propagator, used to propagate BX,BY,BZ.*/ - EX_DT2, /*!< Intermediate step value for RK2 time stepping in field solver.*/ - EY_DT2, /*!< Intermediate step value for RK2 time stepping in field solver.*/ - EZ_DT2, /*!< Intermediate step value for RK2 time stepping in field solver.*/ - PERBX_DT2, /*!< Intermediate step value for PERBX for RK2 time stepping in field solver.*/ - PERBY_DT2, /*!< Intermediate step value for PERBY for RK2 time stepping in field solver.*/ - PERBZ_DT2, /*!< Intermediate step value for PERBZ for RK2 time stepping in field solver.*/ RHOM_DT2, /*!< Total mass density. Calculated by Vlasov propagator, used to propagate BX,BY,BZ.*/ VX_DT2, /*!< Vx. Calculated by Vlasov propagator, used to propagate BX,BY,BZ.*/ VY_DT2, /*!< Vy. Calculated by Vlasov propagator, used to propagate BX,BY,BZ.*/ diff --git a/grid.cpp b/grid.cpp index 1c073b1f4..66147b0c4 100644 --- a/grid.cpp +++ b/grid.cpp @@ -195,7 +195,7 @@ void initializeGrids( //initial state for sys-boundary cells, will skip those not set to be reapplied at restart phiprof::start("Apply system boundary conditions state"); - if (sysBoundaries.applyInitialState(mpiGrid, project) == false) { + if (sysBoundaries.applyInitialState(mpiGrid, perBGrid, project) == false) { cerr << " (MAIN) ERROR: System boundary conditions initial state was not applied correctly." 
<< endl; exit(1); } @@ -227,7 +227,7 @@ void initializeGrids( // Initial state for sys-boundary cells phiprof::stop("Apply initial state"); phiprof::start("Apply system boundary conditions state"); - if (sysBoundaries.applyInitialState(mpiGrid, project) == false) { + if (sysBoundaries.applyInitialState(mpiGrid, perBGrid, project) == false) { cerr << " (MAIN) ERROR: System boundary conditions initial state was not applied correctly." << endl; exit(1); } diff --git a/ioread.cpp b/ioread.cpp index 02953780b..d1071df33 100644 --- a/ioread.cpp +++ b/ioread.cpp @@ -996,7 +996,7 @@ bool exec_readGrid(dccrg::Dccrg& mpiGrid, //todo, check file datatype, and do not just use double phiprof::start("readCellParameters"); - if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"perturbed_B",CellParams::PERBX,3,mpiGrid); } + //if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"perturbed_B",CellParams::PERBX,3,mpiGrid); } // Backround B has to be set, there are also the derivatives that should be written/read if we wanted to only read in background field if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"moments",CellParams::RHOM,5,mpiGrid); } if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"moments_dt2",CellParams::RHOM_DT2,5,mpiGrid); } diff --git a/iowrite.cpp b/iowrite.cpp index 94ceb8671..126dd05d6 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -1270,7 +1270,7 @@ bool writeRestart(dccrg::Dccrg& mpiGrid, phiprof::start("reduceddataIO"); //write out DROs we need for restarts DataReducer restartReducer; - restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("perturbed_B",CellParams::PERBX,3)); + //restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("perturbed_B",CellParams::PERBX,3)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("moments",CellParams::RHOM,5)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("moments_dt2",CellParams::RHOM_DT2,5)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("moments_r",CellParams::RHOM_R,5)); diff --git a/spatial_cell.cpp b/spatial_cell.cpp index c891c51a3..15c36ca61 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -674,31 +674,7 @@ namespace spatial_cell { displacements.push_back((uint8_t*) &(this->parameters[CellParams::BGBXVOL]) - (uint8_t*) this); block_lengths.push_back(sizeof(Real) * 6); } - - // send EX, EY EZ - if ((SpatialCell::mpi_transfer_type & Transfer::CELL_E)!=0){ - displacements.push_back((uint8_t*) &(this->parameters[CellParams::EX]) - (uint8_t*) this); - block_lengths.push_back(sizeof(Real) * 3); - } - - // send EX_DT2, EY_DT2, EZ_DT2 - if ((SpatialCell::mpi_transfer_type & Transfer::CELL_EDT2)!=0){ - displacements.push_back((uint8_t*) &(this->parameters[CellParams::EX_DT2]) - (uint8_t*) this); - block_lengths.push_back(sizeof(Real) * 3); - } - - // send PERBX, PERBY, PERBZ - if ((SpatialCell::mpi_transfer_type & Transfer::CELL_PERB)!=0){ - displacements.push_back((uint8_t*) &(this->parameters[CellParams::PERBX]) - (uint8_t*) this); - block_lengths.push_back(sizeof(Real) * 3); - } - - // send PERBX_DT2, PERBY_DT2, PERBZ_DT2 - if ((SpatialCell::mpi_transfer_type & Transfer::CELL_PERBDT2)!=0){ - displacements.push_back((uint8_t*) &(this->parameters[CellParams::PERBX_DT2]) - (uint8_t*) this); - block_lengths.push_back(sizeof(Real) * 3); - } - + // send RHOM, VX, VY, VZ if 
((SpatialCell::mpi_transfer_type & Transfer::CELL_RHOM_V)!=0){ displacements.push_back((uint8_t*) &(this->parameters[CellParams::RHOM]) - (uint8_t*) this); diff --git a/sysboundary/antisymmetric.cpp b/sysboundary/antisymmetric.cpp index 60a3a0cf4..8f6fea7e5 100644 --- a/sysboundary/antisymmetric.cpp +++ b/sysboundary/antisymmetric.cpp @@ -168,6 +168,7 @@ namespace SBC { bool Antisymmetric::applyInitialState( const dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2> & perBGrid, Project &project ) { vector cells = mpiGrid.get_cells(); diff --git a/sysboundary/antisymmetric.h b/sysboundary/antisymmetric.h index 9f1218481..677af2926 100644 --- a/sysboundary/antisymmetric.h +++ b/sysboundary/antisymmetric.h @@ -55,6 +55,7 @@ namespace SBC { FsGrid< fsgrids::technical, 2> & technicalGrid); virtual bool applyInitialState( const dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2> & perBGrid, Project &project ); // virtual bool applySysBoundaryCondition( diff --git a/sysboundary/donotcompute.cpp b/sysboundary/donotcompute.cpp index 3a8775729..fab2630f5 100644 --- a/sysboundary/donotcompute.cpp +++ b/sysboundary/donotcompute.cpp @@ -55,6 +55,7 @@ namespace SBC { bool DoNotCompute::applyInitialState( const dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2> & perBGrid, Project& ) { vector cells = mpiGrid.get_cells(); @@ -62,16 +63,17 @@ namespace SBC { for (size_t i=0; isysBoundaryFlag != this->getIndex()) continue; - - cell->parameters[CellParams::PERBX] = 0.0; - cell->parameters[CellParams::PERBY] = 0.0; - cell->parameters[CellParams::PERBZ] = 0.0; - cell->parameters[CellParams::PERBX_DT2] = 0.0; - cell->parameters[CellParams::PERBY_DT2] = 0.0; - cell->parameters[CellParams::PERBZ_DT2] = 0.0; - cell->parameters[CellParams::EX] = 0.0; - cell->parameters[CellParams::EY] = 0.0; - cell->parameters[CellParams::EZ] = 0.0; + + //TODO: Set fields on B grid to 0 +// cell->parameters[CellParams::PERBX] = 0.0; +// cell->parameters[CellParams::PERBY] = 0.0; +// cell->parameters[CellParams::PERBZ] = 0.0; +// cell->parameters[CellParams::PERBX_DT2] = 0.0; +// cell->parameters[CellParams::PERBY_DT2] = 0.0; +// cell->parameters[CellParams::PERBZ_DT2] = 0.0; +// cell->parameters[CellParams::EX] = 0.0; +// cell->parameters[CellParams::EY] = 0.0; +// cell->parameters[CellParams::EZ] = 0.0; cell->parameters[CellParams::RHOM] = 0.0; cell->parameters[CellParams::VX] = 0.0; cell->parameters[CellParams::VY] = 0.0; diff --git a/sysboundary/donotcompute.h b/sysboundary/donotcompute.h index 697efe3af..c72a0b870 100644 --- a/sysboundary/donotcompute.h +++ b/sysboundary/donotcompute.h @@ -52,6 +52,7 @@ namespace SBC { FsGrid< fsgrids::technical, 2> & technicalGrid); virtual bool applyInitialState( const dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2> & perBGrid, Project &project ); virtual std::string getName() const; diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index f0cb26e6f..7b968bbea 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -233,6 +233,7 @@ namespace SBC { bool Ionosphere::applyInitialState( const dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2> & perBGrid, Project &project ) { vector cells = mpiGrid.get_cells(); diff --git a/sysboundary/ionosphere.h b/sysboundary/ionosphere.h index 62bbc2052..2922b4a52 100644 --- a/sysboundary/ionosphere.h +++ b/sysboundary/ionosphere.h @@ -67,6 +67,7 @@ namespace SBC { FsGrid< fsgrids::technical, 2> & technicalGrid); virtual bool applyInitialState( const dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2> & perBGrid, Project &project ); virtual 
Real fieldSolverBoundaryCondMagneticField( diff --git a/sysboundary/outflow.cpp b/sysboundary/outflow.cpp index a24ec9459..628aeb925 100644 --- a/sysboundary/outflow.cpp +++ b/sysboundary/outflow.cpp @@ -282,6 +282,7 @@ namespace SBC { bool Outflow::applyInitialState( const dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2> & perBGrid, Project &project ) { const vector& cells = getLocalCells(); diff --git a/sysboundary/outflow.h b/sysboundary/outflow.h index 18b278ed2..2c6bb1249 100644 --- a/sysboundary/outflow.h +++ b/sysboundary/outflow.h @@ -67,6 +67,7 @@ namespace SBC { FsGrid< fsgrids::technical, 2> & technicalGrid); virtual bool applyInitialState( const dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2> & perBGrid, Project &project ); virtual Real fieldSolverBoundaryCondMagneticField( diff --git a/sysboundary/project_boundary.cpp b/sysboundary/project_boundary.cpp index ec5945b59..df6070f5c 100644 --- a/sysboundary/project_boundary.cpp +++ b/sysboundary/project_boundary.cpp @@ -158,6 +158,7 @@ namespace SBC { bool ProjectBoundary::applyInitialState( const dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2> & perBGrid, Project &project ) { bool success = true; diff --git a/sysboundary/project_boundary.h b/sysboundary/project_boundary.h index 510479207..6e6815d69 100644 --- a/sysboundary/project_boundary.h +++ b/sysboundary/project_boundary.h @@ -56,6 +56,7 @@ namespace SBC { FsGrid< fsgrids::technical, 2> & technicalGrid); virtual bool applyInitialState( const dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2> & perBGrid, Project &project ); // virtual bool applySysBoundaryCondition( diff --git a/sysboundary/setbyuser.cpp b/sysboundary/setbyuser.cpp index 061338280..73f866fbe 100644 --- a/sysboundary/setbyuser.cpp +++ b/sysboundary/setbyuser.cpp @@ -144,12 +144,14 @@ namespace SBC { bool SetByUser::applyInitialState( const dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2> & perBGrid, Project &project ) { bool success = true; for (uint popID=0; popID& mpiGrid, + FsGrid< std::array, 2> & perBGrid) { + + std::array isThisCellOnAFace; + const std::array gridDims(perBGrid.getLocalSize()); + + for (int k=0; kat(fsgrids::bfield::PERBX) = templateB[iface][0]; + perBGrid.get(i,j,k)->at(fsgrids::bfield::PERBY) = templateB[iface][1]; + perBGrid.get(i,j,k)->at(fsgrids::bfield::PERBZ) = templateB[iface][2]; + } + } + } + } + } + return true; + } + + bool SetByUser::setCellsFromTemplate(const dccrg::Dccrg& mpiGrid,const uint popID) { vector cells = mpiGrid.get_cells(); #pragma omp parallel for @@ -289,13 +338,6 @@ namespace SBC { for(uint i=0; i<6; i++) { if(facesToProcess[i] && isThisCellOnAFace[i]) { - if (popID == 0) { - cell->parameters[CellParams::PERBX] = templateCells[i].parameters[CellParams::PERBX]; - cell->parameters[CellParams::PERBY] = templateCells[i].parameters[CellParams::PERBY]; - cell->parameters[CellParams::PERBZ] = templateCells[i].parameters[CellParams::PERBZ]; - - } - copyCellData(&templateCells[i], cell,true,false,popID); break; // This effectively sets the precedence of faces through the order of faces. 
} @@ -426,7 +468,7 @@ namespace SBC { for(uint i=0; i<6; i++) { int index; if(facesToProcess[i]) { - generateTemplateCell(templateCells[i], i, t); + generateTemplateCell(templateCells[i], templateB[i], i, t); } } return true; diff --git a/sysboundary/setbyuser.h b/sysboundary/setbyuser.h index 9a5d0881c..2bd54c84f 100644 --- a/sysboundary/setbyuser.h +++ b/sysboundary/setbyuser.h @@ -72,6 +72,7 @@ namespace SBC { FsGrid< fsgrids::technical, 2> & technicalGrid); virtual bool applyInitialState( const dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2> & perBGrid, Project &project ); virtual Real fieldSolverBoundaryCondMagneticField( @@ -141,13 +142,16 @@ namespace SBC { void interpolate(const int inputDataIndex, const uint popID, creal t, Real* outputData); bool generateTemplateCells(creal& t); - virtual void generateTemplateCell(spatial_cell::SpatialCell& templateCell, int inputDataIndex, creal& t) = 0; + virtual void generateTemplateCell(spatial_cell::SpatialCell& templateCell, Real B[3], int inputDataIndex, creal& t) = 0; bool setCellsFromTemplate(const dccrg::Dccrg& mpiGrid,const uint popID); + bool setBFromTemplate(const dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2> & perBGrid); /*! Array of bool telling which faces are going to be processed by the system boundary condition.*/ bool facesToProcess[6]; /*! Array of template spatial cells replicated over the corresponding simulation volume face. Only the template for an active face is actually being touched at all by the code. */ spatial_cell::SpatialCell templateCells[6]; + Real templateB[6][3]; /*! List of faces on which user-set boundary conditions are to be applied ([xyz][+-]). */ std::vector faceList; diff --git a/sysboundary/setmaxwellian.cpp b/sysboundary/setmaxwellian.cpp index 2b59b88bb..4f500945f 100644 --- a/sysboundary/setmaxwellian.cpp +++ b/sysboundary/setmaxwellian.cpp @@ -210,6 +210,7 @@ namespace SBC { */ void SetMaxwellian::generateTemplateCell( spatial_cell::SpatialCell& templateCell, + Real B[3], int inputDataIndex, creal& t ) { @@ -304,26 +305,12 @@ namespace SBC { templateCell.adjustSingleCellVelocityBlocks(popID); } // for-loop over particle species - templateCell.parameters[CellParams::PERBX] = Bx; - templateCell.parameters[CellParams::PERBY] = By; - templateCell.parameters[CellParams::PERBZ] = Bz; + B[0] = Bx; + B[1] = By; + B[2] = Bz; calculateCellMoments(&templateCell,true,true); - if(!this->isThisDynamic) { - // WARNING Time-independence assumed here. - templateCell.parameters[CellParams::RHOM_DT2] = templateCell.parameters[CellParams::RHOM]; - templateCell.parameters[CellParams::VX_DT2] = templateCell.parameters[CellParams::VX]; - templateCell.parameters[CellParams::VY_DT2] = templateCell.parameters[CellParams::VY]; - templateCell.parameters[CellParams::VZ_DT2] = templateCell.parameters[CellParams::VZ]; - templateCell.parameters[CellParams::RHOQ_DT2] = templateCell.parameters[CellParams::RHOQ]; - templateCell.parameters[CellParams::PERBX_DT2] = templateCell.parameters[CellParams::PERBX]; - templateCell.parameters[CellParams::PERBY_DT2] = templateCell.parameters[CellParams::PERBY]; - templateCell.parameters[CellParams::PERBZ_DT2] = templateCell.parameters[CellParams::PERBZ]; - } else { - cerr << "ERROR: this is not dynamic in time, please code it!" 
<< endl; - abort(); - } } string SetMaxwellian::getName() const {return "SetMaxwellian";} diff --git a/sysboundary/setmaxwellian.h b/sysboundary/setmaxwellian.h index e5cd40035..12f74d86c 100644 --- a/sysboundary/setmaxwellian.h +++ b/sysboundary/setmaxwellian.h @@ -59,7 +59,7 @@ namespace SBC { virtual uint getIndex() const; protected: - void generateTemplateCell(spatial_cell::SpatialCell& templateCell, int inputDataIndex, creal& t); + void generateTemplateCell(spatial_cell::SpatialCell& templateCell, Real B[3], int inputDataIndex, creal& t); Real maxwellianDistribution(const uint popID, creal& rho, creal& T, creal& vx, creal& vy, creal& vz diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp index ae6ceab9b..e512234a1 100644 --- a/sysboundary/sysboundary.cpp +++ b/sysboundary/sysboundary.cpp @@ -580,6 +580,7 @@ bool SysBoundary::classifyCells(dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2> & perBGrid, Project& project ) { bool success = true; @@ -596,7 +597,7 @@ bool SysBoundary::applyInitialState( ) { continue; } - if((*it)->applyInitialState(mpiGrid, project) == false) { + if((*it)->applyInitialState(mpiGrid, perBGrid, project) == false) { cerr << "ERROR: " << (*it)->getName() << " system boundary condition initial state not applied correctly." << endl; success = false; } diff --git a/sysboundary/sysboundary.h b/sysboundary/sysboundary.h index bc6387b54..ca94eb953 100644 --- a/sysboundary/sysboundary.h +++ b/sysboundary/sysboundary.h @@ -72,6 +72,7 @@ class SysBoundary { FsGrid< fsgrids::technical, 2> & technicalGrid); bool applyInitialState( dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2> & perBGrid, Project& project ); void applySysBoundaryVlasovConditions(dccrg::Dccrg& mpiGrid, creal& t); diff --git a/sysboundary/sysboundarycondition.h b/sysboundary/sysboundarycondition.h index 855a9a0a4..e8528af71 100644 --- a/sysboundary/sysboundarycondition.h +++ b/sysboundary/sysboundarycondition.h @@ -70,6 +70,7 @@ namespace SBC { FsGrid< fsgrids::technical, 2> & technicalGrid)=0; virtual bool applyInitialState( const dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2> & perBGrid, Project &project )=0; virtual Real fieldSolverBoundaryCondMagneticField( From 1c7b46f5a9f829e319c3e09088b1c65c48ae93a0 Mon Sep 17 00:00:00 2001 From: Maxime T J Grandin Date: Thu, 16 May 2019 11:54:18 +0300 Subject: [PATCH 439/602] Reverted changes in cfg file and added output of precipitation DRO --- .../Magnetosphere_polar_small.cfg | 43 ++++++++++++++++--- 1 file changed, 38 insertions(+), 5 deletions(-) diff --git a/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg b/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg index 66ccc94e6..6a772672a 100644 --- a/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg +++ b/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg @@ -1,3 +1,29 @@ + +Skip to content +Pull requests +Issues +Marketplace +Explore +@maximegrandin + +9 +6 + + 13 + +fmihpc/vlasiator +Code +Issues 103 +Pull requests 11 +Projects 0 +Wiki +Insights +vlasiator/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg +@ursg ursg Add backstreaming outputs to Magnetosphere_Polar_small test 16e5a9f on 19 Nov 2018 +@ursg +@ykempf +@galfthan +144 lines (122 sloc) 2.25 KB project = Magnetosphere ParticlePopulations = proton dynamic_timestep = 1 @@ -12,12 +38,12 @@ charge = 1 diagnostic_write_interval = 10 write_initial_state = 0 -system_write_t_interval = 10 +system_write_t_interval = 20 
system_write_file_name = bulk system_write_distribution_stride = 0 system_write_distribution_xline_stride = 10 -system_write_distribution_yline_stride = 0 -system_write_distribution_zline_stride = 10 +system_write_distribution_yline_stride = 10 +system_write_distribution_zline_stride = 1 #[bailout] #write_restart = 0 @@ -32,7 +58,7 @@ y_min = -5.0e6 y_max = 5.0e6 z_min = -250.0e6 z_max = 250.0e6 -t_max = 1000.05 +t_max = 20.05 [proton_vspace] @@ -76,7 +102,6 @@ output = BoundaryType output = MPIrank output = populations_Blocks output = fSaved -output = populations_PrecipitationDiffFlux diagnostic = populations_Blocks @@ -142,3 +167,11 @@ VZ0 = 0.0 nSpaceSamples = 1 nVelocitySamples = 1 + +[proton_precipitation] +nChannels = 16 +emin = 0.1 +emax = 100.0 + +[proton_energydensity] +solarwindspeed = -7.5e5 From f120b2788ba6541db6ce6d7592f06676d4732ee6 Mon Sep 17 00:00:00 2001 From: Grandin Maxime T J Date: Thu, 16 May 2019 11:55:53 +0300 Subject: [PATCH 440/602] Fixed bug in energy channel formula --- datareduction/datareducer.cpp | 7 ------- datareduction/datareductionoperator.cpp | 2 +- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 40c9ddd0e..727890b02 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -665,13 +665,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - if(*it == "populations_PrecipitationDiffFlux") { - // Per-population precipitation directional differential number flux - for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { - outputReducer->addOperator(new DRO::VariablePrecipitationDiffFlux(i)); - } - continue; - } if(*it == "derivs") { // Derivatives of all quantities that might be of interest outputReducer->addOperator(new DRO::DataReductionOperatorDerivatives("drhomdx",fieldsolver::drhomdx,1)); diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index e5e7df728..538c8b47d 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -1445,7 +1445,7 @@ namespace DRO { emax = getObjectWrapper().particleSpecies[popID].emax; // keV nChannels = getObjectWrapper().particleSpecies[popID].nChannels; // number of energy channels, logarithmically spaced between emin and emax for (int i=0; i Date: Thu, 16 May 2019 12:02:21 +0300 Subject: [PATCH 441/602] changed input tilt to degrees --- backgroundfield/vectordipole.cpp | 3 ++- projects/Magnetosphere/Magnetosphere.cpp | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/backgroundfield/vectordipole.cpp b/backgroundfield/vectordipole.cpp index d5b25bfd5..192b48048 100644 --- a/backgroundfield/vectordipole.cpp +++ b/backgroundfield/vectordipole.cpp @@ -44,8 +44,9 @@ void VectorDipole::initialize(const double moment,const double center_x, const d // Scale dipole as a function of x-coordinate xlimit[0]=xlimit_f; // Full dipole when x < xlimit_f - xlimit[1]=xlimit_z; // Zero field when x > xlimit_z + xlimit[1]=xlimit_z; // Zero dipole when x > xlimit_z + // Going from xlimit_f to xlimit_z, scale in IMF B-field IMF[0]=IMF_Bx; IMF[1]=IMF_By; IMF[2]=IMF_Bz; diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index 13052bdf3..a46371f93 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -59,8 +59,8 @@ namespace projects { 
RP::add("Magnetosphere.refine_L1radius","Radius of L1-refined sphere", 1.59275e8); // 25 RE RP::add("Magnetosphere.refine_L1tailthick","Thickness of L1-refined tail region", 6.371e7); // 10 RE - RP::add("Magnetosphere.dipoleTiltPhi","Magnitude of dipole tilt in radians", 0.0); - RP::add("Magnetosphere.dipoleTiltTheta","Direction of dipole tilt from Sun-Earth-line in radians", 0.0); + RP::add("Magnetosphere.dipoleTiltPhi","Magnitude of dipole tilt, in degrees", 0.0); + RP::add("Magnetosphere.dipoleTiltTheta","Direction of dipole tilt from Sun-Earth-line, in degrees", 0.0); RP::add("Magnetosphere.dipoleXFull","X-coordinate up to which dipole is at full strength, in metres", 9.5565e7); // 15 RE RP::add("Magnetosphere.dipoleXZero","X-coordinate after which dipole is at zero strength, in metres", 1.9113e8); // 30 RE RP::add("Magnetosphere.dipoleInflowBX","Inflow magnetic field Bx component to which the vector potential dipole converges. Default is none.", 0.0); @@ -326,7 +326,7 @@ namespace projects { setBackgroundField(bgFieldDipole, BgBGrid, true); break; case 4: // Vector potential dipole, vanishes or optionally scales to static inflow value after a given x-coordinate - bgVectorDipole.initialize(8e15 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, this->dipoleTiltPhi, this->dipoleTiltTheta, this->dipoleXFull, this->dipoleXZero, this->dipoleInflowB[0], this->dipoleInflowB[1], this->dipoleInflowB[2]); + bgVectorDipole.initialize(8e15 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, this->dipoleTiltPhi*3.14159/180., this->dipoleTiltTheta*3.14159/180., this->dipoleXFull, this->dipoleXZero, this->dipoleInflowB[0], this->dipoleInflowB[1], this->dipoleInflowB[2]); setBackgroundField(bgVectorDipole, BgBGrid); break; default: From 4b01830fabb6f2d50db3889353992bae5076cdaa Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Thu, 16 May 2019 12:27:18 +0300 Subject: [PATCH 442/602] Removed background B derivatives --- common.h | 12 ++++++------ datareduction/datareducer.cpp | 6 ------ 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/common.h b/common.h index 778bc960b..017ba7ab4 100644 --- a/common.h +++ b/common.h @@ -195,12 +195,12 @@ namespace CellParams { */ namespace bvolderivatives { enum { - dBGBXVOLdy, - dBGBXVOLdz, - dBGBYVOLdx, - dBGBYVOLdz, - dBGBZVOLdx, - dBGBZVOLdy, +/* dBGBXVOLdy, */ +/* dBGBXVOLdz, */ +/* dBGBYVOLdx, */ +/* dBGBYVOLdz, */ +/* dBGBZVOLdx, */ +/* dBGBZVOLdy, */ dPERBXVOLdy, dPERBXVOLdz, dPERBYVOLdx, diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index cee612a9e..fc2805002 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -630,17 +630,11 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "BVOLderivs") { // Volume-averaged derivatives outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdy",bvolderivatives::dPERBXVOLdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBXVOLdy",bvolderivatives::dBGBXVOLdy,1)); outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdz",bvolderivatives::dPERBXVOLdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBXVOLdz",bvolderivatives::dBGBXVOLdz,1)); outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdx",bvolderivatives::dPERBYVOLdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBYVOLdx",bvolderivatives::dBGBYVOLdx,1)); 
outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdz",bvolderivatives::dPERBYVOLdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBYVOLdz",bvolderivatives::dBGBYVOLdz,1)); outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdx",bvolderivatives::dPERBZVOLdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBZVOLdx",bvolderivatives::dBGBZVOLdx,1)); outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdy",bvolderivatives::dPERBZVOLdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dBGBZVOLdy",bvolderivatives::dBGBZVOLdy,1)); continue; } if(*it == "vg_GridCoordinates") { From e9108dff57aa80bf1bd5dad5024c0300add3a792 Mon Sep 17 00:00:00 2001 From: Maxime T J Grandin Date: Thu, 16 May 2019 13:41:12 +0300 Subject: [PATCH 443/602] Added output of precipitation in cfg file --- .../Magnetosphere_polar_small/Magnetosphere_polar_small.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg b/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg index 6a772672a..3f85f6b19 100644 --- a/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg +++ b/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg @@ -102,6 +102,7 @@ output = BoundaryType output = MPIrank output = populations_Blocks output = fSaved +output = populations_precipitationFlux diagnostic = populations_Blocks From 60d3614c3e01e5949e8f61b11c4c1913d80aa68d Mon Sep 17 00:00:00 2001 From: Maxime T J Grandin Date: Thu, 16 May 2019 13:47:01 +0300 Subject: [PATCH 444/602] Removed accidental header from cfg file --- .../Magnetosphere_polar_small.cfg | 26 ------------------- 1 file changed, 26 deletions(-) diff --git a/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg b/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg index 3f85f6b19..05842b588 100644 --- a/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg +++ b/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg @@ -1,29 +1,3 @@ - -Skip to content -Pull requests -Issues -Marketplace -Explore -@maximegrandin - -9 -6 - - 13 - -fmihpc/vlasiator -Code -Issues 103 -Pull requests 11 -Projects 0 -Wiki -Insights -vlasiator/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg -@ursg ursg Add backstreaming outputs to Magnetosphere_Polar_small test 16e5a9f on 19 Nov 2018 -@ursg -@ykempf -@galfthan -144 lines (122 sloc) 2.25 KB project = Magnetosphere ParticlePopulations = proton dynamic_timestep = 1 From eda9bc6e055fb843b37643bc5830852c9c9b0434 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Thu, 16 May 2019 14:00:46 +0300 Subject: [PATCH 445/602] Removed commented code --- common.h | 6 ------ ioread.cpp | 2 -- iowrite.cpp | 1 - projects/Dispersion/Dispersion.cpp | 8 +------- 4 files changed, 1 insertion(+), 16 deletions(-) diff --git a/common.h b/common.h index 017ba7ab4..0193d790c 100644 --- a/common.h +++ b/common.h @@ -195,12 +195,6 @@ namespace CellParams { */ namespace bvolderivatives { enum { -/* dBGBXVOLdy, */ -/* dBGBXVOLdz, */ -/* dBGBYVOLdx, */ -/* dBGBYVOLdz, */ -/* dBGBZVOLdx, */ -/* dBGBZVOLdy, */ dPERBXVOLdy, dPERBXVOLdz, dPERBYVOLdx, diff --git a/ioread.cpp b/ioread.cpp index d1071df33..147c46b37 100644 --- a/ioread.cpp +++ b/ioread.cpp @@ -996,8 +996,6 
@@ bool exec_readGrid(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid, //todo, check file datatype, and do not just use double phiprof::start("readCellParameters"); - //if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"perturbed_B",CellParams::PERBX,3,mpiGrid); } -// Backround B has to be set, there are also the derivatives that should be written/read if we wanted to only read in background field if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"moments",CellParams::RHOM,5,mpiGrid); } if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"moments_dt2",CellParams::RHOM_DT2,5,mpiGrid); } if(success) { success=readCellParamsVariable(file,fileCells,localCellStartOffset,localCells,"moments_r",CellParams::RHOM_R,5,mpiGrid); } diff --git a/iowrite.cpp b/iowrite.cpp index 126dd05d6..f94cf744d 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -1270,7 +1270,6 @@ bool writeRestart(dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid, phiprof::start("reduceddataIO"); //write out DROs we need for restarts DataReducer restartReducer; - //restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("perturbed_B",CellParams::PERBX,3)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("moments",CellParams::RHOM,5)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("moments_dt2",CellParams::RHOM_DT2,5)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("moments_r",CellParams::RHOM_R,5)); diff --git a/projects/Dispersion/Dispersion.cpp b/projects/Dispersion/Dispersion.cpp index 0bd572475..3ed66066a 100644 --- a/projects/Dispersion/Dispersion.cpp +++ b/projects/Dispersion/Dispersion.cpp @@ -107,7 +107,7 @@ namespace projects { int myRank; MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - cerr << "Warning, this function does virtually nothing, since B and E are not updated in DCCRG cell params since May 2019" << endl; + cerr << "Warning, this function does virtually nothing, since B and E are not updated in DCCRG cell params since PR#405" << endl; cerr << "If this is undesirable to you, please implement writing the fields out of fsgrid objects" << endl; vector<Real> localRhom(P::xcells_ini, 0.0), @@ -126,13 +126,7 @@ namespace projects { outputEz(P::xcells_ini, 0.0); for(uint i=0; i<Parameters::localCells.size(); i++) { -// localPerBx[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::PERBX]; -// localPerBy[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::PERBY]; -// localPerBz[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::PERBZ]; localRhom[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::RHOM]; -// localEx[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::EX]; -// localEy[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::EY]; -// localEz[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::EZ]; } } From 400b37dcbb79eb8688b68b8c46cf7170889c9845 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Thu, 16 May 2019 14:16:50 +0300 Subject: [PATCH 446/602] Removed commented code --- sysboundary/donotcompute.cpp | 9 --------- 1 file changed, 9 deletions(-) diff --git a/sysboundary/donotcompute.cpp b/sysboundary/donotcompute.cpp index fab2630f5..e22db7ccc 100644 --- a/sysboundary/donotcompute.cpp +++ b/sysboundary/donotcompute.cpp @@ -65,15 +65,6 @@ namespace SBC { if(cell->sysBoundaryFlag != this->getIndex()) continue; //TODO: Set
fields on B grid to 0 -// cell->parameters[CellParams::PERBX] = 0.0; -// cell->parameters[CellParams::PERBY] = 0.0; -// cell->parameters[CellParams::PERBZ] = 0.0; -// cell->parameters[CellParams::PERBX_DT2] = 0.0; -// cell->parameters[CellParams::PERBY_DT2] = 0.0; -// cell->parameters[CellParams::PERBZ_DT2] = 0.0; -// cell->parameters[CellParams::EX] = 0.0; -// cell->parameters[CellParams::EY] = 0.0; -// cell->parameters[CellParams::EZ] = 0.0; cell->parameters[CellParams::RHOM] = 0.0; cell->parameters[CellParams::VX] = 0.0; cell->parameters[CellParams::VY] = 0.0; From 0b7d86569e6997e01792eec512a3dab2d35e8268 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Thu, 16 May 2019 14:18:20 +0300 Subject: [PATCH 447/602] Added missing break --- sysboundary/setbyuser.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/sysboundary/setbyuser.cpp b/sysboundary/setbyuser.cpp index 73f866fbe..b97bfa268 100644 --- a/sysboundary/setbyuser.cpp +++ b/sysboundary/setbyuser.cpp @@ -310,6 +310,7 @@ namespace SBC { perBGrid.get(i,j,k)->at(fsgrids::bfield::PERBX) = templateB[iface][0]; perBGrid.get(i,j,k)->at(fsgrids::bfield::PERBY) = templateB[iface][1]; perBGrid.get(i,j,k)->at(fsgrids::bfield::PERBZ) = templateB[iface][2]; + break; } } } From 5e5eeb3c774d269e2601d281a239593b6557c32e Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Thu, 16 May 2019 14:20:47 +0300 Subject: [PATCH 448/602] Restored setting time derivatives of rho and v --- sysboundary/setmaxwellian.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/sysboundary/setmaxwellian.cpp b/sysboundary/setmaxwellian.cpp index 4f500945f..7c924ff6d 100644 --- a/sysboundary/setmaxwellian.cpp +++ b/sysboundary/setmaxwellian.cpp @@ -311,6 +311,18 @@ namespace SBC { calculateCellMoments(&templateCell,true,true); + if(!this->isThisDynamic) { + // WARNING Time-independence assumed here. + templateCell.parameters[CellParams::RHOM_DT2] = templateCell.parameters[CellParams::RHOM]; + templateCell.parameters[CellParams::VX_DT2] = templateCell.parameters[CellParams::VX]; + templateCell.parameters[CellParams::VY_DT2] = templateCell.parameters[CellParams::VY]; + templateCell.parameters[CellParams::VZ_DT2] = templateCell.parameters[CellParams::VZ]; + templateCell.parameters[CellParams::RHOQ_DT2] = templateCell.parameters[CellParams::RHOQ]; + } else { + cerr << "ERROR: this is not dynamic in time, please code it!" 
<< endl; + abort(); + } + } string SetMaxwellian::getName() const {return "SetMaxwellian";} From 752dfa2656759b0d2c0996028b7e50b9c0a13598 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Thu, 16 May 2019 14:23:27 +0300 Subject: [PATCH 449/602] Removed dummy E and B fields from Dispersion::hook completely --- projects/Dispersion/Dispersion.cpp | 38 ++---------------------------- 1 file changed, 2 insertions(+), 36 deletions(-) diff --git a/projects/Dispersion/Dispersion.cpp b/projects/Dispersion/Dispersion.cpp index 3ed66066a..a7ac4aab6 100644 --- a/projects/Dispersion/Dispersion.cpp +++ b/projects/Dispersion/Dispersion.cpp @@ -111,55 +111,21 @@ namespace projects { cerr << "If this is undesirable to you, please implement writing the fields out of fsgrid objects" << endl; vector<Real> localRhom(P::xcells_ini, 0.0), - outputRhom(P::xcells_ini, 0.0), - localPerBx(P::xcells_ini, 0.0), - outputPerBx(P::xcells_ini, 0.0), - localPerBy(P::xcells_ini, 0.0), - outputPerBy(P::xcells_ini, 0.0), - localPerBz(P::xcells_ini, 0.0), - outputPerBz(P::xcells_ini, 0.0), - localEx(P::xcells_ini, 0.0), - outputEx(P::xcells_ini, 0.0), - localEy(P::xcells_ini, 0.0), - outputEy(P::xcells_ini, 0.0), - localEz(P::xcells_ini, 0.0), - outputEz(P::xcells_ini, 0.0); + outputRhom(P::xcells_ini, 0.0); + for(uint i=0; i<Parameters::localCells.size(); i++) { localRhom[Parameters::localCells[i] - 1] = mpiGrid[Parameters::localCells[i]]->parameters[CellParams::RHOM]; } } - MPI_Reduce(&(localPerBx[0]), &(outputPerBx[0]), P::xcells_ini, MPI_DOUBLE, MPI_SUM, MASTER_RANK, MPI_COMM_WORLD); - MPI_Reduce(&(localPerBy[0]), &(outputPerBy[0]), P::xcells_ini, MPI_DOUBLE, MPI_SUM, MASTER_RANK, MPI_COMM_WORLD); - MPI_Reduce(&(localPerBz[0]), &(outputPerBz[0]), P::xcells_ini, MPI_DOUBLE, MPI_SUM, MASTER_RANK, MPI_COMM_WORLD); - MPI_Reduce(&(localEx[0]), &(outputEx[0]), P::xcells_ini, MPI_DOUBLE, MPI_SUM, MASTER_RANK, MPI_COMM_WORLD); - MPI_Reduce(&(localEy[0]), &(outputEy[0]), P::xcells_ini, MPI_DOUBLE, MPI_SUM, MASTER_RANK, MPI_COMM_WORLD); - MPI_Reduce(&(localEz[0]), &(outputEz[0]), P::xcells_ini, MPI_DOUBLE, MPI_SUM, MASTER_RANK, MPI_COMM_WORLD); MPI_Reduce(&(localRhom[0]), &(outputRhom[0]), P::xcells_ini, MPI_DOUBLE, MPI_SUM, MASTER_RANK, MPI_COMM_WORLD); if(myRank == MASTER_RANK) { FILE* outputFile = fopen("perBxt.bin", "ab"); - fwrite(&(outputPerBx[0]), sizeof(outputPerBx[0]), P::xcells_ini, outputFile); - fclose(outputFile); - outputFile = fopen("perByt.bin", "ab"); - fwrite(&(outputPerBy[0]), sizeof(outputPerBy[0]), P::xcells_ini, outputFile); - fclose(outputFile); - outputFile = fopen("perBzt.bin", "ab"); - fwrite(&(outputPerBz[0]), sizeof(outputPerBz[0]), P::xcells_ini, outputFile); - fclose(outputFile); outputFile = fopen("rhomt.bin", "ab"); fwrite(&(outputRhom[0]), sizeof(outputRhom[0]), P::xcells_ini, outputFile); fclose(outputFile); - outputFile = fopen("Ext.bin", "ab"); - fwrite(&(outputEx[0]), sizeof(outputEx[0]), P::xcells_ini, outputFile); - fclose(outputFile); - outputFile = fopen("Eyt.bin", "ab"); - fwrite(&(outputEy[0]), sizeof(outputEy[0]), P::xcells_ini, outputFile); - fclose(outputFile); - outputFile = fopen("Ezt.bin", "ab"); - fwrite(&(outputEz[0]), sizeof(outputEz[0]), P::xcells_ini, outputFile); - fclose(outputFile); } } } From 08f615afcc34fc0daa2b95221f9e28a25e005f82 Mon Sep 17 00:00:00 2001 From: Grandin Maxime T J Date: Thu, 16 May 2019 15:44:09 +0300 Subject: [PATCH 450/602] Made the loss cone angle a cfg parameter --- datareduction/datareductionoperator.cpp | 18 ++++++++++++------ datareduction/datareductionoperator.h | 2 +- object_wrapper.cpp | 4 +++- particle_species.h | 1 + 4 files changed, 17 insertions(+), 8 deletions(-)
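The precipitation reducer touched by this patch bins the precipitating flux into nChannels energy channels spaced logarithmically between emin and emax. A minimal sketch of that spacing (an assumed form consistent with the comment "logarithmically spaced between emin and emax"; treat the exact expression in VariablePrecipitationDiffFlux's constructor as authoritative, and the helper name here is hypothetical):

   #include <cmath>
   #include <vector>
   // Centre energies of nChannels log-spaced channels, in keV.
   std::vector<double> logSpacedChannels(int nChannels, double emin, double emax) {
      std::vector<double> channels(nChannels);
      for (int i = 0; i < nChannels; i++) {
         channels[i] = emin * std::pow(emax / emin, double(i) / (nChannels - 1));
      }
      return channels; // i = 0 gives emin, i = nChannels - 1 gives emax
   }

With the defaults nChannels = 16, emin = 0.1 and emax = 100.0 this spans 0.1-100 keV. A velocity-space cell contributes to a channel only when its velocity direction lies within lossConeAngle of the magnetic field line, which is why the code below computes cosAngle = cos(lossConeAngle*M_PI/180.0) from the new cfg parameter.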
diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index 538c8b47d..36ba0a504 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -1436,11 +1436,17 @@ namespace DRO { return true; } - // Precipitation directional differential number flux + /*! \brief Precipitation directional differential number flux + * Evaluation of the precipitating differential flux (per population). + * In a selected number (default: 16) of logarithmically spaced energy bins, the average of + * V*V/mass + * is calculated within the loss cone of fixed angular opening (default: 10 deg). + * The differential flux is converted in part. / cm^2 / s / sr / eV (unit used by observers). + * Parameters that can be set in cfg file: nChannels, emin [keV], emax [keV], lossConeAngle [deg] + */ VariablePrecipitationDiffFlux::VariablePrecipitationDiffFlux(cuint _popID): DataReductionOperatorHasParameters(),popID(_popID) { popName = getObjectWrapper().particleSpecies[popID].name; - cosAngle = cos(10.*M_PI/180.0); // cosine of fixed loss cone angle - + lossConeAngle = getObjectWrapper().particleSpecies[popID].lossConeAngle; // deg emin = getObjectWrapper().particleSpecies[popID].emin; // keV emax = getObjectWrapper().particleSpecies[popID].emax; // keV nChannels = getObjectWrapper().particleSpecies[popID].nChannels; // number of energy channels, logarithmically spaced between emin and emax @@ -1470,6 +1476,8 @@ namespace DRO { B[1] = cell->parameters[CellParams::PERBYVOL] + cell->parameters[CellParams::BGBYVOL]; B[2] = cell->parameters[CellParams::PERBZVOL] + cell->parameters[CellParams::BGBZVOL]; + Real cosAngle = cos(lossConeAngle*M_PI/180.0); // cosine of fixed loss cone angle + // Unit B-field direction creal normB = sqrt(B[0]*B[0] + B[1]*B[1] + B[2]*B[2]); std::array b_unit; @@ -1525,9 +1533,7 @@ namespace DRO { thread_count[binNumber] += countAndGate * DV3; } } - for (int i=0; i channels, dataDiffFlux; }; } // namespace DRO diff --git a/object_wrapper.cpp b/object_wrapper.cpp index 635060bed..661a81e46 100644 --- a/object_wrapper.cpp +++ b/object_wrapper.cpp @@ -75,6 +75,7 @@ bool ObjectWrapper::addPopulationParameters() { Readparameters::add(pop + "_precipitation.nChannels", "Number of energy channels for precipitation differential flux evaluation", 16); Readparameters::add(pop + "_precipitation.emin", "Lowest energy channel (in keV) for precipitation differential flux evaluation", 0.1); Readparameters::add(pop + "_precipitation.emax", "Highest energy channel (in keV) for precipitation differential flux evaluation", 100.0); + Readparameters::add(pop + "_precipitation.lossConeAngle", "Fixed loss cone opening angle (in deg) for precipitation differential flux evaluation", 10.0); // Energy density parameters Readparameters::add(pop + "_energydensity.limit1", "Lower limit of second bin for energy density, given in units of solar wind ram energy.", 5.0); @@ -177,10 +178,11 @@ bool ObjectWrapper::getParameters() { species.SolarWindEnergy = 0.5 * species.mass * species.SolarWindSpeed * species.SolarWindSpeed; } - // Precipitation parameters + // Get precipitation parameters Readparameters::get(pop + "_precipitation.nChannels", species.nChannels); Readparameters::get(pop + "_precipitation.emin", species.emin); Readparameters::get(pop + "_precipitation.emax", species.emax); + Readparameters::get(pop + "_precipitation.lossConeAngle", species.lossConeAngle); } return true; diff --git a/particle_species.h b/particle_species.h index 
ac6a726b4..d1e9fe635 100644 --- a/particle_species.h +++ b/particle_species.h @@ -68,6 +68,7 @@ namespace species { int nChannels; /*!< Number of energy channels for precipitation differential flux evaluation. Default 16. */ Real emin; /*!< Lowest energy channel (in keV) for precipitation differential flux evaluation. Default 0.1. */ Real emax; /*!< Highest energy channel (in keV) for precipitation differential flux evaluation. Default 100. */ + Real lossConeAngle; /*!< Fixed loss cone opening angle (in deg) for precipitation differential flux evaluation. Default 10. */ Species(); Species(const Species& other); From aac4ad3291e9a6ffef2ddd5af05f6785fadde850 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Thu, 16 May 2019 15:46:36 +0300 Subject: [PATCH 451/602] Added TODO for E_Hall --- datareduction/datareducer.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index fc2805002..a36e0cbea 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -531,6 +531,10 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); continue; } + if(*it == "fg_HallE") { + // TODO: Add outputreducer to get EHALL from fsgrid + continue; + } if(*it =="GradPeE") { // Electron pressure gradient contribution to the generalized ohm's law outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EGRADPE",CellParams::EXGRADPE,3)); From e35e5394e4c77eb5c293079838413d11eca02ae4 Mon Sep 17 00:00:00 2001 From: Grandin Maxime T J Date: Thu, 16 May 2019 15:51:44 +0300 Subject: [PATCH 452/602] Moved flux unit conversion outside of parallel region + finalised documentation --- datareduction/datareductionoperator.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index 36ba0a504..5d31254a4 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -1442,13 +1442,14 @@ namespace DRO { * V*V/mass * is calculated within the loss cone of fixed angular opening (default: 10 deg). * The differential flux is converted in part. / cm^2 / s / sr / eV (unit used by observers). - * Parameters that can be set in cfg file: nChannels, emin [keV], emax [keV], lossConeAngle [deg] + * Parameters that can be set in cfg file under [{species}_precipitation]: nChannels, emin [keV], emax [keV], lossConeAngle [deg] + * The energy channels are saved in bulk files as PrecipitationCentreEnergy{channel_number}. */ VariablePrecipitationDiffFlux::VariablePrecipitationDiffFlux(cuint _popID): DataReductionOperatorHasParameters(),popID(_popID) { popName = getObjectWrapper().particleSpecies[popID].name; lossConeAngle = getObjectWrapper().particleSpecies[popID].lossConeAngle; // deg emin = getObjectWrapper().particleSpecies[popID].emin; // keV - emax = getObjectWrapper().particleSpecies[popID].emax; // keV + emax = getObjectWrapper().particleSpecies[popID].emax; // keV nChannels = getObjectWrapper().particleSpecies[popID].nChannels; // number of energy channels, logarithmically spaced between emin and emax for (int i=0; i Date: Thu, 16 May 2019 16:04:20 +0300 Subject: [PATCH 453/602] Added tester functions for IMF scaling. Closing this branch as result has non-zero curl of B, making Hall calculations messy. 
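The non-zero curl noted above can be demonstrated with a centred-difference estimate. A small helper of that kind, for illustration only; B stands for any callable returning (Bx, By, Bz), such as the fieldmodels dipoles exercised by the scripts below:

    import numpy as np

    def numerical_curl(B, x, y, z, h=1.0):
        # Centred differences of B(x, y, z) -> (Bx, By, Bz) along each axis.
        def dB(axis):
            e = [0.0, 0.0, 0.0]
            e[axis] = h
            fp = np.asarray(B(x + e[0], y + e[1], z + e[2]))
            fm = np.asarray(B(x - e[0], y - e[1], z - e[2]))
            return (fp - fm) / (2.0 * h)
        dx, dy, dz = dB(0), dB(1), dB(2)
        # curl B = (dBz/dy - dBy/dz, dBx/dz - dBz/dx, dBy/dx - dBx/dy)
        return np.array([dy[2] - dz[1], dz[0] - dx[2], dx[1] - dy[0]])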
---
 .../vectorpotentialdipole_streamlines2.py |  193 ++++
 .../vectorpotentialdipole_verify2.py      |  430 ++++++++++++++++++
 2 files changed, 623 insertions(+)
 create mode 100644 doc/vectordipole/vectorpotentialdipole_streamlines2.py
 create mode 100644 doc/vectordipole/vectorpotentialdipole_verify2.py

diff --git a/doc/vectordipole/vectorpotentialdipole_streamlines2.py b/doc/vectordipole/vectorpotentialdipole_streamlines2.py
new file mode 100644
index 000000000..4be1b5552
--- /dev/null
+++ b/doc/vectordipole/vectorpotentialdipole_streamlines2.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python
+
+# /*
+# * This file is part of Vlasiator.
+# * Copyright 2010-2016 Finnish Meteorological Institute
+# * Copyright 2017-2019 University of Helsinki
+# *
+# * For details of usage, see the COPYING file and read the "Rules of the Road"
+# * at http://www.physics.helsinki.fi/vlasiator/
+# *
+# * This program is free software; you can redistribute it and/or modify
+# * it under the terms of the GNU General Public License as published by
+# * the Free Software Foundation; either version 2 of the License, or
+# * (at your option) any later version.
+# *
+# * This program is distributed in the hope that it will be useful,
+# * but WITHOUT ANY WARRANTY; without even the implied warranty of
+# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# * GNU General Public License for more details.
+# *
+# * You should have received a copy of the GNU General Public License along
+# * with this program; if not, write to the Free Software Foundation, Inc.,
+# * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# */
+import numpy as np
+import math
+import sys,os
+import pytools as pt
+import matplotlib.pyplot as plt
+import matplotlib as mpl
+import scipy
+import fieldmodels
+
+''' Testing routine for different dipole formulations
+
+    Plots streamlines of magnetic field in the meridional x-z-plane for four different models
+
+'''
+
+if len(sys.argv)!=1:
+    testset = int(sys.argv[1])
+else:
+    testset = 0
+
+
+plt.switch_backend('Agg')
+print(mpl.__version__)
+outfilename = "./vecpotdip_verify_streamlines2_"+str(testset)+".png"
+
+RE=6371000.
+epsilon=1.e-15
+
+if testset==0:
+    tilt_angle_phi = 0.
+    tilt_angle_theta = 0.
+    BGB=[0,0,0]
+elif testset==1:
+    tilt_angle_phi = 10.
+    tilt_angle_theta = 0.
+    BGB=[0,0,0]
+elif testset==2:
+    tilt_angle_phi = 0.
+    tilt_angle_theta = 0.
+    BGB=[0,0,-5.e-9]
+elif testset==3:
+    tilt_angle_phi = 10.
+    tilt_angle_theta = 45.
+    BGB=[0,0,0]
+elif testset==4:
+    tilt_angle_phi = 0.
+    tilt_angle_theta = 0.
+    BGB=[2.236e-9,0,-2.236e-9]
+elif testset==5:
+    tilt_angle_phi = 10.
+    tilt_angle_theta = 40.
+    BGB=[2.236e-9,0,-2.236e-9]
+else: # Same as 0
+    print("Default")
+    tilt_angle_phi = 0.
+    tilt_angle_theta = 0.
+ BGB=[0,0,0] + +fontsize=20 + +#fieldmodels.dipole.set_dipole(centerx, centery, centerz, tilt_phi, tilt_theta, mult=1.0, radius_f=None, radius_z=None): +dip = fieldmodels.dipole(0,0,0,tilt_angle_phi,tilt_angle_theta) +mdip = fieldmodels.dipole(80*RE,0,0,tilt_angle_phi,180.-tilt_angle_theta) + +# IMF scaling to inflow boundary +imfpot = fieldmodels.IMFpotential(radius_z=10, radius_f=40, IMF=BGB) + +# Create figure +fig = plt.figure() +fig.set_size_inches(20,20) + + +gs = mpl.gridspec.GridSpec(2, 2, wspace=0.25, hspace=0.25) +fig.add_subplot(gs[0, 0]) +fig.add_subplot(gs[0, 1]) +fig.add_subplot(gs[1, 0]) +fig.add_subplot(gs[1, 1]) +axes = fig.get_axes() + +fig.suptitle(r"Streamlines of meridional plane magnetic field with dipole tilt $\Phi="+str(int(tilt_angle_phi))+"$, $\Theta="+str(int(tilt_angle_theta))+"$ with IMF=("+str(BGB[0])+","+str(BGB[1])+","+str(BGB[2])+")", fontsize=fontsize) + +nx = 200 +nz = 200 +xmin, xmax = (-59,41) +zmin, zmax = (-50,50) + +x = np.linspace(xmin,xmax,num=nx) +z = np.linspace(zmin,zmax,num=nz) +BX = np.zeros([nx,nz]) +BZ = np.zeros([nx,nz]) + +[Xmesh,Zmesh] = scipy.meshgrid(x,z) + +# ax = axes[0] +# print("0") +# for i in range(len(x)): +# for j in range(len(z)): +# BX[j,i] = dip.get(x[i]*RE,0,z[j]*RE,0,0,0) +BGB[0] +# BZ[j,i] = dip.get(x[i]*RE,0,z[j]*RE,0,2,0) +BGB[2] +# ax.streamplot(Xmesh,Zmesh,BX,BZ,linewidth=1, density=5, color='k') +# ax.text(0.2,0.08,"Vector potential",transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.7), fontsize=fontsize) + +ax = axes[0] +print("0") +for i in range(len(x)): + for j in range(len(z)): + BX[j,i] = dip.get_old(x[i]*RE,0,z[j]*RE,0,0,0) +BGB[0] + BZ[j,i] = dip.get_old(x[i]*RE,0,z[j]*RE,0,2,0) +BGB[2] +ax.streamplot(Xmesh,Zmesh,BX,BZ,linewidth=1, density=5, color='k') +ax.text(0.2,0.08,"Regular dipole",transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.7), fontsize=fontsize) + + +ax = axes[1] +print("1") +for i in range(len(x)): + for j in range(len(z)): + BX[j,i] = dip.getX(x[i]*RE,0,z[j]*RE,0,0,0) +BGB[0] + BZ[j,i] = dip.getX(x[i]*RE,0,z[j]*RE,0,2,0) +BGB[2] +ax.streamplot(Xmesh,Zmesh,BX,BZ,linewidth=1, density=5, color='k') +ax.text(0.2,0.08,"Vector potential (X)",transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.7), fontsize=fontsize) + +# ax = axes[1] +# print("1") +# for i in range(len(x)): +# for j in range(len(z)): +# BX[j,i] = dip.getX(x[i]*RE,0,z[j]*RE,0,0,0) +# BZ[j,i] = dip.getX(x[i]*RE,0,z[j]*RE,0,2,0) +# BX[j,i] += mdip.getX(x[i]*RE,0,z[j]*RE,0,0,0) +BGB[2] +# BZ[j,i] += mdip.getX(x[i]*RE,0,z[j]*RE,0,2,0) +BGB[2] +# ax.streamplot(Xmesh,Zmesh,BX,BZ,linewidth=1, density=5, color='k') +# ax.text(0.2,0.08,"Vector potential + mirror (X)",transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.7), fontsize=fontsize) + +ax = axes[2] +print("2") +for i in range(len(x)): + for j in range(len(z)): + BX[j,i] = dip.get_old(x[i]*RE,0,z[j]*RE,0,0,0) + BZ[j,i] = dip.get_old(x[i]*RE,0,z[j]*RE,0,2,0) + BX[j,i] += mdip.get_old(x[i]*RE,0,z[j]*RE,0,0,0) +BGB[0] + BZ[j,i] += mdip.get_old(x[i]*RE,0,z[j]*RE,0,2,0) +BGB[2] +ax.streamplot(Xmesh,Zmesh,BX,BZ,linewidth=1, density=5, color='k') +ax.text(0.2,0.08,"Regular dipole + mirror",transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.7), fontsize=fontsize) + +# if tilt_angle_phi 1e-4: + ylims[1] = 1e-4 + ax.set_ylim(ylims) + +handles, labels = axes[-1].get_legend_handles_labels() +axes[-1].legend(handles, labels, fontsize=fontsize) + +fig.savefig(outfilename) +plt.close() + + + +if calcderivatives: + # Derivatives + step2=0.00001 # 
distance in each direction for calculating numerical derivative
+    for kkk in range(3):
+        print("derivatives d"+coords[kkk])
+        dB1 = np.zeros([nr,3,3])
+        dB2 = np.zeros([nr,3,3])
+        dB3 = np.zeros([nr,3,3])
+        dB4 = np.zeros([nr,3,3])
+
+        # Create figure
+        fig = plt.figure()
+        fig.set_size_inches(20,30)
+        for i in range(nsubplots):
+            fig.add_subplot(nsubplots,1,i+1)
+        axes = fig.get_axes()
+
+        fig.suptitle(r"Numerical and analytical derivative ratios, profiles starting from ("+str(line_start[0])+","+str(line_start[1])+","+str(line_start[2])+") [RE] with dipole tilt $\Phi="+str(int(tilt_angle_phi))+"$, $\Theta="+str(int(tilt_angle_theta))+"$", fontsize=fontsize)
+
+        for i in range(nsubplots):
+            print("derivatives subplot ",i)
+            ax = axes[i]
+
+            xv = line_start[0]*RE + radii*np.sin(line_phi[i])*np.cos(line_theta[i])
+            yv = line_start[1]*RE + radii*np.sin(line_phi[i])*np.sin(line_theta[i])
+            zv = line_start[2]*RE + radii*np.cos(line_phi[i])
+
+            for j in range(nr):
+                for k in range(3):
+                    B1[j,k] = dip.getX(xv[j],yv[j],zv[j],0,k,0) + imfpot.get(xv[j],yv[j],zv[j],0,k,0)
+                    B2[j,k] = dip.get_old(xv[j],yv[j],zv[j],0,k,0)
+                    B3[j,k] = B2[j,k] + mdip.get_old(xv[j],yv[j],zv[j],0,k,0)
+                    B4[j,k] = dip.get_ldp(xv[j],yv[j],zv[j],0,k,0)
+                    B4[j,k] = B4[j,k] + mdip.get_ldp(xv[j],yv[j],zv[j],0,k,0)
+                    #for kk in range(3):
+                    kk=kkk
+                    dB1[j,k,kk] = dip.getX(xv[j],yv[j],zv[j],1,k,kk) + imfpot.get(xv[j],yv[j],zv[j],1,k,kk)
+                    dB2[j,k,kk] = dip.get_old(xv[j],yv[j],zv[j],1,k,kk)
+                    dB3[j,k,kk] = dB2[j,k,kk] + mdip.get_old(xv[j],yv[j],zv[j],1,k,kk)
+                    dB4[j,k,kk] = dip.get_ldp(xv[j],yv[j],zv[j],1,k,kk)
+                    dB4[j,k,kk] = dB4[j,k,kk] + mdip.get_ldp(xv[j],yv[j],zv[j],1,k,kk)
+
+            # analytical derivative vs numerical derivative
+            for j in np.arange(1,nr-1):
+                for k in range(3):
+
+                    # d/dx
+                    if kkk==0:
+                        #cdbx=(dip.getX(xv[j]+step2*RE,yv[j],zv[j],0,k,0) - dip.getX(xv[j]-step2*RE,yv[j],zv[j],0,k,0))/(2*step2*RE)
+                        cdbx=(dip.getX(xv[j]+step2*RE,yv[j],zv[j],0,k,0) - dip.getX(xv[j]-step2*RE,yv[j],zv[j],0,k,0) + imfpot.get(xv[j]+step2*RE,yv[j],zv[j],0,k,0) - imfpot.get(xv[j]-step2*RE,yv[j],zv[j],0,k,0))/(2*step2*RE)
+                        if abs(cdbx) > epsilon*B1[j,k]:
+                            dB1[j,k,0] = dB1[j,k,0]/cdbx
+                        elif (abs(cdbx) epsilon*B2[j,k]:
+                            dB2[j,k,0] = dB2[j,k,0]/cdbx
+                        elif (abs(cdbx) epsilon*B3[j,k]:
+                            dB3[j,k,0] = dB3[j,k,0]/cdbx
+                        elif (abs(cdbx) epsilon*B4[j,k]:
+                            dB4[j,k,0] = dB4[j,k,0]/cdbx
+                        elif (abs(cdbx) epsilon*B1[j,k]:
+                            dB1[j,k,1] = dB1[j,k,1]/cdby
+                        elif (abs(cdby) epsilon*B2[j,k]:
+                            dB2[j,k,1] = dB2[j,k,1]/cdby
+                        elif (abs(cdby) epsilon*B3[j,k]:
+                            dB3[j,k,1] = dB3[j,k,1]/cdby
+                        elif (abs(cdby) epsilon*B4[j,k]:
+                            dB4[j,k,1] = dB4[j,k,1]/cdby
+                        elif (abs(cdby) epsilon*B1[j,k]:
+                            dB1[j,k,2] = dB1[j,k,2]/cdbz
+                        elif (abs(cdbz) epsilon*B2[j,k]:
+                            dB2[j,k,2] = dB2[j,k,2]/cdbz
+                        elif (abs(cdbz) epsilon*B3[j,k]:
+                            dB3[j,k,2] = dB3[j,k,2]/cdbz
+                        elif (abs(cdbz) epsilon*B4[j,k]:
+                            dB4[j,k,2] = dB4[j,k,2]/cdbz
+                        elif (abs(cdbz)
Date: Thu, 16 May 2019 16:16:37 +0300
Subject: [PATCH 454/602] EHALL fsgrid reducer

For the sake of simplicity, this has less descriptive names for the Hall
components (being simply EHALL0 to EHALL10), but since we've only ever used
them for debugging purposes anyway, I suppose this will be quite good enough.
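The diff below registers one fsgrid reducer per Hall component through a lambda that captures index by value. A Python analogue of that registration pattern, with a default argument playing the role of the C++ [index] capture (the toy grid is illustrative):

    reducers = {}
    for index in range(11):  # the 11 components named EHALL0..EHALL10 above
        name = "EHALL" + str(index)
        # i=index freezes the current value, as capture-by-value does in C++.
        reducers[name] = lambda grid, i=index: [cell[i] for cell in grid]

    grid = [[10 * c + k for k in range(11)] for c in range(3)]  # 3 cells, 11 components
    assert reducers["EHALL2"](grid) == [2, 12, 22]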
--- datareduction/datareducer.cpp | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index d5828c337..e06742b6e 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -539,7 +539,35 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti continue; } if(*it == "fg_HallE") { - // TODO: Add outputreducer to get EHALL from fsgrid + for(int index=0; index<11; index++) { + std::string reducer_name = "EHALL" + std::to_string(index); + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid(reducer_name,[index]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); + + // Iterate through fsgrid cells and extract EHall + for(int z=0; z Date: Thu, 16 May 2019 16:27:33 +0300 Subject: [PATCH 455/602] Added writing of lossConeAngle parameter to bulk files --- datareduction/datareductionoperator.cpp | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index 5d31254a4..f4370da4b 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -1567,12 +1567,18 @@ namespace DRO { for (int i=0; i E1limit) thread_E1_sum += block_data[n * SIZE_VELBLOCK+cellIndex(i,j,k)] * ENERGY * DV3; if (ENERGY > E2limit) thread_E2_sum += block_data[n * SIZE_VELBLOCK+cellIndex(i,j,k)] * ENERGY * DV3; } + } + // Accumulate contributions coming from this velocity block to the + // spatial cell velocity moments. If multithreading / OpenMP is used, + // these updates need to be atomic: + # pragma omp critical + { EDensity[0] += thread_E0_sum; EDensity[1] += thread_E1_sum; EDensity[2] += thread_E2_sum; From 42640bd527d638e844ede7e3deb3934090b09982 Mon Sep 17 00:00:00 2001 From: ykempf Date: Thu, 16 May 2019 16:27:55 +0300 Subject: [PATCH 456/602] Correct BVOL derivatives on DCCRG for Vlasov solver. 
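For reference, the quantity these derivatives feed: the acceleration transform assembles the curl of the perturbed field from six slopes which, after this fix, arrive already divided by the cell size. A sketch reusing the variable names from the diff below:

    def curl_perB(dBXdy, dBXdz, dBYdx, dBYdz, dBZdx, dBZdy):
        # curl B = (dBz/dy - dBy/dz, dBx/dz - dBz/dx, dBy/dx - dBx/dy)
        return (dBZdy - dBYdz, dBXdz - dBZdx, dBYdx - dBXdy)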
--- fieldsolver/gridGlue.cpp | 14 +++++++------- vlasovsolver/cpu_acc_transform.cpp | 12 ++++++------ vlasovsolver_amr/cpu_acc_transform.hpp | 12 ++++++------ 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 9e06f4f53..6948cada6 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -276,13 +276,13 @@ void getFieldsFromFsGrid( sendBuffer[ii].sums[3 ] += volcell->at(fsgrids::volfields::EXVOL); sendBuffer[ii].sums[4 ] += volcell->at(fsgrids::volfields::EYVOL); sendBuffer[ii].sums[5 ] += volcell->at(fsgrids::volfields::EZVOL); - sendBuffer[ii].sums[6 ] += volcell->at(fsgrids::volfields::dPERBXVOLdy); - sendBuffer[ii].sums[7 ] += volcell->at(fsgrids::volfields::dPERBXVOLdz); - sendBuffer[ii].sums[8 ] += volcell->at(fsgrids::volfields::dPERBYVOLdx); - sendBuffer[ii].sums[9 ] += volcell->at(fsgrids::volfields::dPERBYVOLdz); - sendBuffer[ii].sums[10] += volcell->at(fsgrids::volfields::dPERBZVOLdx); - sendBuffer[ii].sums[11] += volcell->at(fsgrids::volfields::dPERBZVOLdy); - sendBuffer[ii].sums[12] += bgcell->at(fsgrids::bgbfield::BGBXVOL); + sendBuffer[ii].sums[6 ] += volcell->at(fsgrids::volfields::dPERBXVOLdy) / technicalGrid.DY; + sendBuffer[ii].sums[7 ] += volcell->at(fsgrids::volfields::dPERBXVOLdz) / technicalGrid.DZ; + sendBuffer[ii].sums[8 ] += volcell->at(fsgrids::volfields::dPERBYVOLdx) / technicalGrid.DX; + sendBuffer[ii].sums[9 ] += volcell->at(fsgrids::volfields::dPERBYVOLdz) / technicalGrid.DZ; + sendBuffer[ii].sums[10] += volcell->at(fsgrids::volfields::dPERBZVOLdx) / technicalGrid.DX; + sendBuffer[ii].sums[11] += volcell->at(fsgrids::volfields::dPERBZVOLdy) / technicalGrid.DY; + sendBuffer[ii].sums[12] += bgcell->at(fsgrids::bgbfield::BGBXVOL); sendBuffer[ii].sums[13] += bgcell->at(fsgrids::bgbfield::BGBYVOL); sendBuffer[ii].sums[14] += bgcell->at(fsgrids::bgbfield::BGBZVOL); sendBuffer[ii].sums[15] += egradpecell->at(fsgrids::egradpe::EXGRADPE); diff --git a/vlasovsolver/cpu_acc_transform.cpp b/vlasovsolver/cpu_acc_transform.cpp index 5f07a9b2b..32570f447 100644 --- a/vlasovsolver/cpu_acc_transform.cpp +++ b/vlasovsolver/cpu_acc_transform.cpp @@ -77,13 +77,13 @@ Eigen::Transform compute_acceleration_transformation( //const Real perBz = spatial_cell->parameters[CellParams::PERBZVOL]; // read in derivatives need for curl of B (only perturbed, curl of background field is always 0!) 
- const Real dBXdy = spatial_cell->derivativesBVOL[bvolderivatives::dPERBXVOLdy]/spatial_cell->parameters[CellParams::DY]; - const Real dBXdz = spatial_cell->derivativesBVOL[bvolderivatives::dPERBXVOLdz]/spatial_cell->parameters[CellParams::DZ]; - const Real dBYdx = spatial_cell->derivativesBVOL[bvolderivatives::dPERBYVOLdx]/spatial_cell->parameters[CellParams::DX]; + const Real dBXdy = spatial_cell->derivativesBVOL[bvolderivatives::dPERBXVOLdy]; + const Real dBXdz = spatial_cell->derivativesBVOL[bvolderivatives::dPERBXVOLdz]; + const Real dBYdx = spatial_cell->derivativesBVOL[bvolderivatives::dPERBYVOLdx]; - const Real dBYdz = spatial_cell->derivativesBVOL[bvolderivatives::dPERBYVOLdz]/spatial_cell->parameters[CellParams::DZ]; - const Real dBZdx = spatial_cell->derivativesBVOL[bvolderivatives::dPERBZVOLdx]/spatial_cell->parameters[CellParams::DX]; - const Real dBZdy = spatial_cell->derivativesBVOL[bvolderivatives::dPERBZVOLdy]/spatial_cell->parameters[CellParams::DY]; + const Real dBYdz = spatial_cell->derivativesBVOL[bvolderivatives::dPERBYVOLdz]; + const Real dBZdx = spatial_cell->derivativesBVOL[bvolderivatives::dPERBZVOLdx]; + const Real dBZdy = spatial_cell->derivativesBVOL[bvolderivatives::dPERBZVOLdy]; const Eigen::Matrix B(Bx,By,Bz); Eigen::Matrix unit_B(B.normalized()); diff --git a/vlasovsolver_amr/cpu_acc_transform.hpp b/vlasovsolver_amr/cpu_acc_transform.hpp index 487e07556..876b01009 100644 --- a/vlasovsolver_amr/cpu_acc_transform.hpp +++ b/vlasovsolver_amr/cpu_acc_transform.hpp @@ -44,13 +44,13 @@ Transform compute_acceleration_transformation( SpatialCell* spati const Real perBy = spatial_cell->parameters[CellParams::PERBYVOL]; const Real perBz = spatial_cell->parameters[CellParams::PERBZVOL]; //read in derivatives need for curl of B (only pertrubed, curl of background field is always 0!) 
- const Real dBXdy = spatial_cell->derivativesBVOL[bvolderivatives::dPERBXVOLdy]/spatial_cell->parameters[CellParams::DY]; - const Real dBXdz = spatial_cell->derivativesBVOL[bvolderivatives::dPERBXVOLdz]/spatial_cell->parameters[CellParams::DZ]; - const Real dBYdx = spatial_cell->derivativesBVOL[bvolderivatives::dPERBYVOLdx]/spatial_cell->parameters[CellParams::DX]; + const Real dBXdy = spatial_cell->derivativesBVOL[bvolderivatives::dPERBXVOLdy]; + const Real dBXdz = spatial_cell->derivativesBVOL[bvolderivatives::dPERBXVOLdz]; + const Real dBYdx = spatial_cell->derivativesBVOL[bvolderivatives::dPERBYVOLdx]; - const Real dBYdz = spatial_cell->derivativesBVOL[bvolderivatives::dPERBYVOLdz]/spatial_cell->parameters[CellParams::DZ]; - const Real dBZdx = spatial_cell->derivativesBVOL[bvolderivatives::dPERBZVOLdx]/spatial_cell->parameters[CellParams::DX]; - const Real dBZdy = spatial_cell->derivativesBVOL[bvolderivatives::dPERBZVOLdy]/spatial_cell->parameters[CellParams::DY]; + const Real dBYdz = spatial_cell->derivativesBVOL[bvolderivatives::dPERBYVOLdz]; + const Real dBZdx = spatial_cell->derivativesBVOL[bvolderivatives::dPERBZVOLdx]; + const Real dBZdy = spatial_cell->derivativesBVOL[bvolderivatives::dPERBZVOLdy]; const Eigen::Matrix B(Bx,By,Bz); From 7206e52255a4d88b21bfb4a3cb6f7659a966f8f8 Mon Sep 17 00:00:00 2001 From: Grandin Maxime T J Date: Thu, 16 May 2019 16:35:56 +0300 Subject: [PATCH 457/602] Expanded documentation of energy density DRO --- datareduction/datareductionoperator.cpp | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index f4370da4b..7e42ea0f2 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -1576,8 +1576,12 @@ namespace DRO { /*! \brief Energy density * Calculates the energy density of particles in three bins: total energy density, above E1limit*solar wind energy, and above E2limit*solar wind energy * Energy densities are given in eV/cm^3. - * Parameters that can be set in cfg file under [{species}_energydensity]: solarwindspeed [m/s], solarwindenergy [eV], limit1 [scalar], limit2 [scalar]. - * The energy thresholds are saved in bulk files as EnergyDensityESW, EnergyDensityELimit1, EnergyDensityELimit. + * Parameters that can be set in cfg file under [{species}_energydensity]: + * - solarwindspeed [m/s], + * - solarwindenergy [eV], + * - limit1 [scalar, default: 5.], + * - limit2 [scalar, default: 10.]. + * The energy thresholds are saved in bulk files (in eV) as parameters: EnergyDensityESW, EnergyDensityELimit1, EnergyDensityELimit. */ VariableEnergyDensity::VariableEnergyDensity(cuint _popID): DataReductionOperatorHasParameters(),popID(_popID) { popName = getObjectWrapper().particleSpecies[popID].name; From d8e09aa7c35fffdd7233ccb9e28f91fc6af70225 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Thu, 16 May 2019 16:37:12 +0300 Subject: [PATCH 458/602] Rename EHALL to fg_HallE in output, remove hardcoded number. 
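A sketch of the "remove hardcoded number" idea in the diff below: the loop bound is assumed to come from the Hall-field component definition itself instead of a literal 11 (the enum members here are placeholders, not the actual fsgrid names):

    from enum import IntEnum

    class EHall(IntEnum):
        # placeholder component names; the real list lives in the fsgrid headers
        EXHALL_PLACEHOLDER_0 = 0
        EXHALL_PLACEHOLDER_1 = 1
        N_EHALL = 2  # terminator entry doubles as the component count

    names = ["fg_HallE" + str(i) for i in range(EHall.N_EHALL)]
    assert names == ["fg_HallE0", "fg_HallE1"]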
--- datareduction/datareducer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index e06742b6e..858640129 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -539,8 +539,8 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti continue; } if(*it == "fg_HallE") { - for(int index=0; index<11; index++) { - std::string reducer_name = "EHALL" + std::to_string(index); + for(int index=0; indexaddOperator(new DRO::DataReductionOperatorFsGrid(reducer_name,[index]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, From 7aa6e2265e1581c94cf9115947c42f57e1dc22ba Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Thu, 16 May 2019 16:53:15 +0300 Subject: [PATCH 459/602] Allow alternate names HallE or fg_HallE. --- datareduction/datareducer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 858640129..f4b839121 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -538,7 +538,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); continue; } - if(*it == "fg_HallE") { + if(*it == "HallE" || *it == "fg_HallE") { for(int index=0; indexaddOperator(new DRO::DataReductionOperatorFsGrid(reducer_name,[index]( From dd65a9eed2e3409dbd284ac11d1d455d89c25c02 Mon Sep 17 00:00:00 2001 From: Grandin Maxime T J Date: Thu, 16 May 2019 17:09:56 +0300 Subject: [PATCH 460/602] Updated parameters.cpp to avoid conflict when merging --- parameters.cpp | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/parameters.cpp b/parameters.cpp index 9012a989b..ddaea42ce 100644 --- a/parameters.cpp +++ b/parameters.cpp @@ -217,9 +217,35 @@ bool Parameters::addParameters(){ // Output variable parameters // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh - Readparameters::addComposing("variables.output", "List of data reduction operators (DROs) to add to the grid file output. Each variable to be added has to be on a new line output = XXX. Available (20190514): B fg_B BackgroundB vg_BackgroundB fg_BackgroundB PerturbedB vg_PerturbedB fg_PerturbedB E fg_E Rhom vg_Rhom fg_Rhom Rhoq vg_Rhoq fg_Rhoq populations_Rho V vg_V fg_V populations_V populations_moments_Backstream populations_moments_NonBackstream populations_EffectiveSparsityThreshold populations_RhoLossAdjust populations_EnergyDensity populations_PrecipitationFlux LBweight MaxVdt MaxRdt populations_MaxVdt populations_MaxRdt MaxFieldsdt MPIrank vg_rank FsGridRank fg_rank FsGridBoundaryType BoundaryType vg_BoundaryType fg_BoundaryType BoundaryLayer vg_BoundaryLayer fg_BoundaryLayer populations_Blocks fSaved populations_accSubcycles VolE vg_VolE fg_VolE HallE GradPeE VolB vg_VolB fg_VolB BackgroundVolB PerturbedVolB Pressure vg_Pressure fg_Pressure populations_PTensor derivs BVOLderivs GridCoordinates Potential BackgroundVolE ChargeDensity PotentialError MeshData"); + Readparameters::addComposing("variables.output", std::string()+"List of data reduction operators (DROs) to add to the grid file output. Each variable to be added has to be on a new line output = XXX. 
"+ + "Available (20190514): "+ + "B fg_B BackgroundB vg_BackgroundB fg_BackgroundB PerturbedB vg_PerturbedB fg_PerturbedB "+ + "E fg_E "+ + "Rhom vg_Rhom fg_Rhom Rhoq vg_Rhoq fg_Rhoq populations_Rho "+ + "V vg_V fg_V populations_V "+ + "populations_moments_Backstream populations_moments_NonBackstream "+ + "populations_EffectiveSparsityThreshold populations_RhoLossAdjust "+ + "populations_EnergyDensity populations_PrecipitationFlux "+ + "LBweight MaxVdt MaxRdt populations_MaxVdt populations_MaxRdt MaxFieldsdt "+ + "MPIrank vg_rank FsGridRank fg_rank "+ + "FsGridBoundaryType BoundaryType vg_BoundaryType fg_BoundaryType BoundaryLayer vg_BoundaryLayer fg_BoundaryLayer "+ + "populations_Blocks fSaved "+ + "populations_accSubcycles "+ + "VolE vg_VolE fg_VolE HallE GradPeE VolB vg_VolB fg_VolB BackgroundVolB PerturbedVolB "+ + "Pressure vg_Pressure fg_Pressure populations_PTensor "+ + "derivs BVOLderivs "+ + "GridCoordinates BackgroundVolE MeshData"); // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh + Readparameters::addComposing("variables.diagnostic", std::string()+"List of data reduction operators (DROs) to add to the diagnostic runtime output. Each variable to be added has to be on a new line diagnostic = XXX. "+ + "Available (20190320): "+ + "FluxB FluxE "+ + "populations_Blocks "+ + "Rhom populations_RhoLossAdjust "+ + "LBweight "+ + "populations_MaxVdt MaxVdt populations_MaxRdt MaxRdt MaxFieldsdt "+ + "populations_MaxDistributionFunction populations_MinDistributionFunction"); + Readparameters::addComposing("variables.diagnostic", "List of data reduction operators (DROs) to add to the diagnostic runtime output. Each variable to be added has to be on a new line diagnostic = XXX. Available (20190320): FluxB FluxE populations_Blocks Rhom populations_RhoLossAdjust LBweight populations_MaxVdt MaxVdt populations_MaxRdt MaxRdt MaxFieldsdt populations_MaxDistributionFunction populations_MinDistributionFunction"); // bailout parameters From 1ba368c830906badd089c3a361867d2bb9939033 Mon Sep 17 00:00:00 2001 From: Grandin Maxime T J Date: Thu, 16 May 2019 17:13:14 +0300 Subject: [PATCH 461/602] Deleted extra line in parameters.cpp --- parameters.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/parameters.cpp b/parameters.cpp index ddaea42ce..0b7926098 100644 --- a/parameters.cpp +++ b/parameters.cpp @@ -246,8 +246,6 @@ bool Parameters::addParameters(){ "populations_MaxVdt MaxVdt populations_MaxRdt MaxRdt MaxFieldsdt "+ "populations_MaxDistributionFunction populations_MinDistributionFunction"); - Readparameters::addComposing("variables.diagnostic", "List of data reduction operators (DROs) to add to the diagnostic runtime output. Each variable to be added has to be on a new line diagnostic = XXX. Available (20190320): FluxB FluxE populations_Blocks Rhom populations_RhoLossAdjust LBweight populations_MaxVdt MaxVdt populations_MaxRdt MaxRdt MaxFieldsdt populations_MaxDistributionFunction populations_MinDistributionFunction"); - // bailout parameters Readparameters::add("bailout.write_restart", "If 1, write a restart file on bailout. 
Gets reset when sending a STOP (1) or a KILL (0).", true); Readparameters::add("bailout.min_dt", "Minimum time step below which bailout occurs (s).", 1e-6); From a02deaec9bf19ab0a8db6f18fa38b6cc370c91aa Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Fri, 17 May 2019 10:46:58 +0300 Subject: [PATCH 462/602] Changed internal names of precipitation reducer parameters to show which reducer they belong to --- datareduction/datareductionoperator.cpp | 8 ++++---- object_wrapper.cpp | 15 ++++++++------- particle_species.h | 8 ++++---- 3 files changed, 16 insertions(+), 15 deletions(-) diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index b4d442619..5a01427ca 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -1336,10 +1336,10 @@ namespace DRO { */ VariablePrecipitationDiffFlux::VariablePrecipitationDiffFlux(cuint _popID): DataReductionOperatorHasParameters(),popID(_popID) { popName = getObjectWrapper().particleSpecies[popID].name; - lossConeAngle = getObjectWrapper().particleSpecies[popID].lossConeAngle; // deg - emin = getObjectWrapper().particleSpecies[popID].emin; // keV - emax = getObjectWrapper().particleSpecies[popID].emax; // keV - nChannels = getObjectWrapper().particleSpecies[popID].nChannels; // number of energy channels, logarithmically spaced between emin and emax + lossConeAngle = getObjectWrapper().particleSpecies[popID].precipitatioLossConeAngle; // deg + emin = getObjectWrapper().particleSpecies[popID].precipitationEmin; // keV + emax = getObjectWrapper().particleSpecies[popID].precipitationEmax; // keV + nChannels = getObjectWrapper().particleSpecies[popID].precipitationNChannels; // number of energy channels, logarithmically spaced between emin and emax for (int i=0; i Date: Fri, 17 May 2019 10:57:50 +0300 Subject: [PATCH 463/602] rationalized unit usage in energy density DRO --- datareduction/datareductionoperator.cpp | 17 ++++++++++------- object_wrapper.cpp | 6 ++++-- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index 5a01427ca..2ef5be11f 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -1460,8 +1460,6 @@ namespace DRO { return true; } - - /*! \brief Energy density * Calculates the energy density of particles in three bins: total energy density, above E1limit*solar wind energy, and above E2limit*solar wind energy * Energy densities are given in eV/cm^3. @@ -1470,10 +1468,14 @@ namespace DRO { * - solarwindenergy [eV], * - limit1 [scalar, default: 5.], * - limit2 [scalar, default: 10.]. - * The energy thresholds are saved in bulk files (in eV) as parameters: EnergyDensityESW, EnergyDensityELimit1, EnergyDensityELimit. + * The energy thresholds are saved in bulk files as parameters: + * - EnergyDensityESW (in eV), + * - EnergyDensityELimit1 (as scalar multiplier of EnergyDensityESW), + * - EnergyDensityELimit2 (as scalar multiplier of EnergyDensityESW). 
*/ VariableEnergyDensity::VariableEnergyDensity(cuint _popID): DataReductionOperatorHasParameters(),popID(_popID) { popName = getObjectWrapper().particleSpecies[popID].name; + // Store internally in SI units solarwindenergy = getObjectWrapper().particleSpecies[popID].SolarWindEnergy; E1limit = solarwindenergy * getObjectWrapper().particleSpecies[popID].EnergyDensityLimit1; E2limit = solarwindenergy * getObjectWrapper().particleSpecies[popID].EnergyDensityLimit2; @@ -1541,7 +1543,7 @@ namespace DRO { } } - // Store energy density in units eV/cm^3 instead of Joules per m^3 + // Output energy density in units eV/cm^3 instead of Joules per m^3 EDensity[0] *= (1.0e-6)/physicalconstants::CHARGE; EDensity[1] *= (1.0e-6)/physicalconstants::CHARGE; EDensity[2] *= (1.0e-6)/physicalconstants::CHARGE; @@ -1556,10 +1558,11 @@ namespace DRO { } bool VariableEnergyDensity::writeParameters(vlsv::Writer& vlsvWriter) { - // Output energies in eV + // Output solar wind energy in eV Real swe = solarwindenergy/physicalconstants::CHARGE; - Real e1l = E1limit/physicalconstants::CHARGE; - Real e2l = E2limit/physicalconstants::CHARGE; + // Output other bin limits as multipliers + Real e1l = getObjectWrapper().particleSpecies[popID].EnergyDensityLimit1; + Real e2l = getObjectWrapper().particleSpecies[popID].EnergyDensityLimit2; if( vlsvWriter.writeParameter(popName+"_EnergyDensityESW", &swe) == false ) { return false; } if( vlsvWriter.writeParameter(popName+"_EnergyDensityELimit1", &e1l) == false ) { return false; } diff --git a/object_wrapper.cpp b/object_wrapper.cpp index a2bd0bd60..eb8cd0514 100644 --- a/object_wrapper.cpp +++ b/object_wrapper.cpp @@ -175,8 +175,10 @@ bool ObjectWrapper::getParameters() { const Real EPSILON = 1.e-25; if (species.SolarWindEnergy < EPSILON) { - // Calculate energy and convert it into eV - species.SolarWindEnergy = 0.5 * species.mass * species.SolarWindSpeed * species.SolarWindSpeed/physicalconstants::CHARGE; + // Energy stored internally in SI units + species.SolarWindEnergy = 0.5 * species.mass * species.SolarWindSpeed * species.SolarWindSpeed; + } else { + species.SolarWindEnergy = species.SolarWindEnergy*physicalconstants::CHARGE; } // Get precipitation parameters From 542b74e85ac217921019d8bfb55a68944982caa8 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Fri, 17 May 2019 11:43:55 +0300 Subject: [PATCH 464/602] Fix fsgrid datareducer indentation, simplify DX reducers. 
---
 datareduction/datareducer.cpp | 251 +++++++++++++++-------------------
 1 file changed, 112 insertions(+), 139 deletions(-)

diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp
index f4b839121..94cc1bae8 100644
--- a/datareduction/datareducer.cpp
+++ b/datareduction/datareducer.cpp
@@ -54,16 +54,16 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti
          // Iterate through fsgrid cells and extract total magnetic field
          for(int z=0; zmaxFsDt;
-            }
-         }
+         for(int y=0; ymaxFsDt;
+            }
+         }
          }
          return retval;
       }
@@ -435,11 +435,11 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti
          // Iterate through fsgrid cells and extract boundary flag
          for(int z=0; zsysBoundaryFlag;
-            }
-         }
+         for(int y=0; ysysBoundaryFlag;
+            }
+         }
          }
          return retval;
       }
@@ -470,11 +470,11 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti
          // Iterate through fsgrid cells and extract boundary layer
          for(int z=0; zsysBoundaryLayer;
-            }
-         }
+         for(int y=0; ysysBoundaryLayer;
+            }
+         }
          }
          return retval;
       }
@@ -525,13 +525,13 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti
          // Iterate through fsgrid cells and extract EVOL
          for(int z=0; z& gridSize = technicalGrid.getLocalSize();
          std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]);
-         // Iterate through fsgrid cells and extract total BVOL
+         // Iterate through fsgrid cells and extract X coordinate
          for(int z=0; z& gridSize = technicalGrid.getLocalSize();
          std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]);
-         // Iterate through fsgrid cells and extract total BVOL
+         // Iterate through fsgrid cells and extract Y coordinate
          for(int z=0; z& gridSize = technicalGrid.getLocalSize();
          std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]);
-         // Iterate through fsgrid cells and extract total BVOL
+         // Iterate through fsgrid cells and extract Z coordinate
          for(int z=0; z& technicalGrid)->std::vector {
          std::array& gridSize = technicalGrid.getLocalSize();
-         std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]);
-
-         // Iterate through fsgrid cells and extract total BVOL
-         for(int z=0; z retval(gridSize[0]*gridSize[1]*gridSize[2], technicalGrid.DX);
          return retval;
       }
      ));
@@ -804,16 +795,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti
          FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector {
          std::array& gridSize = technicalGrid.getLocalSize();
-         std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]);
-
-         // Iterate through fsgrid cells and extract total BVOL
-         for(int z=0; z retval(gridSize[0]*gridSize[1]*gridSize[2], technicalGrid.DY);
          return retval;
       }
      ));
@@ -830,16 +812,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti
          FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector {
          std::array& gridSize = technicalGrid.getLocalSize();
-         std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]);
-
-         // Iterate through fsgrid cells and extract total BVOL
-         for(int z=0; z retval(gridSize[0]*gridSize[1]*gridSize[2], technicalGrid.DZ);
          return retval;
       }
      ));

From 3db67e0faa88b4339a24e7009aebc03ea293ebb3 Mon Sep 17 00:00:00 2001
From: Markus Battarbee
Date: Fri, 17 May 2019 12:53:52 +0300
Subject: [PATCH 465/602] Precipitation DRO outputs population in parameters,
 uses eV instead of keV

---
 datareduction/datareductionoperator.cpp | 17 +++++++++--------
 object_wrapper.cpp                      |  7 +++++--
 .../Magnetosphere_polar_small.cfg       |  6 +++---
 3 files changed, 17 insertions(+), 13 deletions(-)

diff --git
a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index 2ef5be11f..640beacc0 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -1331,14 +1331,14 @@ namespace DRO { * V*V/mass * is calculated within the loss cone of fixed angular opening (default: 10 deg). * The differential flux is converted in part. / cm^2 / s / sr / eV (unit used by observers). - * Parameters that can be set in cfg file under [{species}_precipitation]: nChannels, emin [keV], emax [keV], lossConeAngle [deg] + * Parameters that can be set in cfg file under [{species}_precipitation]: nChannels, emin [eV], emax [eV], lossConeAngle [deg] * The energy channels are saved in bulk files as PrecipitationCentreEnergy{channel_number}. */ VariablePrecipitationDiffFlux::VariablePrecipitationDiffFlux(cuint _popID): DataReductionOperatorHasParameters(),popID(_popID) { popName = getObjectWrapper().particleSpecies[popID].name; - lossConeAngle = getObjectWrapper().particleSpecies[popID].precipitatioLossConeAngle; // deg - emin = getObjectWrapper().particleSpecies[popID].precipitationEmin; // keV - emax = getObjectWrapper().particleSpecies[popID].precipitationEmax; // keV + lossConeAngle = getObjectWrapper().particleSpecies[popID].precipitationLossConeAngle; // deg + emin = getObjectWrapper().particleSpecies[popID].precipitationEmin; // already converted to SI + emax = getObjectWrapper().particleSpecies[popID].precipitationEmax; // already converted to SI nChannels = getObjectWrapper().particleSpecies[popID].precipitationNChannels; // number of energy channels, logarithmically spaced between emin and emax for (int i=0; i Date: Mon, 20 May 2019 09:05:15 +0300 Subject: [PATCH 466/602] Put difference between regular and scaled dipole into perb field --- backgroundfield/vectorRdipole.cpp_unused | 250 ----------------------- backgroundfield/vectorRdipole.hpp_unused | 50 ----- projects/Magnetosphere/Magnetosphere.cpp | 9 +- 3 files changed, 8 insertions(+), 301 deletions(-) delete mode 100644 backgroundfield/vectorRdipole.cpp_unused delete mode 100644 backgroundfield/vectorRdipole.hpp_unused diff --git a/backgroundfield/vectorRdipole.cpp_unused b/backgroundfield/vectorRdipole.cpp_unused deleted file mode 100644 index c94d21fba..000000000 --- a/backgroundfield/vectorRdipole.cpp_unused +++ /dev/null @@ -1,250 +0,0 @@ -/* - * This file is part of Vlasiator. - * Copyright 2010-2016 Finnish Meteorological Institute - * Copyright 2017-2019 University of Helsinki - * - * For details of usage, see the COPYING file and read the "Rules of the Road" - * at http://www.physics.helsinki.fi/vlasiator/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - */ -/* -Background magnetic field class of Vlasiator. 
-*/ - -#include -#include -#include "vectordipole.hpp" -#include "../common.h" - -// tilt_angle_phi is from the z-axis in radians -// tilt_angle_theta is from the Sun-Earth-line in radians -void VectorDipole::initialize(const double moment,const double center_x, const double center_y, const double center_z, const double tilt_angle_phi=0, const double tilt_angle_theta=0, const double radius_f, const double radius_z){ - this->initialized = true; - - q[0]=-sin(tilt_angle_phi)*cos(tilt_angle_theta)*moment; - q[1]=-sin(tilt_angle_phi)*sin(tilt_angle_theta)*moment; - q[2]=-cos(tilt_angle_phi)*moment; - - center[0]=center_x; - center[1]=center_y; - center[2]=center_z; - - radius[0]=radius_f; - radius[1]=radius_z; -} - - - -double VectorDipole::call( double x, double y, double z) const -{ - const double minimumR=1e-3*physicalconstants::R_E; //The dipole field is defined to be outside of Earth, and units are in meters - if(this->initialized==false) - return 0.0; - double r[3]; - - r[0]= x-center[0]; - r[1]= y-center[1]; - r[2]= z-center[2]; - - double r2 = r[0]*r[0]+r[1]*r[1]+r[2]*r[2]; - - if(r2=radius[1]*radius[1]) - return 0.0; //set zero field and derivatives outside "zero radius" - - /* This function is called from within other calls, one component at a time. - The component in question is defined using the _fComponent index. If a derivative - is requested, the direction of the derivative is defined using _dComponent. */ - - const double r1 = sqrt(r2); - const double r5 = (r2*r2*r1); - const double rdotq=q[0]*r[0] + q[1]*r[1] +q[2]*r[2]; - const double B=( 3*r[_fComponent]*rdotq-q[_fComponent]*r2)/r5; - - if(_derivative == 0) && (r1 <= radius[0]) - // Full dipole field within full radius - return B; - - if(_derivative == 1) && (r1 <= radius[0]){ - //first derivatives of full field - unsigned int sameComponent; - if(_dComponent==_fComponent) - sameComponent=1; - else - sameComponent=0; - - /* Confirmed Battarbee 26.04.2019: This is the correct - 3D dipole derivative. */ - return -5*B*r[_dComponent]/r2+ - (3*q[_dComponent]*r[_fComponent] - - 2*q[_fComponent]*r[_dComponent] + - 3*rdotq*sameComponent)/r5; - } - - /* Within transition range (between "full radius" and "zero radius"), use - a vector potential scaled with the smootherstep function. Calculated - and coded by Markus Battarbee, 30.04.2019 */ - - // Calculate vector potential within transition range - double A[3]; - A[0] = (q[1]*r[2]-q[2]*r[1]) / (r2*r1); - A[1] = (q[2]*r[0]-q[0]*r[2]) / (r2*r1); - A[2] = (q[0]*r[1]-q[1]*r[0]) / (r2*r1); - // Coordinate within smootherstep function - const double Sx = -(r1-radius[1])/(radius[1]-radius[0]); - const double Sx2 = Sx*Sx; - // Smootherstep and its radial derivative - const double S2 = 6.*Sx2*Sx2*Sx - 15.*Sx2*Sx2 + 10.*Sx2*Sx; - const double dS2dr = -(30.*Sx2*Sx2 - 60.*Sx2*Sx + 30.*Sx2)/(radius[1]-radius[0]); - - // Cartesian derivatives of S2 - double dS2cart[3]; - dS2cart[0] = (r[0]/r1)*dS2dr; - dS2cart[1] = (r[1]/r1)*dS2dr; - dS2cart[2] = (r[2]/r1)*dS2dr; - - if(_derivative == 0) && (r1 > radius[0]) { - /* Within transition range (between radius[0] and radius[1]) we - multiply the magnetic field with the S2 smootherstep function - and add an additional corrective term to remove divergence. This - is based on using the dipole field vector potential and scaling - it using the smootherstep function S2. 
- - Notation: - q = dipole moment (vector) - r = position vector - R = position distance - - The regular dipole field vector potential - A(r) = (mu0/4 pi R^3) * (q cross r) - - The smootherstep function - ( 0, x<=0 - S2(Sx) = ( 6x^5 -15x^4 +10x^3, 0<=x<=1 - ( 1, x>=1 - - Radial distance scaling for S2 - Sx = -(R-radius[1])/(radius[1]-radius[0]) - - The scaled vector potential is A'(r) = A(r)*S2(Sx) - - The scaled magnetic field is - B'(r) = del cross A'(r) - =(NRL)= S2(Sx) del cross A(r) + del S2(Sx) cross A(r) - = S2(Sx) B(r) + del S2(Sx) cross A(r) - - */ - double delS2crossA[3]; - delS2crossA[0] = dS2cart[1]*A[2] - dS2cart[2]*A[1]; - delS2crossA[1] = dS2cart[2]*A[0] - dS2cart[0]*A[2]; - delS2crossA[2] = dS2cart[0]*A[1] - dS2cart[1]*A[0]; - - return S2*B + delS2crossA[_fComponent]; - } - - else if(_derivative == 1) && (r1 > radius[0]) { - /* first derivatives of field calculated from diminishing vector potential - - del B'(r) = S2(Sx) del B(r) + B(r) del S2(Sx) + del (del S2(Sx) cross A(r)) - - component-wise: - - del Bx = S2(Sx) del Bx + del S2(Sx) Bx + del(del S2(Sx) cross A)@i=x - del By = S2(Sx) del By + del S2(Sx) By + del(del S2(Sx) cross A)@i=y - del Bz = S2(Sx) del Bz + del S2(Sx) Bz + del(del S2(Sx) cross A)@i=z - - where - - del(del S2(Sx) cross A)@i=x = del (dS2/dy Az - dS/dz Ay) - = del(dS/dy) Az + dS/dy del Az - del(DS/dz) Ay - dS/dz del Ay - - del(del S2(Sx) cross A)@i=y = del (dS2/dz Ax - dS/dx Az) - = del(dS/dz) Ax + dS/dz del Ax - del(DS/dx) Az - dS/dx del Az - - del(del S2(Sx) cross A)@i=z = del (dS2/dx Ay - dS/dy Ax) - = del(dS/dx) Ay + dS/dx del Ay - del(DS/dy) Ax - dS/dy del Ax - **********/ - - unsigned int sameComponent; - if(_dComponent==_fComponent) - sameComponent=1; - else - sameComponent=0; - - // Regular derivative of B - const double delB = -5*B*r[_dComponent]/r2+ - (3*q[_dComponent]*r[_fComponent] - - 2*q[_fComponent]*r[_dComponent] + - 3*rdotq*sameComponent)/r5; - - // Calculate del Ax, del Ay, del Az - double delAx[3]; - double delAy[3]; - double delAz[3]; - delAx[0] = (-3./(r2*r2*r1))*(q[1]*r[2]-q[2]*r[1])*r[0]; - delAx[1] = (-3./(r2*r2*r1))*(q[1]*r[2]-q[2]*r[1])*r[1] -q[2]/(r2*r1); - delAx[2] = (-3./(r2*r2*r1))*(q[1]*r[2]-q[2]*r[1])*r[2] +q[1]/(r2*r1); - delAy[0] = (-3./(r2*r2*r1))*(q[2]*r[0]-q[0]*r[2])*r[0] +q[2]/(r2*r1); - delAy[1] = (-3./(r2*r2*r1))*(q[2]*r[0]-q[0]*r[2])*r[1]; - delAy[2] = (-3./(r2*r2*r1))*(q[2]*r[0]-q[0]*r[2])*r[2] -q[0]/(r2*r1); - delAz[0] = (-3./(r2*r2*r1))*(q[0]*r[1]-q[1]*r[0])*r[0] -q[1]/(r2*r1); - delAz[1] = (-3./(r2*r2*r1))*(q[0]*r[1]-q[1]*r[0])*r[1] +q[0]/(r2*r1); - delAz[2] = (-3./(r2*r2*r1))*(q[0]*r[1]-q[1]*r[0])*r[2]; - - // Calculate del (dS2/dx), del (dS2/dy), del (dS2/dz) - ddidS2dr = 60.*(2.*Sx2*Sx - 3.*Sx2 + Sx)/(r2*(radius[1]-radius[0])*(radius[1]-radius[0])); - double deldS2dx[3]; - double deldS2dy[3]; - double deldS2dz[3]; - deldS2dx[0] = ddidS2dr*r[0]*r[0] -(r[0]/(r2*r1))*dS2dr*r[0] + dS2dr/r1; - deldS2dx[1] = ddidS2dr*r[0]*r[1] -(r[0]/(r2*r1))*dS2dr*r[1]; - deldS2dx[2] = ddidS2dr*r[0]*r[2] -(r[0]/(r2*r1))*dS2dr*r[2]; - deldS2dy[0] = ddidS2dr*r[1]*r[0] -(r[1]/(r2*r1))*dS2dr*r[0]; - deldS2dy[1] = ddidS2dr*r[1]*r[1] -(r[1]/(r2*r1))*dS2dr*r[1] + dS2dr/r1; - deldS2dy[2] = ddidS2dr*r[1]*r[2] -(r[1]/(r2*r1))*dS2dr*r[2]; - deldS2dz[0] = ddidS2dr*r[2]*r[0] -(r[2]/(r2*r1))*dS2dr*r[0]; - deldS2dz[1] = ddidS2dr*r[2]*r[1] -(r[2]/(r2*r1))*dS2dr*r[1]; - deldS2dz[2] = ddidS2dr*r[2]*r[2] -(r[2]/(r2*r1))*dS2dr*r[2] + dS2dr/r1; - - // Calculate del(del S2(Sx) cross A)@i=x, del(del S2(Sx) cross A)@i=y, del(del S2(Sx) cross A)@i=z - 
double ddS2crossA[3][3]; - // derivatives of X-directional field - ddS2crossA[0][0] = deldS2dy[0]*A[2] + dS2cart[1]*delAz[0] - deldS2dz[0]*A[1] - dS2cart[2]*delAy[0]; - ddS2crossA[0][1] = deldS2dy[1]*A[2] + dS2cart[1]*delAz[1] - deldS2dz[1]*A[1] - dS2cart[2]*delAy[1]; - ddS2crossA[0][2] = deldS2dy[2]*A[2] + dS2cart[1]*delAz[2] - deldS2dz[2]*A[1] - dS2cart[2]*delAy[2]; - // derivatives of Y-directional field - ddS2crossA[1][0] = deldS2dz[0]*A[0] + dS2cart[2]*delAx[0] - deldS2dx[0]*A[2] - dS2cart[0]*delAz[0]; - ddS2crossA[1][1] = deldS2dz[1]*A[0] + dS2cart[2]*delAx[1] - deldS2dx[1]*A[2] - dS2cart[0]*delAz[1]; - ddS2crossA[1][2] = deldS2dz[2]*A[0] + dS2cart[2]*delAx[2] - deldS2dx[2]*A[2] - dS2cart[0]*delAz[2]; - // derivatives of Z-directional field - ddS2crossA[2][0] = deldS2dx[0]*A[1] + dS2cart[0]*delAy[0] - deldS2dy[0]*A[0] - dS2cart[1]*delAx[0]; - ddS2crossA[2][1] = deldS2dx[1]*A[1] + dS2cart[0]*delAy[1] - deldS2dy[1]*A[0] - dS2cart[1]*delAx[1]; - ddS2crossA[2][2] = deldS2dx[2]*A[1] + dS2cart[0]*delAy[2] - deldS2dy[2]*A[0] - dS2cart[1]*delAx[2]; - - return S2*delB + dS2cart[_dComponent]*B + ddS2crossA[_fComponent][_dComponent]; - } - - return 0; // dummy, but prevents gcc from yelling -} - - - - - - diff --git a/backgroundfield/vectorRdipole.hpp_unused b/backgroundfield/vectorRdipole.hpp_unused deleted file mode 100644 index bf8c2e817..000000000 --- a/backgroundfield/vectorRdipole.hpp_unused +++ /dev/null @@ -1,50 +0,0 @@ -/* - * This file is part of Vlasiator. - * Copyright 2010-2016 Finnish Meteorological Institute - * Copyright 2017-2019 University of Helsinki - * - * For details of usage, see the COPYING file and read the "Rules of the Road" - * at http://www.physics.helsinki.fi/vlasiator/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - */ -/* -Background magnetic field class of Vlasiator. 
-*/ - -#ifndef VECTORDIPOLE_HPP -#define VECTORDIPOLE_HPP -#include "fieldfunction.hpp" - - - -class VectorDipole: public FieldFunction { -private: - bool initialized; - double q[3]; // Dipole moment; set to (0,0,moment) for z-aligned - double center[3]; // Coordinates where the dipole sits; set to (0,0,0) - double radius[2]; // Radial extents of full and zero dipole -public: - - VectorDipole(){ - this->initialized = false; - } - void initialize(const double moment,const double center_x, const double center_y, const double center_z, const double tilt_angle_phi, const double tilt_angle_theta, const double radius_f, const double radius_z); - virtual double call(double x, double y, double z) const; - virtual ~Dipole() {} -}; - -#endif - diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index a46371f93..b4ca36d0d 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -326,8 +326,15 @@ namespace projects { setBackgroundField(bgFieldDipole, BgBGrid, true); break; case 4: // Vector potential dipole, vanishes or optionally scales to static inflow value after a given x-coordinate + // What we in fact do is we place the regular dipole in the background field, and the + // corrective terms in the perturbed field. This maintains the BGB as curl-free. + bgFieldDipole.initialize(8e15 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, 0.0 );//set dipole moment + setBackgroundField(bgFieldDipole, BgBGrid); + // Difference into perBgrid + bgFieldDipole.initialize(-8e15 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, 0.0 ); + setBackgroundField(bgFieldDipole, perBGrid); bgVectorDipole.initialize(8e15 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, this->dipoleTiltPhi*3.14159/180., this->dipoleTiltTheta*3.14159/180., this->dipoleXFull, this->dipoleXZero, this->dipoleInflowB[0], this->dipoleInflowB[1], this->dipoleInflowB[2]); - setBackgroundField(bgVectorDipole, BgBGrid); + setBackgroundField(bgVectorDipole, perBGrid, true); break; default: setBackgroundFieldToZero(BgBGrid); From af3d7e0e8ba60f82d99805ba16f700b2eb63ccbf Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Mon, 20 May 2019 10:25:20 +0300 Subject: [PATCH 467/602] Moved initialisation of perturbed field into own function calls --- backgroundfield/backgroundfield.cpp | 84 ++++++++++++++++++++++++ backgroundfield/backgroundfield.h | 10 +++ projects/Magnetosphere/Magnetosphere.cpp | 6 +- 3 files changed, 97 insertions(+), 3 deletions(-) diff --git a/backgroundfield/backgroundfield.cpp b/backgroundfield/backgroundfield.cpp index 1adf3f71c..5456a7665 100644 --- a/backgroundfield/backgroundfield.cpp +++ b/backgroundfield/backgroundfield.cpp @@ -148,5 +148,89 @@ void setBackgroundFieldToZero( } } } +} + + +void setPerturbedField( + FieldFunction& bfFunction, + FsGrid< std::array, 2>& perBGrid, + bool append) { + + /*if we do not add a new background to the existing one we first put everything to zero*/ + if(append==false) { + setPerturbedFieldToZero(perBGrid); + } + //these are doubles, as the averaging functions copied from Gumics + //use internally doubles. 
In any case, it should provide more + //accurate results also for float simulations + double accuracy = 1e-17; + double start[3]; + double end[3]; + double dx[3]; + unsigned int faceCoord1[3]; + unsigned int faceCoord2[3]; + + //the coordinates of the edges face with a normal in the third coordinate direction, stored here to enable looping + faceCoord1[0]=1; + faceCoord2[0]=2; + faceCoord1[1]=0; + faceCoord2[1]=2; + faceCoord1[2]=0; + faceCoord2[2]=1; + + auto localSize = perBGrid.getLocalSize(); + + // Do not thread this blindly, the bfFunction.set* calls below are not thread-safe at the moment. + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + std::array start3 = perBGrid.getPhysicalCoords(x, y, z); + start[0] = start3[0]; + start[1] = start3[1]; + start[2] = start3[2]; + + dx[0] = perBGrid.DX; + dx[1] = perBGrid.DY; + dx[2] = perBGrid.DZ; + + end[0]=start[0]+dx[0]; + end[1]=start[1]+dx[1]; + end[2]=start[2]+dx[2]; + + //Face averages + for(uint fComponent=0; fComponent<3; fComponent++){ + bfFunction.setDerivative(0); + bfFunction.setComponent((coordinate)fComponent); + perBGrid.get(x,y,z)->at(fsgrids::bfield::PERBX+fComponent) += + surfaceAverage(bfFunction, + (coordinate)fComponent, + accuracy, + start, + dx[faceCoord1[fComponent]], + dx[faceCoord2[fComponent]] + ); + + } + // Derivatives or volume averages are not calculated for the perBField + } + } + } } + +void setPerturbedFieldToZero( + FsGrid< std::array, 2> & perBGrid) { + auto localSize = perBGrid.getLocalSize(); + + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + for (int i = 0; i < fsgrids::bfield::N_BFIELD; ++i) { + perBGrid.get(x,y,z)->at(i) = 0; + } + } + } + } +} + diff --git a/backgroundfield/backgroundfield.h b/backgroundfield/backgroundfield.h index 7996b8b37..0c880cef3 100644 --- a/backgroundfield/backgroundfield.h +++ b/backgroundfield/backgroundfield.h @@ -38,5 +38,15 @@ void setBackgroundFieldToZero( FsGrid< std::array, 2>& BgBGrid ); +void setPerturbedField( + FieldFunction& bgFunction, + FsGrid< std::array, 2>& perBGrid, + bool append=false +); + +void setPerturbedFieldToZero( + FsGrid< std::array, 2>& perBGrid +); + #endif diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index b4ca36d0d..adbc6edc5 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -290,7 +290,7 @@ namespace projects { /* set 0-centered dipole */ void Magnetosphere::setProjectBField( - FsGrid< std::array, 2> & perBGrid, + FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { @@ -332,9 +332,9 @@ namespace projects { setBackgroundField(bgFieldDipole, BgBGrid); // Difference into perBgrid bgFieldDipole.initialize(-8e15 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, 0.0 ); - setBackgroundField(bgFieldDipole, perBGrid); + setPerturbedField(bgFieldDipole, perBGrid); bgVectorDipole.initialize(8e15 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, this->dipoleTiltPhi*3.14159/180., this->dipoleTiltTheta*3.14159/180., this->dipoleXFull, this->dipoleXZero, this->dipoleInflowB[0], this->dipoleInflowB[1], this->dipoleInflowB[2]); - setBackgroundField(bgVectorDipole, perBGrid, true); + setPerturbedField(bgVectorDipole, perBGrid, true); break; default: setBackgroundFieldToZero(BgBGrid); From 
dc9d1808f7a01c7260d71cee5066cef1d581299e Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Mon, 20 May 2019 14:31:47 +0300 Subject: [PATCH 468/602] Structured metada using calls addUnitMetadata() and getUnitMetadata() --- datareduction/datareducer.cpp | 96 ++++++++++++++++++++++++- datareduction/datareducer.h | 2 + datareduction/datareductionoperator.cpp | 4 +- datareduction/datareductionoperator.h | 15 +++- iowrite.cpp | 16 ++++- 5 files changed, 124 insertions(+), 9 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index d5828c337..691ac7b0e 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -36,6 +36,12 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti for (it = P::outputVariableList.begin(); it != P::outputVariableList.end(); it++) { + + /* Note: Each data reducer generation should be followed by a call to setUnitMetaData + with the following arguments: + unit, unit in LaTeX formulation, variable in LaTeX formulation, conversion factor + */ + if(*it == "fg_B" || *it == "B") { // Bulk magnetic field at Yee-Lattice locations outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_B",[]( FsGrid< std::array, 2>& perBGrid, @@ -66,9 +72,10 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } } return retval; - } - )); - continue; + } + )); + outputReducer.back()->setUnitMetaData("T","$\mathrm{T}$","$B$",1.0); + continue; } if(*it == "fg_BackgroundB" || *it == "BackgroundB") { // Static (typically dipole) magnetic field part outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_background_B",[]( @@ -99,6 +106,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); + outputReducer.back()->setUnitMetaData("T","$\mathrm{T}$","$B_\mathrm{bg}$",1.0); continue; } if(*it == "fg_PerturbedB" || *it == "PerturbedB") { // Fluctuating magnetic field part @@ -130,6 +138,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); + outputReducer.back()->setUnitMetaData("T","$\mathrm{T}$","$B_\mathrm{per}$",1.0); continue; } if(*it == "fg_E" || *it== "E") { // Bulk electric field at Yee-lattice locations @@ -161,10 +170,12 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); + outputReducer.back()->setUnitMetaData("V/m","$\mathrm{V}\,\mathrm{m}^{-1}$","$E$",1.0); continue; } if(*it == "vg_Rhom" || *it == "Rhom") { // Overall mass density (summed over all populations) outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("rhom",CellParams::RHOM,1)); + outputReducer.back()->setUnitMetaData("kg/m^3","$\mathrm{kg}\,\mathrm{m}^{-3}$","$\rho_\mathrm{m}$",1.0); continue; } if(*it == "fg_Rhom") { // Overall mass density (summed over all populations) @@ -194,10 +205,12 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); + outputReducer.back()->setUnitMetaData("kg/m^3","$\mathrm{kg}\,\mathrm{m}^{-3}$","$\rho_\mathrm{m}$",1.0); continue; } if(*it == "vg_Rhoq" || *it == "Rhoq") { // Overall charge density (summed over all populations) outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("rhoq",CellParams::RHOQ,1)); + outputReducer.back()->setUnitMetaData("C/m^3","$\mathrm{C}\,\mathrm{m}^{-3}$","$\rho_\mathrm{q}$",1.0); continue; } if(*it == "fg_Rhoq") { // Overall charge density (summed over all populations) @@ -227,6 +240,7 @@ 
void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); + outputReducer.back()->setUnitMetaData("C/m^3","$\mathrm{C}\,\mathrm{m}^{-3}$","$\rho_\mathrm{q}$",1.0); continue; } if(*it == "populations_Rho") { // Per-population particle number density @@ -234,12 +248,14 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/rho", i, offsetof(spatial_cell::Population, RHO), 1)); + outputReducer.back()->setUnitMetaData("1/m^3","$\mathrm{m}^{-3}$","$\n_\mathrm{"+pop+"}$",1.0); } continue; } if(*it == "V" || *it == "vg_V") { // Overall effective bulk density defining the center-of-mass frame from all populations outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("V",CellParams::VX,3)); + outputReducer.back()->setUnitMetaData("m/s","$\mathrm{m}\,\mathrm{s}^{-1}$","$V$",1.0); continue; } if(*it == "fg_V") { // Overall effective bulk density defining the center-of-mass frame from all populations @@ -271,6 +287,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); + outputReducer.back()->setUnitMetaData("m/s","$\mathrm{m}\,\mathrm{s}^{-1}$","$V$",1.0); continue; } if(*it == "populations_V") { // Per population bulk velocities @@ -278,24 +295,33 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/V", i, offsetof(spatial_cell::Population, V), 3)); + outputReducer.back()->setUnitMetaData("m/s","$\mathrm{m}\,\mathrm{s}^{-1}$","$V_\mathrm{"+pop+"}$",1.0); } continue; } if(*it == "populations_moments_Backstream") { // Per-population moments of the backstreaming part for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { outputReducer->addOperator(new DRO::VariableRhoBackstream(i)); + outputReducer.back()->setUnitMetaData("1/m^3","$\mathrm{m}^{-3}$","$n_\mathrm{"+pop+",st}$",1.0); outputReducer->addOperator(new DRO::VariableVBackstream(i)); + outputReducer.back()->setUnitMetaData("m/s","$\mathrm{m}\,\mathrm{s}^{-1}$","$V_\mathrm{"+pop+",st}$",1.0); outputReducer->addOperator(new DRO::VariablePTensorBackstreamDiagonal(i)); + outputReducer.back()->setUnitMetaData("Pa","$\mathrm{Pa}$","$\mathcal{P}_\mathrm{"+pop+",st}$",1.0); outputReducer->addOperator(new DRO::VariablePTensorBackstreamOffDiagonal(i)); + outputReducer.back()->setUnitMetaData("Pa","$\mathrm{Pa}$","$\mathcal{\tilde{P}}_\mathrm{"+pop+",st}$",1.0); } continue; } if(*it == "populations_moments_NonBackstream") { // Per-population moments of the non-backstreaming (thermal?) part. 
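// (Note: the setUnitMetaData() calls in these per-population loops reference
// a string `pop` that these particular loops never declare in this patch;
// the follow-up commit "Metadata construction compiles" adds, at the top of
// each such loop,
//    species::Species& species = getObjectWrapper().particleSpecies[i];
//    const std::string& pop = species.name;
// before the operators are registered.)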
for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { outputReducer->addOperator(new DRO::VariableRhoNonBackstream(i)); + outputReducer.back()->setUnitMetaData("1/m^3","$\mathrm{m}^{-3}$","$n_\mathrm{"+pop+",th}$",1.0); outputReducer->addOperator(new DRO::VariableVNonBackstream(i)); + outputReducer.back()->setUnitMetaData("m/s","$\mathrm{m}\,\mathrm{s}^{-1}$","$V_\mathrm{"+pop+",th}$",1.0); outputReducer->addOperator(new DRO::VariablePTensorNonBackstreamDiagonal(i)); + outputReducer.back()->setUnitMetaData("Pa","$\mathrm{Pa}$","$\mathcal{P}_\mathrm{"+pop+",th}$",1.0); outputReducer->addOperator(new DRO::VariablePTensorNonBackstreamOffDiagonal(i)); + outputReducer.back()->setUnitMetaData("Pa","$\mathrm{Pa}$","$\mathcal{\tilde{P}}_\mathrm{"+pop+",th}$",1.0); } continue; } @@ -303,6 +329,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti // Effective sparsity threshold affecting each cell, if dynamic threshould algorithm is used for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { outputReducer->addOperator(new DRO::VariableEffectiveSparsityThreshold(i)); + outputReducer.back()->setUnitMetaData("s^3/m^6","$\mathrm{m}^{-6}\,\mathrm{s}^{3}$","$f_\mathrm{"+pop+",min}$",1.0); } continue; } @@ -312,17 +339,20 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/rho_loss_adjust", i, offsetof(spatial_cell::Population, RHOLOSSADJUST), 1)); + outputReducer.back()->setUnitMetaData("1/m^3","$\mathrm{m}^{-3}$","$\Delta_\mathrm{loss} n_\mathrm{"+pop+"}$",1.0); } continue; } if(*it == "LBweight" || *it == "vg_LBweight") { // Load balance metric for LB debugging outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("LB_weight",CellParams::LBWEIGHTCOUNTER,1)); + outputReducer.back()->setUnitMetaData("","","$\mathrm{LB weight}$",0.0); continue; } if(*it == "MaxVdt") { // Overall maximum timestep constraint as calculated by the velocity space vlasov update outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("max_v_dt",CellParams::MAXVDT,1)); + outputReducer.back()->setUnitMetaData("s","$\mathrm{s}$","$\Delta t_\mathrm{V,max}$",1.0); continue; } if(*it == "populations_MaxVdt") { @@ -331,12 +361,14 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/MaxVdt", i, offsetof(spatial_cell::Population, max_dt[1]), 1)); + outputReducer.back()->setUnitMetaData("s","$\mathrm{s}$","$\Delta t_\mathrm{"+pop+",V,max}$",1.0); } continue; } if(*it == "MaxRdt") { // Overall maximum timestep constraint as calculated by the real space vlasov update outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("max_r_dt",CellParams::MAXRDT,1)); + outputReducer.back()->setUnitMetaData("s","$\mathrm{s}$","$\Delta t_\mathrm{R,max}$",1.0); continue; } if(*it == "populations_MaxRdt") { @@ -345,6 +377,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/MaxRdt", i, 
offsetof(spatial_cell::Population, max_dt[0]), 1)); + outputReducer.back()->setUnitMetaData("s","$\mathrm{s}$","$\Delta t_\mathrm{"+pop+",R,max}$",1.0); } continue; } @@ -352,9 +385,13 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti // Per-population energy density in three energy ranges for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { outputReducer->addOperator(new DRO::VariableEnergyDensity(i)); + outputReducer.back()->setUnitMetaData("eV/cm^3","$\mathrm{eV}\,\mathrm{cm}^{-3}$","$U_\mathrm{"+pop+"}$",(1.0e-6)/physicalconstants::CHARGE); } continue; } + + // For precipitation: + // outputReducer.back()->setUnitMetaData("1/(cm^2 sr s eV)","$\mathrm{cm}^{-2}\,\mathrm{sr}^{-1}\,\mathrm{s}^{-1}\,\mathrm{eV}^{-1}$","$\mathcal{F}_\mathrm{"+pop+"}$",(1.0e-4)*physicalconstants::CHARGE); if(*it == "MaxFieldsdt" || *it == "fg_MaxFieldsdt") { // Maximum timestep constraint as calculated by the fieldsolver outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("MaxFieldsdt",[]( @@ -383,11 +420,13 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); + outputReducer.back()->setUnitMetaData("s","$\mathrm{s}$","$\Delta t_\mathrm{f,max}$",1.0); continue; } if(*it == "MPIrank" || *it == "vg_rank") { // Map of spatial decomposition of the DCCRG grid into MPI ranks outputReducer->addOperator(new DRO::MPIrank); + outputReducer.back()->setUnitMetaData("","","$\mathrm{MPI rank}$",0.0); continue; } if(*it == "FsGridRank" || *it == "fg_rank") { @@ -409,11 +448,13 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); + outputReducer.back()->setUnitMetaData("","","$\mathrm{fGrid rank}$",0.0); continue; } if(*it == "BoundaryType" || *it == "vg_BoundaryType") { // Type of boundarycells outputReducer->addOperator(new DRO::BoundaryType); + outputReducer.back()->setUnitMetaData("","","$\mathrm{vGrid Boundary type}$",0.0); continue; } if(*it == "fg_BoundaryType") { @@ -444,11 +485,13 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); + outputReducer.back()->setUnitMetaData("","","$\mathrm{fGrid Boundary type}$",0.0); continue; } if(*it == "BoundaryLayer" || *it == "vg_BoundaryLayer") { // For boundaries with multiple layers: layer count per cell outputReducer->addOperator(new DRO::BoundaryLayer); + outputReducer.back()->setUnitMetaData("","","$\mathrm{vGrid Boundary layer}$",0.0); continue; } if(*it == "fg_BoundaryLayer") { @@ -479,18 +522,21 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); + outputReducer.back()->setUnitMetaData("","","$\mathrm{fGrid Boundary layer}$",0.0); continue; } if (*it == "populations_Blocks") { // Per-population velocity space block counts for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { outputReducer->addOperator(new DRO::Blocks(i)); + outputReducer.back()->setUnitMetaData("","","$\mathrm{"pop+" blocks}$",0.0); } continue; } if(*it == "fSaved") { // Boolean marker whether a velocity space is saved in a given spatial cell outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("fSaved",CellParams::ISCELLSAVINGF,1)); + outputReducer.back()->setUnitMetaData("","","$f(v)_\mathrm{"+pop+" saved}$",0.0); continue; } if(*it == "populations_accSubcycles") { @@ -499,12 +545,14 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& 
species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/acc_subcycles", i, offsetof(spatial_cell::Population, ACCSUBCYCLES), 1)); + outputReducer.back()->setUnitMetaData("","","$\mathrm{"pop+" Acc subcycles}$",0.0); } continue; } if(*it == "VolE" || *it == "vg_VolE") { // Volume-averaged E field outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("E_vol",CellParams::EXVOL,3)); + outputReducer.back()->setUnitMetaData("V/m","$\mathrm{V}\,\mathrm{m}^{-1}$","$E_\mathrm{vol,vg}$",1.0); continue; } if(*it == "fg_VolE") { @@ -536,20 +584,24 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); + outputReducer.back()->setUnitMetaData("V/m","$\mathrm{V}\,\mathrm{m}^{-1}$","$E_\mathrm{vol,fg}$",1.0); continue; } if(*it == "fg_HallE") { // TODO: Add outputreducer to get EHALL from fsgrid + // outputReducer.back()->setUnitMetaData("V/m","$\mathrm{V}\,\mathrm{m}^{-1}$","$E_\mathrm{Hall}$",1.0); continue; } if(*it =="GradPeE") { // Electron pressure gradient contribution to the generalized ohm's law outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EGRADPE",CellParams::EXGRADPE,3)); + outputReducer.back()->setUnitMetaData("V/m","$\mathrm{V}\,\mathrm{m}^{-1}$","$E_{\del P_\mathrm{e}}$",1.0); continue; } if(*it == "VolB" || *it == "vg_VolB") { // Volume-averaged magnetic field outputReducer->addOperator(new DRO::VariableBVol); + outputReducer.back()->setUnitMetaData("T","$\mathrm{T}$","$B_\mathrm{vol,vg}$",1.0); continue; } if(*it == "fg_VolB") { // Static (typically dipole) magnetic field part @@ -584,19 +636,23 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); + outputReducer.back()->setUnitMetaData("T","$\mathrm{T}$","$B_\mathrm{vol,fg}$",1.0); continue; } if(*it == "BackgroundVolB") { outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("BGB_vol",CellParams::BGBXVOL,3)); + outputReducer.back()->setUnitMetaData("T","$\mathrm{T}$","$B_\mathrm{vol,vg,bg}$",1.0); continue; } if(*it == "PerturbedVolB") { outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("PERB_vol",CellParams::PERBXVOL,3)); + outputReducer.back()->setUnitMetaData("T","$\mathrm{T}$","$B_\mathrm{vol,vg,per}$",1.0); continue; } if(*it == "Pressure" || *it== "vg_Pressure") { // Overall scalar pressure from all populations outputReducer->addOperator(new DRO::VariablePressureSolver); + outputReducer.back()->setUnitMetaData("Pa","$\mathrm{Pa}$","$P_\mathrm{solver}$",1.0); continue; } if(*it == "fg_Pressure") { @@ -628,34 +684,49 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); + outputReducer.back()->setUnitMetaData("Pa","$\mathrm{Pa}$","$P_\mathrm{fg}$",1.0); continue; } if(*it == "populations_PTensor") { // Per-population pressure tensor, stored as diagonal and offdiagonal components for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { outputReducer->addOperator(new DRO::VariablePTensorDiagonal(i)); + outputReducer.back()->setUnitMetaData("Pa","$\mathrm{Pa}$","$\mathcal{P}_\mathrm{"+pop+"}$",1.0); outputReducer->addOperator(new DRO::VariablePTensorOffDiagonal(i)); + outputReducer.back()->setUnitMetaData("Pa","$\mathrm{Pa}$","$\mathcal{\tilde{P}}_\mathrm{"+pop+"}$",1.0); } continue; } if(*it == "BVOLderivs") { // Volume-averaged derivatives outputReducer->addOperator(new 
DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdy",bvolderivatives::dPERBXVOLdy,1)); + outputReducer.back()->setUnitMetaData("T/m","$\mathrm{T}\,\mathrm{m}^{-1}$","$\Delta B_{X,\mathrm{per,vol,vg}} (\Delta Y)^{-1}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdz",bvolderivatives::dPERBXVOLdz,1)); + outputReducer.back()->setUnitMetaData("T/m","$\mathrm{T}\,\mathrm{m}^{-1}$","$\Delta B_{X,\mathrm{per,vol,vg}} (\Delta Z)^{-1}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdx",bvolderivatives::dPERBYVOLdx,1)); + outputReducer.back()->setUnitMetaData("T/m","$\mathrm{T}\,\mathrm{m}^{-1}$","$\Delta B_{Y,\mathrm{per,vol,vg}} (\Delta X)^{-1}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdz",bvolderivatives::dPERBYVOLdz,1)); + outputReducer.back()->setUnitMetaData("T/m","$\mathrm{T}\,\mathrm{m}^{-1}$","$\Delta B_{Y,\mathrm{per,vol,vg}} (\Delta Z)^{-1}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdx",bvolderivatives::dPERBZVOLdx,1)); + outputReducer.back()->setUnitMetaData("T/m","$\mathrm{T}\,\mathrm{m}^{-1}$","$\Delta B_{Z,\mathrm{per,vol,vg}} (\Delta X)^{-1}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdy",bvolderivatives::dPERBZVOLdy,1)); + outputReducer.back()->setUnitMetaData("T/m","$\mathrm{T}\,\mathrm{m}^{-1}$","$\Delta B_{Z,\mathrm{per,vol,vg}} (\Delta Y)^{-1}$",1.0); continue; } if(*it == "vg_GridCoordinates") { // Spatial coordinates for each cell outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_X",CellParams::XCRD,1)); + outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$X_\mathrm{vg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_Y",CellParams::YCRD,1)); + outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$Y_\mathrm{vg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_Z",CellParams::ZCRD,1)); + outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$Z_\mathrm{vg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_DX",CellParams::DX,1)); + outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$\delta X_\mathrm{vg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_DY",CellParams::DY,1)); + outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$\delta Y_\mathrm{vg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_DZ",CellParams::DZ,1)); + outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$\delta Z_\mathrm{vg}$",1.0); continue; } if(*it == "fg_GridCoordinates") { @@ -685,6 +756,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); + outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$X_\mathrm{fg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_Y",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, @@ -711,6 +783,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); + outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$Y_\mathrm{fg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_Z",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, @@ -737,6 +810,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } 
)); + outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$Z_\mathrm{fg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_DX",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, @@ -763,6 +837,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); + outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$\delta X_\mathrm{fg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_DY",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, @@ -789,6 +864,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); + outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$\delta Y_\mathrm{fg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_DZ",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, @@ -815,10 +891,12 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); + outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$\delta Z_\mathrm{fg}$",1.0); continue; } if (*it == "MeshData") { outputReducer->addOperator(new DRO::VariableMeshData); + outputReducer.back()->setUnitMetaData("","","\mathrm{Mesh data}$",0.0); continue; } // After all the continue; statements one should never land here. @@ -966,6 +1044,18 @@ bool DataReducer::getDataVectorInfo(const unsigned int& operatorID,std::string& return operators[operatorID]->getDataVectorInfo(dataType,dataSize,vectorSize); } +/** Get metadata on the unit of data calculated by the given DataReductionOperator. + * @param operatorID ID number of the DataReductionOperator whose output unit metadata is requested. + * @param unit Physical unit of variable + * @param unitLaTeX Physical unit of variable, written using LaTeX notation + * @param unitConversion Floating point value of conversion factor to SI units + * @return If true, DataReductionOperator was found and it returned sensible values. + */ +bool DataReducer::getUnitMetadata(const unsigned int& operatorID,std::string& unit,std::string& unitLaTeX,std::string& variableLaTeX,Real& unitConversion) const { + if (operatorID >= operators.size()) return false; + return operators[operatorID]->getUnitMetadata(unit, unitLaTeX, variableLaTeX, unitConversion); +} + /** Ask a DataReductionOperator if it wants to take care of writing the data * to output file instead of letting be handled in iowrite.cpp. * @param operatorID ID number of the DataReductionOperator. 
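A remark on the datareductionoperator.h hunk below: the new pure-virtual setUnitMetadata is declared with a constructor-style member-initializer list, which is not valid C++ on an ordinary member function, and getUnitMetadata copies from its output parameters into names (unit, unitLaTeX, ...) that are never declared, since the stored members carry the underscore prefix. This is presumably what the next commit in the series, "Metadata construction compiles", straightens out. A minimal compilable sketch of the getter/setter pair these hunks appear to aim for (illustrative only; the const-qualifiers and the Real stand-in typedef are assumptions, not the committed code):

#include <string>

typedef double Real; // stand-in for Vlasiator's Real typedef

class DataReductionOperator {
public:
   // Store the unit metadata describing this operator's output variable.
   virtual bool setUnitMetadata(const std::string& unit, const std::string& unitLaTeX,
                                const std::string& variableLaTeX, Real unitConversion) {
      _unit = unit;
      _unitLaTeX = unitLaTeX;
      _variableLaTeX = variableLaTeX;
      _unitConversion = unitConversion;
      return true;
   }
   // Copy the stored metadata into the caller-provided output arguments.
   virtual bool getUnitMetadata(std::string& unit, std::string& unitLaTeX,
                                std::string& variableLaTeX, Real& unitConversion) const {
      unit = _unit;
      unitLaTeX = _unitLaTeX;
      variableLaTeX = _variableLaTeX;
      unitConversion = _unitConversion;
      return true;
   }
protected:
   std::string _unit;
   std::string _unitLaTeX;
   std::string _variableLaTeX;
   Real _unitConversion = 1.0;
};

Passing unitConversion by reference in the getter matches the DataReducer::getUnitMetadata signature in the hunk above.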
diff --git a/datareduction/datareducer.h b/datareduction/datareducer.h index ac37d9546..c8b59ce6c 100644 --- a/datareduction/datareducer.h +++ b/datareduction/datareducer.h @@ -44,6 +44,8 @@ class DataReducer { bool addOperator(DRO::DataReductionOperator* op); bool getDataVectorInfo(const unsigned int& operatorID,std::string& dataType, unsigned int& dataSize,unsigned int& vectorSize) const; + bool getUnitMetadata(const unsigned int& operatorID,std::string& unit, std::string& unitLaTeX, std::string& variableLaTeX, Real& unitConversion); + std::string getName(const unsigned int& operatorID) const; bool handlesWriting(const unsigned int& operatorID) const; bool hasParameters(const unsigned int& operatorID) const; diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index 0c4774cab..b4c4be605 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -70,7 +70,6 @@ namespace DRO { return false; } - DataReductionOperatorCellParams::DataReductionOperatorCellParams(const std::string& name,const unsigned int parameterIndex,const unsigned int _vectorSize): DataReductionOperator() { vectorSize=_vectorSize; @@ -85,7 +84,7 @@ namespace DRO { _vectorSize = vectorSize; return true; } - + std::string DataReductionOperatorCellParams::getName() const {return variableName;} bool DataReductionOperatorCellParams::reduceData(const SpatialCell* cell,char* buffer) { @@ -129,7 +128,6 @@ namespace DRO { bool DataReductionOperatorFsGrid::setSpatialCell(const SpatialCell* cell) { return true; } - bool DataReductionOperatorFsGrid::writeFsGridData( FsGrid< std::array, 2>& perBGrid, diff --git a/datareduction/datareductionoperator.h b/datareduction/datareductionoperator.h index ce5154afa..a6cd363e7 100644 --- a/datareduction/datareductionoperator.h +++ b/datareduction/datareductionoperator.h @@ -57,13 +57,26 @@ namespace DRO { virtual ~DataReductionOperator(); virtual bool getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const = 0; + virtual bool getUnitMetadata(std::string& _unit,std::string& _unitLaTeX,std::string& _variableLaTeX,Real _unitConversion) { + unit = _unit; + unitLaTeX = _unitLaTeX; + unitConversion = _unitConversion; + variableLaTeX = _variableLaTeX; + return true; + }; + virtual bool setUnitMetadata(std::string& _unit,std::string& _unitLaTeX,std::string& _variableLaTeX,Real _unitConversion) unit(_unit),unitLaTeX(_unitLaTeX),unitConversion(_unitConversion),variableLaTeX(_variableLaTeX) const = 0; + virtual std::string getName() const = 0; virtual bool reduceData(const SpatialCell* cell,char* buffer); virtual bool reduceDiagnostic(const SpatialCell* cell,Real * result); virtual bool setSpatialCell(const SpatialCell* cell) = 0; protected: - + std::string _unit; + std::string _unitLaTeX; + std::string _variableLaTeX; + Real _unitConversion; + }; class DataReductionOperatorHandlesWriting: public DataReductionOperator { diff --git a/iowrite.cpp b/iowrite.cpp index d412a2b2e..9876da6fd 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -318,8 +318,9 @@ bool writeDataReducer(const dccrg::Dccrg& int dataReducerIndex, Writer& vlsvWriter){ map attribs; - string variableName,dataType; + string variableName,dataType,unitString,unitStringLaTeX, variableStringLaTeX; bool success=true; + Real unitConversionFactor; const string meshName = "SpatialGrid"; variableName = dataReducer.getName(dataReducerIndex); @@ -342,7 +343,18 @@ bool writeDataReducer(const dccrg::Dccrg& 
phiprof::stop("DRO_"+variableName); return false; } - + + // Request variable unit metadata: unit, latex-formatted unit, and conversion factor to SI + if (dataReducer.getUnitMetadata(dataReducerIndex,unitString,unitStringLaTeX,variableStringLaTeX, unitConversionFactor) == false) { + cerr << "ERROR when requesting unit metadata from DRO " << dataReducerIndex << endl; + phiprof::stop("DRO_"+variableName); + return false; + } + attribs["unit"]=unitString; + attribs["unitLaTeX"]=unitStringLaTeX; + attribs["unitConversion"]=unitConversionFactor; + attribs["variableLaTeX"]=variableStringLaTeX; + // If DRO has a vector size of 0 it means this DRO should not write out anything. This is used e.g. for DROs we want only for certain populations. if (vectorSize == 0) { phiprof::stop("DRO_"+variableName); From c6869e3330d78531d72d74582f33057eb4804b50 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Mon, 20 May 2019 16:18:01 +0300 Subject: [PATCH 469/602] Metadata construction compiles --- datareduction/datareducer.cpp | 167 +++++++++++++++----------- datareduction/datareducer.h | 3 +- datareduction/datareductionoperator.h | 18 ++- iowrite.cpp | 2 +- 4 files changed, 111 insertions(+), 79 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 691ac7b0e..809f94080 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -74,7 +74,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer.back()->setUnitMetaData("T","$\mathrm{T}$","$B$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B$",1.0); continue; } if(*it == "fg_BackgroundB" || *it == "BackgroundB") { // Static (typically dipole) magnetic field part @@ -106,7 +106,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer.back()->setUnitMetaData("T","$\mathrm{T}$","$B_\mathrm{bg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{bg}$",1.0); continue; } if(*it == "fg_PerturbedB" || *it == "PerturbedB") { // Fluctuating magnetic field part @@ -138,7 +138,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer.back()->setUnitMetaData("T","$\mathrm{T}$","$B_\mathrm{per}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{per}$)",1.0); continue; } if(*it == "fg_E" || *it== "E") { // Bulk electric field at Yee-lattice locations @@ -170,12 +170,12 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer.back()->setUnitMetaData("V/m","$\mathrm{V}\,\mathrm{m}^{-1}$","$E$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E$",1.0); continue; } if(*it == "vg_Rhom" || *it == "Rhom") { // Overall mass density (summed over all populations) outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("rhom",CellParams::RHOM,1)); - outputReducer.back()->setUnitMetaData("kg/m^3","$\mathrm{kg}\,\mathrm{m}^{-3}$","$\rho_\mathrm{m}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"kg/m^3","$\\mathrm{kg}\\,\\mathrm{m}^{-3}$","$\\rho_\\mathrm{m}$",1.0); continue; } if(*it == "fg_Rhom") { // Overall mass density (summed over all populations) @@ -205,12 +205,12 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); 
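// (Pattern note: each addOperator() call is now immediately followed by
// addMetadata(outputReducer->size()-1, ...), attaching the metadata to the
// operator that was just appended to the reducer's operator list; the
// previous outputReducer.back()->... form could not compile, since
// outputReducer is a DataReducer* and the DataReducer interface shown in
// this series has no back().)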
- outputReducer.back()->setUnitMetaData("kg/m^3","$\mathrm{kg}\,\mathrm{m}^{-3}$","$\rho_\mathrm{m}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"kg/m^3","$\\mathrm{kg}\\,\\mathrm{m}^{-3}$","$\\rho_\\mathrm{m}$",1.0); continue; } if(*it == "vg_Rhoq" || *it == "Rhoq") { // Overall charge density (summed over all populations) outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("rhoq",CellParams::RHOQ,1)); - outputReducer.back()->setUnitMetaData("C/m^3","$\mathrm{C}\,\mathrm{m}^{-3}$","$\rho_\mathrm{q}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"C/m^3","$\\mathrm{C}\\,\\mathrm{m}^{-3}$","$\\rho_\\mathrm{q}$",1.0); continue; } if(*it == "fg_Rhoq") { // Overall charge density (summed over all populations) @@ -240,7 +240,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer.back()->setUnitMetaData("C/m^3","$\mathrm{C}\,\mathrm{m}^{-3}$","$\rho_\mathrm{q}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"C/m^3","$\\mathrm{C}\\,\\mathrm{m}^{-3}$","$\\rho_\\mathrm{q}$",1.0); continue; } if(*it == "populations_Rho") { // Per-population particle number density @@ -248,14 +248,14 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/rho", i, offsetof(spatial_cell::Population, RHO), 1)); - outputReducer.back()->setUnitMetaData("1/m^3","$\mathrm{m}^{-3}$","$\n_\mathrm{"+pop+"}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"1/m^3","$\\mathrm{m}^{-3}$","$\\n_\\mathrm{"+pop+"}$",1.0); } continue; } if(*it == "V" || *it == "vg_V") { // Overall effective bulk density defining the center-of-mass frame from all populations outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("V",CellParams::VX,3)); - outputReducer.back()->setUnitMetaData("m/s","$\mathrm{m}\,\mathrm{s}^{-1}$","$V$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V$",1.0); continue; } if(*it == "fg_V") { // Overall effective bulk density defining the center-of-mass frame from all populations @@ -287,7 +287,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer.back()->setUnitMetaData("m/s","$\mathrm{m}\,\mathrm{s}^{-1}$","$V$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V$",1.0); continue; } if(*it == "populations_V") { // Per population bulk velocities @@ -295,41 +295,47 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/V", i, offsetof(spatial_cell::Population, V), 3)); - outputReducer.back()->setUnitMetaData("m/s","$\mathrm{m}\,\mathrm{s}^{-1}$","$V_\mathrm{"+pop+"}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V_\\mathrm{"+pop+"}$",1.0); } continue; } if(*it == "populations_moments_Backstream") { // Per-population moments of the backstreaming part for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { + species::Species& species=getObjectWrapper().particleSpecies[i]; + const std::string& pop = species.name; 
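// (The per-iteration declaration of `pop` above is what feeds the population
// name into the LaTeX labels below: a population hypothetically named
// "proton" would, for instance, get the rendered label $n_\mathrm{proton,st}$.)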
outputReducer->addOperator(new DRO::VariableRhoBackstream(i)); - outputReducer.back()->setUnitMetaData("1/m^3","$\mathrm{m}^{-3}$","$n_\mathrm{"+pop+",st}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"1/m^3","$\\mathrm{m}^{-3}$","$n_\\mathrm{"+pop+",st}$",1.0); outputReducer->addOperator(new DRO::VariableVBackstream(i)); - outputReducer.back()->setUnitMetaData("m/s","$\mathrm{m}\,\mathrm{s}^{-1}$","$V_\mathrm{"+pop+",st}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V_\\mathrm{"+pop+",st}$",1.0); outputReducer->addOperator(new DRO::VariablePTensorBackstreamDiagonal(i)); - outputReducer.back()->setUnitMetaData("Pa","$\mathrm{Pa}$","$\mathcal{P}_\mathrm{"+pop+",st}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$\\mathcal{P}_\\mathrm{"+pop+",st}$",1.0); outputReducer->addOperator(new DRO::VariablePTensorBackstreamOffDiagonal(i)); - outputReducer.back()->setUnitMetaData("Pa","$\mathrm{Pa}$","$\mathcal{\tilde{P}}_\mathrm{"+pop+",st}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$\\mathcal{\\tilde{P}}_\\mathrm{"+pop+",st}$",1.0); } continue; } if(*it == "populations_moments_NonBackstream") { // Per-population moments of the non-backstreaming (thermal?) part. for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { + species::Species& species=getObjectWrapper().particleSpecies[i]; + const std::string& pop = species.name; outputReducer->addOperator(new DRO::VariableRhoNonBackstream(i)); - outputReducer.back()->setUnitMetaData("1/m^3","$\mathrm{m}^{-3}$","$n_\mathrm{"+pop+",th}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"1/m^3","$\\mathrm{m}^{-3}$","$n_\\mathrm{"+pop+",th}$",1.0); outputReducer->addOperator(new DRO::VariableVNonBackstream(i)); - outputReducer.back()->setUnitMetaData("m/s","$\mathrm{m}\,\mathrm{s}^{-1}$","$V_\mathrm{"+pop+",th}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V_\\mathrm{"+pop+",th}$",1.0); outputReducer->addOperator(new DRO::VariablePTensorNonBackstreamDiagonal(i)); - outputReducer.back()->setUnitMetaData("Pa","$\mathrm{Pa}$","$\mathcal{P}_\mathrm{"+pop+",th}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$\\mathcal{P}_\\mathrm{"+pop+",th}$",1.0); outputReducer->addOperator(new DRO::VariablePTensorNonBackstreamOffDiagonal(i)); - outputReducer.back()->setUnitMetaData("Pa","$\mathrm{Pa}$","$\mathcal{\tilde{P}}_\mathrm{"+pop+",th}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$\\mathcal{\\tilde{P}}_\\mathrm{"+pop+",th}$",1.0); } continue; } if(*it == "populations_MinValue" || *it == "populations_EffectiveSparsityThreshold") { // Effective sparsity threshold affecting each cell, if dynamic threshould algorithm is used for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { + species::Species& species=getObjectWrapper().particleSpecies[i]; + const std::string& pop = species.name; outputReducer->addOperator(new DRO::VariableEffectiveSparsityThreshold(i)); - outputReducer.back()->setUnitMetaData("s^3/m^6","$\mathrm{m}^{-6}\,\mathrm{s}^{3}$","$f_\mathrm{"+pop+",min}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"s^3/m^6","$\\mathrm{m}^{-6}\\,\\mathrm{s}^{3}$","$f_\\mathrm{"+pop+",min}$",1.0); } continue; } @@ -339,20 +345,20 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& 
species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/rho_loss_adjust", i, offsetof(spatial_cell::Population, RHOLOSSADJUST), 1)); - outputReducer.back()->setUnitMetaData("1/m^3","$\mathrm{m}^{-3}$","$\Delta_\mathrm{loss} n_\mathrm{"+pop+"}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"1/m^3","$\\mathrm{m}^{-3}$","$\\Delta_\\mathrm{loss} n_\\mathrm{"+pop+"}$",1.0); } continue; } if(*it == "LBweight" || *it == "vg_LBweight") { // Load balance metric for LB debugging outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("LB_weight",CellParams::LBWEIGHTCOUNTER,1)); - outputReducer.back()->setUnitMetaData("","","$\mathrm{LB weight}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{LB weight}$",0.0); continue; } if(*it == "MaxVdt") { // Overall maximum timestep constraint as calculated by the velocity space vlasov update outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("max_v_dt",CellParams::MAXVDT,1)); - outputReducer.back()->setUnitMetaData("s","$\mathrm{s}$","$\Delta t_\mathrm{V,max}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{V,max}$",1.0); continue; } if(*it == "populations_MaxVdt") { @@ -361,14 +367,14 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/MaxVdt", i, offsetof(spatial_cell::Population, max_dt[1]), 1)); - outputReducer.back()->setUnitMetaData("s","$\mathrm{s}$","$\Delta t_\mathrm{"+pop+",V,max}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{"+pop+",V,max}$",1.0); } continue; } if(*it == "MaxRdt") { // Overall maximum timestep constraint as calculated by the real space vlasov update outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("max_r_dt",CellParams::MAXRDT,1)); - outputReducer.back()->setUnitMetaData("s","$\mathrm{s}$","$\Delta t_\mathrm{R,max}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{R,max}$",1.0); continue; } if(*it == "populations_MaxRdt") { @@ -377,21 +383,23 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/MaxRdt", i, offsetof(spatial_cell::Population, max_dt[0]), 1)); - outputReducer.back()->setUnitMetaData("s","$\mathrm{s}$","$\Delta t_\mathrm{"+pop+",R,max}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{"+pop+",R,max}$",1.0); } continue; } if(*it == "populations_EnergyDensity") { // Per-population energy density in three energy ranges for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { + species::Species& species=getObjectWrapper().particleSpecies[i]; + const std::string& pop = species.name; outputReducer->addOperator(new DRO::VariableEnergyDensity(i)); - outputReducer.back()->setUnitMetaData("eV/cm^3","$\mathrm{eV}\,\mathrm{cm}^{-3}$","$U_\mathrm{"+pop+"}$",(1.0e-6)/physicalconstants::CHARGE); + 
outputReducer->addMetadata(outputReducer->size()-1,"eV/cm^3","$\\mathrm{eV}\\,\\mathrm{cm}^{-3}$","$U_\\mathrm{"+pop+"}$",(1.0e-6)/physicalconstants::CHARGE); } continue; } // For precipitation: - // outputReducer.back()->setUnitMetaData("1/(cm^2 sr s eV)","$\mathrm{cm}^{-2}\,\mathrm{sr}^{-1}\,\mathrm{s}^{-1}\,\mathrm{eV}^{-1}$","$\mathcal{F}_\mathrm{"+pop+"}$",(1.0e-4)*physicalconstants::CHARGE); + // outputReducer->addMetadata(outputReducer->size()-1,"1/(cm^2 sr s eV)","$\\mathrm{cm}^{-2}\\,\\mathrm{sr}^{-1}\\,\\mathrm{s}^{-1}\\,\\mathrm{eV}^{-1}$","$\\mathcal{F}_\\mathrm{"+pop+"}$",(1.0e-4)*physicalconstants::CHARGE); if(*it == "MaxFieldsdt" || *it == "fg_MaxFieldsdt") { // Maximum timestep constraint as calculated by the fieldsolver outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("MaxFieldsdt",[]( @@ -420,13 +428,13 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer.back()->setUnitMetaData("s","$\mathrm{s}$","$\Delta t_\mathrm{f,max}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{f,max}$",1.0); continue; } if(*it == "MPIrank" || *it == "vg_rank") { // Map of spatial decomposition of the DCCRG grid into MPI ranks outputReducer->addOperator(new DRO::MPIrank); - outputReducer.back()->setUnitMetaData("","","$\mathrm{MPI rank}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{MPI rank}$",0.0); continue; } if(*it == "FsGridRank" || *it == "fg_rank") { @@ -448,13 +456,13 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer.back()->setUnitMetaData("","","$\mathrm{fGrid rank}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{fGrid rank}$",0.0); continue; } if(*it == "BoundaryType" || *it == "vg_BoundaryType") { // Type of boundarycells outputReducer->addOperator(new DRO::BoundaryType); - outputReducer.back()->setUnitMetaData("","","$\mathrm{vGrid Boundary type}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{vGrid Boundary type}$",0.0); continue; } if(*it == "fg_BoundaryType") { @@ -485,13 +493,13 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer.back()->setUnitMetaData("","","$\mathrm{fGrid Boundary type}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{fGrid Boundary type}$",0.0); continue; } if(*it == "BoundaryLayer" || *it == "vg_BoundaryLayer") { // For boundaries with multiple layers: layer count per cell outputReducer->addOperator(new DRO::BoundaryLayer); - outputReducer.back()->setUnitMetaData("","","$\mathrm{vGrid Boundary layer}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{vGrid Boundary layer}$",0.0); continue; } if(*it == "fg_BoundaryLayer") { @@ -522,21 +530,23 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer.back()->setUnitMetaData("","","$\mathrm{fGrid Boundary layer}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{fGrid Boundary layer}$",0.0); continue; } if (*it == "populations_Blocks") { // Per-population velocity space block counts for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { + species::Species& species=getObjectWrapper().particleSpecies[i]; + const std::string& pop = species.name; outputReducer->addOperator(new 
DRO::Blocks(i)); - outputReducer.back()->setUnitMetaData("","","$\mathrm{"pop+" blocks}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{"+pop+" blocks}$",0.0); } continue; } if(*it == "fSaved") { // Boolean marker whether a velocity space is saved in a given spatial cell outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("fSaved",CellParams::ISCELLSAVINGF,1)); - outputReducer.back()->setUnitMetaData("","","$f(v)_\mathrm{"+pop+" saved}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$f(v)_\\mathrm{saved}$",0.0); continue; } if(*it == "populations_accSubcycles") { @@ -545,14 +555,14 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/acc_subcycles", i, offsetof(spatial_cell::Population, ACCSUBCYCLES), 1)); - outputReducer.back()->setUnitMetaData("","","$\mathrm{"pop+" Acc subcycles}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{"+pop+" Acc subcycles}$",0.0); } continue; } if(*it == "VolE" || *it == "vg_VolE") { // Volume-averaged E field outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("E_vol",CellParams::EXVOL,3)); - outputReducer.back()->setUnitMetaData("V/m","$\mathrm{V}\,\mathrm{m}^{-1}$","$E_\mathrm{vol,vg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{vol,vg}$",1.0); continue; } if(*it == "fg_VolE") { @@ -584,24 +594,24 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer.back()->setUnitMetaData("V/m","$\mathrm{V}\,\mathrm{m}^{-1}$","$E_\mathrm{vol,fg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{vol,fg}$",1.0); continue; } if(*it == "fg_HallE") { // TODO: Add outputreducer to get EHALL from fsgrid - // outputReducer.back()->setUnitMetaData("V/m","$\mathrm{V}\,\mathrm{m}^{-1}$","$E_\mathrm{Hall}$",1.0); + // outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{Hall}$",1.0); continue; } if(*it =="GradPeE") { // Electron pressure gradient contribution to the generalized ohm's law outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EGRADPE",CellParams::EXGRADPE,3)); - outputReducer.back()->setUnitMetaData("V/m","$\mathrm{V}\,\mathrm{m}^{-1}$","$E_{\del P_\mathrm{e}}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_{\\del P_\\mathrm{e}}$",1.0); continue; } if(*it == "VolB" || *it == "vg_VolB") { // Volume-averaged magnetic field outputReducer->addOperator(new DRO::VariableBVol); - outputReducer.back()->setUnitMetaData("T","$\mathrm{T}$","$B_\mathrm{vol,vg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,vg}$",1.0); continue; } if(*it == "fg_VolB") { // Static (typically dipole) magnetic field part @@ -636,23 +646,23 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer.back()->setUnitMetaData("T","$\mathrm{T}$","$B_\mathrm{vol,fg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,fg}$",1.0); continue; } if(*it == "BackgroundVolB") { outputReducer->addOperator(new 
DRO::DataReductionOperatorCellParams("BGB_vol",CellParams::BGBXVOL,3)); - outputReducer.back()->setUnitMetaData("T","$\mathrm{T}$","$B_\mathrm{vol,vg,bg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,vg,bg}$",1.0); continue; } if(*it == "PerturbedVolB") { outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("PERB_vol",CellParams::PERBXVOL,3)); - outputReducer.back()->setUnitMetaData("T","$\mathrm{T}$","$B_\mathrm{vol,vg,per}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,vg,per}$",1.0); continue; } if(*it == "Pressure" || *it== "vg_Pressure") { // Overall scalar pressure from all populations outputReducer->addOperator(new DRO::VariablePressureSolver); - outputReducer.back()->setUnitMetaData("Pa","$\mathrm{Pa}$","$P_\mathrm{solver}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$P_\\mathrm{solver}$",1.0); continue; } if(*it == "fg_Pressure") { @@ -684,49 +694,51 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer.back()->setUnitMetaData("Pa","$\mathrm{Pa}$","$P_\mathrm{fg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$P_\\mathrm{fg}$",1.0); continue; } if(*it == "populations_PTensor") { // Per-population pressure tensor, stored as diagonal and offdiagonal components for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { + species::Species& species=getObjectWrapper().particleSpecies[i]; + const std::string& pop = species.name; outputReducer->addOperator(new DRO::VariablePTensorDiagonal(i)); - outputReducer.back()->setUnitMetaData("Pa","$\mathrm{Pa}$","$\mathcal{P}_\mathrm{"+pop+"}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$\\mathcal{P}_\\mathrm{"+pop+"}$",1.0); outputReducer->addOperator(new DRO::VariablePTensorOffDiagonal(i)); - outputReducer.back()->setUnitMetaData("Pa","$\mathrm{Pa}$","$\mathcal{\tilde{P}}_\mathrm{"+pop+"}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$\\mathcal{\\tilde{P}}_\\mathrm{"+pop+"}$",1.0); } continue; } if(*it == "BVOLderivs") { // Volume-averaged derivatives outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdy",bvolderivatives::dPERBXVOLdy,1)); - outputReducer.back()->setUnitMetaData("T/m","$\mathrm{T}\,\mathrm{m}^{-1}$","$\Delta B_{X,\mathrm{per,vol,vg}} (\Delta Y)^{-1}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{X,\\mathrm{per,vol,vg}} (\\Delta Y)^{-1}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdz",bvolderivatives::dPERBXVOLdz,1)); - outputReducer.back()->setUnitMetaData("T/m","$\mathrm{T}\,\mathrm{m}^{-1}$","$\Delta B_{X,\mathrm{per,vol,vg}} (\Delta Z)^{-1}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{X,\\mathrm{per,vol,vg}} (\\Delta Z)^{-1}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdx",bvolderivatives::dPERBYVOLdx,1)); - outputReducer.back()->setUnitMetaData("T/m","$\mathrm{T}\,\mathrm{m}^{-1}$","$\Delta B_{Y,\mathrm{per,vol,vg}} (\Delta X)^{-1}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{Y,\\mathrm{per,vol,vg}} (\\Delta X)^{-1}$",1.0); 
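// (Note on the string literals: relative to the previous commit, every LaTeX
// backslash is now escaped, e.g. "$\\Delta B ...$" instead of "$\Delta B ...$",
// so the stored string carries the single backslash LaTeX expects instead of
// relying on invalid C++ escape sequences such as \D or \m, which compilers
// at best only warn about.)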
outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdz",bvolderivatives::dPERBYVOLdz,1)); - outputReducer.back()->setUnitMetaData("T/m","$\mathrm{T}\,\mathrm{m}^{-1}$","$\Delta B_{Y,\mathrm{per,vol,vg}} (\Delta Z)^{-1}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{Y,\\mathrm{per,vol,vg}} (\\Delta Z)^{-1}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdx",bvolderivatives::dPERBZVOLdx,1)); - outputReducer.back()->setUnitMetaData("T/m","$\mathrm{T}\,\mathrm{m}^{-1}$","$\Delta B_{Z,\mathrm{per,vol,vg}} (\Delta X)^{-1}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{Z,\\mathrm{per,vol,vg}} (\\Delta X)^{-1}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdy",bvolderivatives::dPERBZVOLdy,1)); - outputReducer.back()->setUnitMetaData("T/m","$\mathrm{T}\,\mathrm{m}^{-1}$","$\Delta B_{Z,\mathrm{per,vol,vg}} (\Delta Y)^{-1}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{Z,\\mathrm{per,vol,vg}} (\\Delta Y)^{-1}$",1.0); continue; } if(*it == "vg_GridCoordinates") { // Spatial coordinates for each cell outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_X",CellParams::XCRD,1)); - outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$X_\mathrm{vg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$X_\\mathrm{vg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_Y",CellParams::YCRD,1)); - outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$Y_\mathrm{vg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$Y_\\mathrm{vg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_Z",CellParams::ZCRD,1)); - outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$Z_\mathrm{vg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$Z_\\mathrm{vg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_DX",CellParams::DX,1)); - outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$\delta X_\mathrm{vg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta X_\\mathrm{vg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_DY",CellParams::DY,1)); - outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$\delta Y_\mathrm{vg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta Y_\\mathrm{vg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_DZ",CellParams::DZ,1)); - outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$\delta Z_\mathrm{vg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta Z_\\mathrm{vg}$",1.0); continue; } if(*it == "fg_GridCoordinates") { @@ -756,7 +768,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$X_\mathrm{fg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$X_\\mathrm{fg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_Y",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, @@ -783,7 +795,7 @@ void 
initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$Y_\mathrm{fg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$Y_\\mathrm{fg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_Z",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, @@ -810,7 +822,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$Z_\mathrm{fg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$Z_\\mathrm{fg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_DX",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, @@ -837,7 +849,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$\delta X_\mathrm{fg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta X_\\mathrm{fg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_DY",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, @@ -864,7 +876,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$\delta Y_\mathrm{fg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta Y_\\mathrm{fg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_DZ",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, @@ -891,12 +903,12 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer.back()->setUnitMetaData("m","$\mathrm{m}$","$\delta Z_\mathrm{fg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta Z_\\mathrm{fg}$",1.0); continue; } if (*it == "MeshData") { outputReducer->addOperator(new DRO::VariableMeshData); - outputReducer.back()->setUnitMetaData("","","\mathrm{Mesh data}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","\\mathrm{Mesh data}$",0.0); continue; } // After all the continue; statements one should never land here. @@ -1044,6 +1056,19 @@ bool DataReducer::getDataVectorInfo(const unsigned int& operatorID,std::string& return operators[operatorID]->getDataVectorInfo(dataType,dataSize,vectorSize); } +/** Add a metadata to the specified DRO::DataReductionOperator. + * @param operatorID ID number of the DataReductionOperator to add metadata to + * @param unit string with the physical unit of the DRO result + * @param unitLaTeX LaTeX-formatted string with the physical unit of the DRO result + * @param variableLaTeX LaTeX-formatted string with a descriptive short name for the DRO result + * @param conversionFactor floating point conversion factor between DRO result and SI units + * @return If true, the given metadata was added successfully. + */ +bool DataReducer::addMetadata(const unsigned int operatorID, std::string unit,std::string unitLaTeX,std::string variableLaTeX,Real unitConversion) { + if (operatorID >= operators.size()) return false; + return operators[operatorID]->setUnitMetadata(unit,unitLaTeX,variableLaTeX,unitConversion); +} + /** Get metadata on the unit of data calculated by the given DataReductionOperator. 
* @param operatorID ID number of the DataReductionOperator whose output unit metadata is requested. * @param unit Physical unit of variable @@ -1051,7 +1076,7 @@ bool DataReducer::getDataVectorInfo(const unsigned int& operatorID,std::string& * @param unitConversion Floating point value of conversion factor to SI units * @return If true, DataReductionOperator was found and it returned sensible values. */ -bool DataReducer::getUnitMetadata(const unsigned int& operatorID,std::string& unit,std::string& unitLaTeX,std::string& variableLaTeX,Real& unitConversion) const { +bool DataReducer::getMetadata(const unsigned int& operatorID,std::string& unit,std::string& unitLaTeX,std::string& variableLaTeX,Real& unitConversion) const { if (operatorID >= operators.size()) return false; return operators[operatorID]->getUnitMetadata(unit, unitLaTeX, variableLaTeX, unitConversion); } diff --git a/datareduction/datareducer.h b/datareduction/datareducer.h index c8b59ce6c..ac79f1551 100644 --- a/datareduction/datareducer.h +++ b/datareduction/datareducer.h @@ -44,7 +44,8 @@ class DataReducer { bool addOperator(DRO::DataReductionOperator* op); bool getDataVectorInfo(const unsigned int& operatorID,std::string& dataType, unsigned int& dataSize,unsigned int& vectorSize) const; - bool getUnitMetadata(const unsigned int& operatorID,std::string& unit, std::string& unitLaTeX, std::string& variableLaTeX, Real& unitConversion); + bool addMetadata(const unsigned int operatorID,std::string unit,std::string unitLaTeX,std::string variableLaTeX,Real unitConversion); + bool getMetadata(const unsigned int& operatorID,std::string& unit,std::string& unitLaTeX,std::string& variableLaTeX,Real& unitConversion) const; std::string getName(const unsigned int& operatorID) const; bool handlesWriting(const unsigned int& operatorID) const; diff --git a/datareduction/datareductionoperator.h b/datareduction/datareductionoperator.h index a6cd363e7..325ce452b 100644 --- a/datareduction/datareductionoperator.h +++ b/datareduction/datareductionoperator.h @@ -58,13 +58,19 @@ namespace DRO { virtual bool getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const = 0; virtual bool getUnitMetadata(std::string& _unit,std::string& _unitLaTeX,std::string& _variableLaTeX,Real _unitConversion) { + _unit=unit; + _unitLaTeX=unitLaTeX; + _unitConversion=unitConversion; + _variableLaTeX=variableLaTeX; + return true; + }; + virtual bool setUnitMetadata(std::string& _unit,std::string& _unitLaTeX,std::string& _variableLaTeX,Real _unitConversion) { unit = _unit; unitLaTeX = _unitLaTeX; unitConversion = _unitConversion; variableLaTeX = _variableLaTeX; return true; - }; - virtual bool setUnitMetadata(std::string& _unit,std::string& _unitLaTeX,std::string& _variableLaTeX,Real _unitConversion) unit(_unit),unitLaTeX(_unitLaTeX),unitConversion(_unitConversion),variableLaTeX(_variableLaTeX) const = 0; + } virtual std::string getName() const = 0; virtual bool reduceData(const SpatialCell* cell,char* buffer); @@ -72,10 +78,10 @@ namespace DRO { virtual bool setSpatialCell(const SpatialCell* cell) = 0; protected: - std::string _unit; - std::string _unitLaTeX; - std::string _variableLaTeX; - Real _unitConversion; + std::string unit; + std::string unitLaTeX; + std::string variableLaTeX; + Real unitConversion; }; diff --git a/iowrite.cpp b/iowrite.cpp index 9876da6fd..69947052a 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -345,7 +345,7 @@ bool writeDataReducer(const dccrg::Dccrg& } // Request variable unit metadata: unit, latex-formatted 
unit, and conversion factor to SI - if (dataReducer.getUnitMetadata(dataReducerIndex,unitString,unitStringLaTeX,variableStringLaTeX, unitConversionFactor) == false) { + if (dataReducer.getMetadata(dataReducerIndex,unitString,unitStringLaTeX,variableStringLaTeX,unitConversionFactor) == false) { cerr << "ERROR when requesting unit metadata from DRO " << dataReducerIndex << endl; phiprof::stop("DRO_"+variableName); return false; From 164624ba9917c9d5dd7c1b87ff484d6a723ccf96 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Tue, 21 May 2019 09:40:05 +0300 Subject: [PATCH 470/602] Output conversion factor as string, fixed fsgrid reducer metadata output --- datareduction/datareducer.cpp | 148 ++++++++++++------------ datareduction/datareducer.h | 4 +- datareduction/datareductionoperator.cpp | 4 + datareduction/datareductionoperator.h | 6 +- iowrite.cpp | 3 +- 5 files changed, 86 insertions(+), 79 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 809f94080..8cbdc034e 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -74,7 +74,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B$","1.0"); continue; } if(*it == "fg_BackgroundB" || *it == "BackgroundB") { // Static (typically dipole) magnetic field part @@ -106,7 +106,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{bg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{bg}$","1.0"); continue; } if(*it == "fg_PerturbedB" || *it == "PerturbedB") { // Fluctuating magnetic field part @@ -138,7 +138,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{per}$)",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{per}$)","1.0"); continue; } if(*it == "fg_E" || *it== "E") { // Bulk electric field at Yee-lattice locations @@ -170,12 +170,12 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E$","1.0"); continue; } if(*it == "vg_Rhom" || *it == "Rhom") { // Overall mass density (summed over all populations) outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("rhom",CellParams::RHOM,1)); - outputReducer->addMetadata(outputReducer->size()-1,"kg/m^3","$\\mathrm{kg}\\,\\mathrm{m}^{-3}$","$\\rho_\\mathrm{m}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"kg/m^3","$\\mathrm{kg}\\,\\mathrm{m}^{-3}$","$\\rho_\\mathrm{m}$","1.0"); continue; } if(*it == "fg_Rhom") { // Overall mass density (summed over all populations) @@ -205,12 +205,12 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"kg/m^3","$\\mathrm{kg}\\,\\mathrm{m}^{-3}$","$\\rho_\\mathrm{m}$",1.0); + 
outputReducer->addMetadata(outputReducer->size()-1,"kg/m^3","$\\mathrm{kg}\\,\\mathrm{m}^{-3}$","$\\rho_\\mathrm{m}$","1.0"); continue; } if(*it == "vg_Rhoq" || *it == "Rhoq") { // Overall charge density (summed over all populations) outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("rhoq",CellParams::RHOQ,1)); - outputReducer->addMetadata(outputReducer->size()-1,"C/m^3","$\\mathrm{C}\\,\\mathrm{m}^{-3}$","$\\rho_\\mathrm{q}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"C/m^3","$\\mathrm{C}\\,\\mathrm{m}^{-3}$","$\\rho_\\mathrm{q}$","1.0"); continue; } if(*it == "fg_Rhoq") { // Overall charge density (summed over all populations) @@ -240,7 +240,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"C/m^3","$\\mathrm{C}\\,\\mathrm{m}^{-3}$","$\\rho_\\mathrm{q}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"C/m^3","$\\mathrm{C}\\,\\mathrm{m}^{-3}$","$\\rho_\\mathrm{q}$","1.0"); continue; } if(*it == "populations_Rho") { // Per-population particle number density @@ -248,14 +248,14 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/rho", i, offsetof(spatial_cell::Population, RHO), 1)); - outputReducer->addMetadata(outputReducer->size()-1,"1/m^3","$\\mathrm{m}^{-3}$","$\\n_\\mathrm{"+pop+"}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"1/m^3","$\\mathrm{m}^{-3}$","$\\n_\\mathrm{"+pop+"}$","1.0"); } continue; } if(*it == "V" || *it == "vg_V") { // Overall effective bulk density defining the center-of-mass frame from all populations outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("V",CellParams::VX,3)); - outputReducer->addMetadata(outputReducer->size()-1,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V$","1.0"); continue; } if(*it == "fg_V") { // Overall effective bulk density defining the center-of-mass frame from all populations @@ -287,7 +287,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V$","1.0"); continue; } if(*it == "populations_V") { // Per population bulk velocities @@ -295,7 +295,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/V", i, offsetof(spatial_cell::Population, V), 3)); - outputReducer->addMetadata(outputReducer->size()-1,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V_\\mathrm{"+pop+"}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V_\\mathrm{"+pop+"}$","1.0"); } continue; } @@ -304,13 +304,13 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::VariableRhoBackstream(i)); - 
outputReducer->addMetadata(outputReducer->size()-1,"1/m^3","$\\mathrm{m}^{-3}$","$n_\\mathrm{"+pop+",st}$",1.0); outputReducer->addOperator(new DRO::VariableVBackstream(i)); - outputReducer->addMetadata(outputReducer->size()-1,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V_\\mathrm{"+pop+",st}$",1.0); outputReducer->addOperator(new DRO::VariablePTensorBackstreamDiagonal(i)); - outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$\\mathcal{P}_\\mathrm{"+pop+",st}$",1.0); outputReducer->addOperator(new DRO::VariablePTensorBackstreamOffDiagonal(i)); - outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$\\mathcal{\\tilde{P}}_\\mathrm{"+pop+",st}$",1.0); + outputReducer->addMetadata(outputReducer->size()-4,"1/m^3","$\\mathrm{m}^{-3}$","$n_\\mathrm{"+pop+",st}$","1.0"); + outputReducer->addMetadata(outputReducer->size()-3,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V_\\mathrm{"+pop+",st}$","1.0"); + outputReducer->addMetadata(outputReducer->size()-2,"Pa","$\\mathrm{Pa}$","$\\mathcal{P}_\\mathrm{"+pop+",st}$","1.0"); + outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$\\mathcal{\\tilde{P}}_\\mathrm{"+pop+",st}$","1.0"); } continue; } @@ -319,13 +319,13 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::VariableRhoNonBackstream(i)); - outputReducer->addMetadata(outputReducer->size()-1,"1/m^3","$\\mathrm{m}^{-3}$","$n_\\mathrm{"+pop+",th}$",1.0); outputReducer->addOperator(new DRO::VariableVNonBackstream(i)); - outputReducer->addMetadata(outputReducer->size()-1,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V_\\mathrm{"+pop+",th}$",1.0); outputReducer->addOperator(new DRO::VariablePTensorNonBackstreamDiagonal(i)); - outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$\\mathcal{P}_\\mathrm{"+pop+",th}$",1.0); outputReducer->addOperator(new DRO::VariablePTensorNonBackstreamOffDiagonal(i)); - outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$\\mathcal{\\tilde{P}}_\\mathrm{"+pop+",th}$",1.0); + outputReducer->addMetadata(outputReducer->size()-4,"1/m^3","$\\mathrm{m}^{-3}$","$n_\\mathrm{"+pop+",th}$","1.0"); + outputReducer->addMetadata(outputReducer->size()-3,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V_\\mathrm{"+pop+",th}$","1.0"); + outputReducer->addMetadata(outputReducer->size()-2,"Pa","$\\mathrm{Pa}$","$\\mathcal{P}_\\mathrm{"+pop+",th}$","1.0"); + outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$\\mathcal{\\tilde{P}}_\\mathrm{"+pop+",th}$","1.0"); } continue; } @@ -335,7 +335,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::VariableEffectiveSparsityThreshold(i)); - outputReducer->addMetadata(outputReducer->size()-1,"s^3/m^6","$\\mathrm{m}^{-6}\\,\\mathrm{s}^{3}$","$f_\\mathrm{"+pop+",min}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"s^3/m^6","$\\mathrm{m}^{-6}\\,\\mathrm{s}^{3}$","$f_\\mathrm{"+pop+",min}$","1.0"); } continue; } @@ -345,20 +345,20 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new 
DRO::DataReductionOperatorPopulations(pop + "/rho_loss_adjust", i, offsetof(spatial_cell::Population, RHOLOSSADJUST), 1)); - outputReducer->addMetadata(outputReducer->size()-1,"1/m^3","$\\mathrm{m}^{-3}$","$\\Delta_\\mathrm{loss} n_\\mathrm{"+pop+"}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"1/m^3","$\\mathrm{m}^{-3}$","$\\Delta_\\mathrm{loss} n_\\mathrm{"+pop+"}$","1.0"); } continue; } if(*it == "LBweight" || *it == "vg_LBweight") { // Load balance metric for LB debugging outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("LB_weight",CellParams::LBWEIGHTCOUNTER,1)); - outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{LB weight}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{LB weight}$",""); continue; } if(*it == "MaxVdt") { // Overall maximum timestep constraint as calculated by the velocity space vlasov update outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("max_v_dt",CellParams::MAXVDT,1)); - outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{V,max}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{V,max}$","1.0"); continue; } if(*it == "populations_MaxVdt") { @@ -367,14 +367,14 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/MaxVdt", i, offsetof(spatial_cell::Population, max_dt[1]), 1)); - outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{"+pop+",V,max}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{"+pop+",V,max}$","1.0"); } continue; } if(*it == "MaxRdt") { // Overall maximum timestep constraint as calculated by the real space vlasov update outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("max_r_dt",CellParams::MAXRDT,1)); - outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{R,max}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{R,max}$","1.0"); continue; } if(*it == "populations_MaxRdt") { @@ -383,7 +383,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/MaxRdt", i, offsetof(spatial_cell::Population, max_dt[0]), 1)); - outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{"+pop+",R,max}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{"+pop+",R,max}$","1.0"); } continue; } @@ -393,13 +393,17 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::VariableEnergyDensity(i)); - outputReducer->addMetadata(outputReducer->size()-1,"eV/cm^3","$\\mathrm{eV}\\,\\mathrm{cm}^{-3}$","$U_\\mathrm{"+pop+"}$",(1.0e-6)/physicalconstants::CHARGE); + std::stringstream conversion; + conversion << (1.0e-6)/physicalconstants::CHARGE; + 
outputReducer->addMetadata(outputReducer->size()-1,"eV/cm^3","$\\mathrm{eV}\\,\\mathrm{cm}^{-3}$","$U_\\mathrm{"+pop+"}$",conversion.str()); } continue; } // For precipitation: - // outputReducer->addMetadata(outputReducer->size()-1,"1/(cm^2 sr s eV)","$\\mathrm{cm}^{-2}\\,\\mathrm{sr}^{-1}\\,\\mathrm{s}^{-1}\\,\\mathrm{eV}^{-1}$","$\\mathcal{F}_\\mathrm{"+pop+"}$",(1.0e-4)*physicalconstants::CHARGE); + //std::stringstream conversion; + //conversion << (1.0e-4)*physicalconstants::CHARGE; + // outputReducer->addMetadata(outputReducer->size()-1,"1/(cm^2 sr s eV)","$\\mathrm{cm}^{-2}\\,\\mathrm{sr}^{-1}\\,\\mathrm{s}^{-1}\\,\\mathrm{eV}^{-1}$","$\\mathcal{F}_\\mathrm{"+pop+"}$",conversion.str()); if(*it == "MaxFieldsdt" || *it == "fg_MaxFieldsdt") { // Maximum timestep constraint as calculated by the fieldsolver outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("MaxFieldsdt",[]( @@ -428,13 +432,13 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{f,max}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{f,max}$","1.0"); continue; } if(*it == "MPIrank" || *it == "vg_rank") { // Map of spatial decomposition of the DCCRG grid into MPI ranks outputReducer->addOperator(new DRO::MPIrank); - outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{MPI rank}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{MPI rank}$",""); continue; } if(*it == "FsGridRank" || *it == "fg_rank") { @@ -456,13 +460,13 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{fGrid rank}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{fGrid rank}$",""); continue; } if(*it == "BoundaryType" || *it == "vg_BoundaryType") { // Type of boundarycells outputReducer->addOperator(new DRO::BoundaryType); - outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{vGrid Boundary type}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{vGrid Boundary type}$",""); continue; } if(*it == "fg_BoundaryType") { @@ -493,13 +497,13 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{fGrid Boundary type}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{fGrid Boundary type}$",""); continue; } if(*it == "BoundaryLayer" || *it == "vg_BoundaryLayer") { // For boundaries with multiple layers: layer count per cell outputReducer->addOperator(new DRO::BoundaryLayer); - outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{vGrid Boundary layer}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{vGrid Boundary layer}$",""); continue; } if(*it == "fg_BoundaryLayer") { @@ -530,7 +534,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{fGrid Boundary layer}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{fGrid Boundary layer}$",""); continue; } if (*it == "populations_Blocks") { @@ -539,14 +543,14 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti 
species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::Blocks(i)); - outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{"+pop+" blocks}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{"+pop+" blocks}$",""); } continue; } if(*it == "fSaved") { // Boolean marker whether a velocity space is saved in a given spatial cell outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("fSaved",CellParams::ISCELLSAVINGF,1)); - outputReducer->addMetadata(outputReducer->size()-1,"","","$f(v)_\\mathrm{saved}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$f(v)_\\mathrm{saved}$",""); continue; } if(*it == "populations_accSubcycles") { @@ -555,14 +559,14 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/acc_subcycles", i, offsetof(spatial_cell::Population, ACCSUBCYCLES), 1)); - outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{"+pop+" Acc subcycles}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{"+pop+" Acc subcycles}$",""); } continue; } if(*it == "VolE" || *it == "vg_VolE") { // Volume-averaged E field outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("E_vol",CellParams::EXVOL,3)); - outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{vol,vg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{vol,vg}$","1.0"); continue; } if(*it == "fg_VolE") { @@ -594,24 +598,24 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{vol,fg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{vol,fg}$","1.0"); continue; } if(*it == "fg_HallE") { // TODO: Add outputreducer to get EHALL from fsgrid - // outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{Hall}$",1.0); + // outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{Hall}$","1.0"); continue; } if(*it =="GradPeE") { // Electron pressure gradient contribution to the generalized ohm's law outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EGRADPE",CellParams::EXGRADPE,3)); - outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_{\\del P_\\mathrm{e}}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_{\\del P_\\mathrm{e}}$","1.0"); continue; } if(*it == "VolB" || *it == "vg_VolB") { // Volume-averaged magnetic field outputReducer->addOperator(new DRO::VariableBVol); - outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,vg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,vg}$","1.0"); continue; } if(*it == "fg_VolB") { // Static (typically dipole) magnetic field part @@ -646,23 +650,23 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - 
outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,fg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,fg}$","1.0"); continue; } if(*it == "BackgroundVolB") { outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("BGB_vol",CellParams::BGBXVOL,3)); - outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,vg,bg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,vg,bg}$","1.0"); continue; } if(*it == "PerturbedVolB") { outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("PERB_vol",CellParams::PERBXVOL,3)); - outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,vg,per}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,vg,per}$","1.0"); continue; } if(*it == "Pressure" || *it== "vg_Pressure") { // Overall scalar pressure from all populations outputReducer->addOperator(new DRO::VariablePressureSolver); - outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$P_\\mathrm{solver}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$P_\\mathrm{solver}$","1.0"); continue; } if(*it == "fg_Pressure") { @@ -694,7 +698,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$P_\\mathrm{fg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$P_\\mathrm{fg}$","1.0"); continue; } if(*it == "populations_PTensor") { @@ -703,42 +707,42 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; outputReducer->addOperator(new DRO::VariablePTensorDiagonal(i)); - outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$\\mathcal{P}_\\mathrm{"+pop+"}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$\\mathcal{P}_\\mathrm{"+pop+"}$","1.0"); outputReducer->addOperator(new DRO::VariablePTensorOffDiagonal(i)); - outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$\\mathcal{\\tilde{P}}_\\mathrm{"+pop+"}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$\\mathcal{\\tilde{P}}_\\mathrm{"+pop+"}$","1.0"); } continue; } if(*it == "BVOLderivs") { // Volume-averaged derivatives outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdy",bvolderivatives::dPERBXVOLdy,1)); - outputReducer->addMetadata(outputReducer->size()-1,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{X,\\mathrm{per,vol,vg}} (\\Delta Y)^{-1}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdz",bvolderivatives::dPERBXVOLdz,1)); - outputReducer->addMetadata(outputReducer->size()-1,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{X,\\mathrm{per,vol,vg}} (\\Delta Z)^{-1}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdx",bvolderivatives::dPERBYVOLdx,1)); - outputReducer->addMetadata(outputReducer->size()-1,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{Y,\\mathrm{per,vol,vg}} (\\Delta X)^{-1}$",1.0); outputReducer->addOperator(new 
DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdz",bvolderivatives::dPERBYVOLdz,1)); - outputReducer->addMetadata(outputReducer->size()-1,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{Y,\\mathrm{per,vol,vg}} (\\Delta Z)^{-1}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdx",bvolderivatives::dPERBZVOLdx,1)); - outputReducer->addMetadata(outputReducer->size()-1,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{Z,\\mathrm{per,vol,vg}} (\\Delta X)^{-1}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdy",bvolderivatives::dPERBZVOLdy,1)); - outputReducer->addMetadata(outputReducer->size()-1,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{Z,\\mathrm{per,vol,vg}} (\\Delta Y)^{-1}$",1.0); + outputReducer->addMetadata(outputReducer->size()-6,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{X,\\mathrm{per,vol,vg}} (\\Delta Y)^{-1}$","1.0"); + outputReducer->addMetadata(outputReducer->size()-5,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{X,\\mathrm{per,vol,vg}} (\\Delta Z)^{-1}$","1.0"); + outputReducer->addMetadata(outputReducer->size()-4,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{Y,\\mathrm{per,vol,vg}} (\\Delta X)^{-1}$","1.0"); + outputReducer->addMetadata(outputReducer->size()-3,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{Y,\\mathrm{per,vol,vg}} (\\Delta Z)^{-1}$","1.0"); + outputReducer->addMetadata(outputReducer->size()-2,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{Z,\\mathrm{per,vol,vg}} (\\Delta X)^{-1}$","1.0"); + outputReducer->addMetadata(outputReducer->size()-1,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{Z,\\mathrm{per,vol,vg}} (\\Delta Y)^{-1}$","1.0"); continue; } if(*it == "vg_GridCoordinates") { // Spatial coordinates for each cell outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_X",CellParams::XCRD,1)); - outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$X_\\mathrm{vg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_Y",CellParams::YCRD,1)); - outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$Y_\\mathrm{vg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_Z",CellParams::ZCRD,1)); - outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$Z_\\mathrm{vg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_DX",CellParams::DX,1)); - outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta X_\\mathrm{vg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_DY",CellParams::DY,1)); - outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta Y_\\mathrm{vg}$",1.0); outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_DZ",CellParams::DZ,1)); - outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta Z_\\mathrm{vg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-6,"m","$\\mathrm{m}$","$X_\\mathrm{vg}$","1.0"); + outputReducer->addMetadata(outputReducer->size()-5,"m","$\\mathrm{m}$","$Y_\\mathrm{vg}$","1.0"); + outputReducer->addMetadata(outputReducer->size()-4,"m","$\\mathrm{m}$","$Z_\\mathrm{vg}$","1.0"); + outputReducer->addMetadata(outputReducer->size()-3,"m","$\\mathrm{m}$","$\\delta X_\\mathrm{vg}$","1.0"); + outputReducer->addMetadata(outputReducer->size()-2,"m","$\\mathrm{m}$","$\\delta Y_\\mathrm{vg}$","1.0"); + 
outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta Z_\\mathrm{vg}$","1.0"); continue; } if(*it == "fg_GridCoordinates") { @@ -768,7 +772,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$X_\\mathrm{fg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$X_\\mathrm{fg}$","1.0"); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_Y",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, @@ -795,7 +799,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$Y_\\mathrm{fg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$Y_\\mathrm{fg}$","1.0"); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_Z",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, @@ -822,7 +826,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$Z_\\mathrm{fg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$Z_\\mathrm{fg}$","1.0"); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_DX",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, @@ -849,7 +853,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta X_\\mathrm{fg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta X_\\mathrm{fg}$","1.0"); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_DY",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, @@ -876,7 +880,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta Y_\\mathrm{fg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta Y_\\mathrm{fg}$","1.0"); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_DZ",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, @@ -903,12 +907,12 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta Z_\\mathrm{fg}$",1.0); + outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta Z_\\mathrm{fg}$","1.0"); continue; } if (*it == "MeshData") { outputReducer->addOperator(new DRO::VariableMeshData); - outputReducer->addMetadata(outputReducer->size()-1,"","","\\mathrm{Mesh data}$",0.0); + outputReducer->addMetadata(outputReducer->size()-1,"","","\\mathrm{Mesh data}$",""); continue; } // After all the continue; statements one should never land here. @@ -1064,7 +1068,7 @@ bool DataReducer::getDataVectorInfo(const unsigned int& operatorID,std::string& * @param conversionFactor floating point conversion factor between DRO result and SI units * @return If true, the given metadata was added successfully. 
*/ -bool DataReducer::addMetadata(const unsigned int operatorID, std::string unit,std::string unitLaTeX,std::string variableLaTeX,Real unitConversion) { +bool DataReducer::addMetadata(const unsigned int operatorID, std::string unit,std::string unitLaTeX,std::string variableLaTeX,std::string unitConversion) { if (operatorID >= operators.size()) return false; return operators[operatorID]->setUnitMetadata(unit,unitLaTeX,variableLaTeX,unitConversion); } @@ -1076,7 +1080,7 @@ bool DataReducer::addMetadata(const unsigned int operatorID, std::string unit,st * @param unitConversion Floating point value of conversion factor to SI units * @return If true, DataReductionOperator was found and it returned sensible values. */ -bool DataReducer::getMetadata(const unsigned int& operatorID,std::string& unit,std::string& unitLaTeX,std::string& variableLaTeX,Real& unitConversion) const { +bool DataReducer::getMetadata(const unsigned int& operatorID,std::string& unit,std::string& unitLaTeX,std::string& variableLaTeX,std::string& unitConversion) const { if (operatorID >= operators.size()) return false; return operators[operatorID]->getUnitMetadata(unit, unitLaTeX, variableLaTeX, unitConversion); } diff --git a/datareduction/datareducer.h b/datareduction/datareducer.h index ac79f1551..a8816e079 100644 --- a/datareduction/datareducer.h +++ b/datareduction/datareducer.h @@ -44,8 +44,8 @@ class DataReducer { bool addOperator(DRO::DataReductionOperator* op); bool getDataVectorInfo(const unsigned int& operatorID,std::string& dataType, unsigned int& dataSize,unsigned int& vectorSize) const; - bool addMetadata(const unsigned int operatorID,std::string unit,std::string unitLaTeX,std::string variableLaTeX,Real unitConversion); - bool getMetadata(const unsigned int& operatorID,std::string& unit,std::string& unitLaTeX,std::string& variableLaTeX,Real& unitConversion) const; + bool addMetadata(const unsigned int operatorID,std::string unit,std::string unitLaTeX,std::string variableLaTeX,std::string unitConversion); + bool getMetadata(const unsigned int& operatorID,std::string& unit,std::string& unitLaTeX,std::string& variableLaTeX,std::string& unitConversion) const; std::string getName(const unsigned int& operatorID) const; bool handlesWriting(const unsigned int& operatorID) const; diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index b4c4be605..3471c023e 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -144,6 +144,10 @@ namespace DRO { std::map attribs; attribs["mesh"]=meshName; attribs["name"]=variableName; + attribs["unit"]=unit; + attribs["unitLaTeX"]=unitLaTeX; + attribs["unitConversion"]=unitConversion; + attribs["variableLaTeX"]=variableLaTeX; std::vector varBuffer = lambda(perBGrid,EGrid,EHallGrid,EGradPeGrid,momentsGrid,dPerBGrid,dMomentsGrid,BgBGrid,volGrid,technicalGrid); diff --git a/datareduction/datareductionoperator.h b/datareduction/datareductionoperator.h index 325ce452b..a9809a5ea 100644 --- a/datareduction/datareductionoperator.h +++ b/datareduction/datareductionoperator.h @@ -57,14 +57,14 @@ namespace DRO { virtual ~DataReductionOperator(); virtual bool getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const = 0; - virtual bool getUnitMetadata(std::string& _unit,std::string& _unitLaTeX,std::string& _variableLaTeX,Real _unitConversion) { + virtual bool getUnitMetadata(std::string& _unit,std::string& _unitLaTeX,std::string& _variableLaTeX,std::string& _unitConversion) { 
_unit=unit; _unitLaTeX=unitLaTeX; _unitConversion=unitConversion; _variableLaTeX=variableLaTeX; return true; }; - virtual bool setUnitMetadata(std::string& _unit,std::string& _unitLaTeX,std::string& _variableLaTeX,Real _unitConversion) { + virtual bool setUnitMetadata(std::string& _unit,std::string& _unitLaTeX,std::string& _variableLaTeX,std::string& _unitConversion) { unit = _unit; unitLaTeX = _unitLaTeX; unitConversion = _unitConversion; @@ -81,7 +81,7 @@ namespace DRO { std::string unit; std::string unitLaTeX; std::string variableLaTeX; - Real unitConversion; + std::string unitConversion; }; diff --git a/iowrite.cpp b/iowrite.cpp index 69947052a..4de52492e 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -318,9 +318,8 @@ bool writeDataReducer(const dccrg::Dccrg& int dataReducerIndex, Writer& vlsvWriter){ map attribs; - string variableName,dataType,unitString,unitStringLaTeX, variableStringLaTeX; + string variableName,dataType,unitString,unitStringLaTeX, variableStringLaTeX, unitConversionFactor; bool success=true; - Real unitConversionFactor; const string meshName = "SpatialGrid"; variableName = dataReducer.getName(dataReducerIndex); From 560a66229fef45567c84db220cbf6f897cadf657 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Tue, 21 May 2019 10:03:04 +0300 Subject: [PATCH 471/602] Cleanup of output variable list --- datareduction/datareducer.cpp | 4 ++-- parameters.cpp | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 8cbdc034e..017601403 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -469,7 +469,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{vGrid Boundary type}$",""); continue; } - if(*it == "fg_BoundaryType") { + if(*it == "FsGridBoundaryType" || *it == "fg_BoundaryType") { // Type of boundarycells as stored in FSGrid outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("FsGridBoundaryType",[]( FsGrid< std::array, 2>& perBGrid, @@ -506,7 +506,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{vGrid Boundary layer}$",""); continue; } - if(*it == "fg_BoundaryLayer") { + if(*it == "FsGridBoundaryLayer" || *it == "fg_BoundaryLayer") { // Type of boundarycells as stored in FSGrid outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("FsGridBoundaryLayer",[]( FsGrid< std::array, 2>& perBGrid, diff --git a/parameters.cpp b/parameters.cpp index 0c022970f..eb3e049f4 100644 --- a/parameters.cpp +++ b/parameters.cpp @@ -216,8 +216,8 @@ bool Parameters::addParameters(){ // Output variable parameters // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh Readparameters::addComposing("variables.output", std::string()+"List of data reduction operators (DROs) to add to the grid file output. Each variable to be added has to be on a new line output = XXX. 
"+ - "Available (20190514): "+ - "B fg_B BackgroundB vg_BackgroundB fg_BackgroundB PerturbedB vg_PerturbedB fg_PerturbedB "+ + "Available (20190521): "+ + "B fg_B BackgroundB fg_BackgroundB PerturbedB fg_PerturbedB "+ "E fg_E "+ "Rhom vg_Rhom fg_Rhom Rhoq vg_Rhoq fg_Rhoq populations_Rho "+ "V vg_V fg_V populations_V "+ @@ -226,13 +226,13 @@ bool Parameters::addParameters(){ "populations_EnergyDensity "+ "LBweight MaxVdt MaxRdt populations_MaxVdt populations_MaxRdt MaxFieldsdt "+ "MPIrank vg_rank FsGridRank fg_rank "+ - "FsGridBoundaryType BoundaryType vg_BoundaryType fg_BoundaryType BoundaryLayer vg_BoundaryLayer fg_BoundaryLayer "+ + "FsGridBoundaryType BoundaryType vg_BoundaryType fg_BoundaryType FsGridBoundaryLayer BoundaryLayer vg_BoundaryLayer fg_BoundaryLayer "+ "populations_Blocks fSaved "+ "populations_accSubcycles "+ "VolE vg_VolE fg_VolE HallE GradPeE VolB vg_VolB fg_VolB BackgroundVolB PerturbedVolB "+ "Pressure vg_Pressure fg_Pressure populations_PTensor "+ - "derivs BVOLderivs "+ - "GridCoordinates BackgroundVolE MeshData"); + "BVOLderivs "+ + "vg_GridCoordinates fg_GridCoordinates MeshData"); // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh Readparameters::addComposing("variables.diagnostic", std::string()+"List of data reduction operators (DROs) to add to the diagnostic runtime output. Each variable to be added has to be on a new line diagnostic = XXX. "+ From f941a392ee8a04a2ff9fa3e701307b61600e67af Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Tue, 21 May 2019 10:15:07 +0300 Subject: [PATCH 472/602] one missing merge conflict sorted --- datareduction/datareducer.cpp | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 62edff4b6..afc862123 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -402,6 +402,8 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "populations_PrecipitationFlux") { // Per-population precipitation differential flux for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { + species::Species& species=getObjectWrapper().particleSpecies[i]; + const std::string& pop = species.name; outputReducer->addOperator(new DRO::VariablePrecipitationDiffFlux(i)); std::stringstream conversion; conversion << (1.0e-4)*physicalconstants::CHARGE; @@ -606,11 +608,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{vol,fg}$","1.0"); continue; } -<<<<<<< HEAD - if(*it == "fg_HallE") { - // TODO: Add outputreducer to get EHALL from fsgrid - // outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{Hall}$","1.0"); -======= if(*it == "HallE" || *it == "fg_HallE") { for(int index=0; indexaddMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{Hall}$","1.0"); } ->>>>>>> dev continue; } if(*it =="GradPeE") { From bb614cfa4f1e7cf16adf354d82a4833627495a54 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Tue, 21 May 2019 12:53:55 +0300 Subject: [PATCH 473/602] Changed Vec type arrays that get allocated dynamically to std::vector> types. 
--- vlasovsolver/cpu_trans_map_amr.cpp | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 8fbcba984..a35bea699 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -3,6 +3,7 @@ #include "vec.h" #include "../grid.h" #include "../object_wrapper.h" +#include "../memoryallocation.h" #include "cpu_trans_map_amr.hpp" #include "cpu_trans_map.hpp" @@ -520,7 +521,8 @@ void propagatePencil(Vec* dz, Vec* values, const uint dimension, const uint nTargetNeighborsPerPencil = 1; // Vector buffer where we write data, initialized to 0*/ - Vec targetValues[(lengthOfPencil + 2 * nTargetNeighborsPerPencil) * WID3 / VECL]; + std::vector> targetValues; + targetValues.reserve((lengthOfPencil + 2 * nTargetNeighborsPerPencil) * WID3 / VECL); for (uint i = 0; i < (lengthOfPencil + 2 * nTargetNeighborsPerPencil) * WID3 / VECL; i++) { @@ -732,7 +734,7 @@ void copy_trans_block_data_amr( // Copy volume averages of this block from all spatial cells: for (int b = -VLASOV_STENCIL_WIDTH; b < lengthOfPencil + VLASOV_STENCIL_WIDTH; b++) { if(blockDataPointer[b + VLASOV_STENCIL_WIDTH] != NULL) { - Realv blockValues[WID3]; + Realf blockValues[WID3]; const Realf* block_data = blockDataPointer[b + VLASOV_STENCIL_WIDTH]; // Copy data to a temporary array and transpose values so that mapping is along k direction. // spatial source_neighbors already taken care of when @@ -1100,7 +1102,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& std::vector targetBlockData((pencils.sumOfLengths + 2 * pencils.N) * WID3); // Allocate vectorized targetvecdata sum(lengths of pencils)*WID3 / VECL) // Add padding by 2 for each pencil - Vec targetVecData[(pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL]; + std::vector> targetVecData; + targetVecData.reserve((pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL); // Initialize targetvecdata to 0 for( uint i = 0; i < (pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL; i++ ) { @@ -1132,7 +1135,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& computeSpatialSourceCellsForPencil(mpiGrid, pencils, pencili, dimension, sourceCells.data()); // dz is the cell size in the direction of the pencil - Vec dz[sourceCells.size()]; + std::vector> dz; + dz.reserve(sourceLength); uint i = 0; for(auto cell: sourceCells) { switch (dimension) { @@ -1152,15 +1156,16 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Allocate source data: sourcedata> sourceVecData; + sourceVecData.reserve(sourceLength * WID3 / VECL); // load data(=> sourcedata) / (proper xy reconstruction in future) - copy_trans_block_data_amr(sourceCells.data(), blockGID, L, sourceVecData, + copy_trans_block_data_amr(sourceCells.data(), blockGID, L, sourceVecData.data(), cellid_transpose, popID); // Dz and sourceVecData are both padded by VLASOV_STENCIL_WIDTH // Dz has 1 value/cell, sourceVecData has WID3 values/cell - propagatePencil(dz, sourceVecData, dimension, blockGID, dt, vmesh, L); + propagatePencil(dz.data(), sourceVecData.data(), dimension, blockGID, dt, vmesh, L); if (printTargets) std::cout << "Target cells for pencil " << pencili << ", rank " << myRank << ": "; // sourcedata => targetdata[this pencil]) @@ -1231,7 +1236,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Loop over cells in pencil +- 1 padded cell for ( uint celli = 0; celli < targetLength; ++celli ) { - Realv vector[VECL]; + Realf vector[VECL]; // Loop over 1st vspace dimension for (uint k = 0; k < WID; ++k) { // Loop over 2nd vspace 
dimension From a7f6da691e828a47e10c11dd0a5a753c7ccf8c3d Mon Sep 17 00:00:00 2001 From: ykempf Date: Tue, 21 May 2019 14:59:26 +0300 Subject: [PATCH 474/602] Added AMR support to Fluctuations. --- projects/Fluctuations/Fluctuations.cpp | 80 ++++++++++++++++++++++++++ projects/Fluctuations/Fluctuations.h | 1 + 2 files changed, 81 insertions(+) diff --git a/projects/Fluctuations/Fluctuations.cpp b/projects/Fluctuations/Fluctuations.cpp index b891aa382..7cc5dab64 100644 --- a/projects/Fluctuations/Fluctuations.cpp +++ b/projects/Fluctuations/Fluctuations.cpp @@ -212,5 +212,85 @@ namespace projects { centerPoints.push_back(V0); return centerPoints; } + + bool Fluctuations::refineSpatialCells( dccrg::Dccrg& mpiGrid ) const { + + int myRank; + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + + if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << +std::endl; + + std::vector refineSuccess; + + for (int i = 0; i < 2 * P::amrBoxHalfWidthX; ++i) { + for (int j = 0; j < 2 * P::amrBoxHalfWidthY; ++j) { + for (int k = 0; k < 2 * P::amrBoxHalfWidthZ; ++k) { + + std::array xyz; + xyz[0] = P::amrBoxCenterX + (0.5 + i - P::amrBoxHalfWidthX) * P::dx_ini; + xyz[1] = P::amrBoxCenterY + (0.5 + j - P::amrBoxHalfWidthY) * P::dy_ini; + xyz[2] = P::amrBoxCenterZ + (0.5 + k - P::amrBoxHalfWidthZ) * P::dz_ini; + + CellID myCell = mpiGrid.get_existing_cell(xyz); + if (mpiGrid.refine_completely_at(xyz)) { +#ifndef NDEBUG + std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; +#endif + } + } + } + } + std::vector refinedCells = mpiGrid.stop_refining(true); + if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << std::endl; +#ifndef NDEBUG + if(refinedCells.size() > 0) { + std::cout << "Refined cells produced by rank " << myRank << " are: "; + for (auto cellid : refinedCells) { + std::cout << cellid << " "; + } + std::cout << std::endl; + } +#endif + + mpiGrid.balance_load(); + + if(mpiGrid.get_maximum_refinement_level() > 1) { + + for (int i = 0; i < 2 * P::amrBoxHalfWidthX; ++i) { + for (int j = 0; j < 2 * P::amrBoxHalfWidthY; ++j) { + for (int k = 0; k < 2 * P::amrBoxHalfWidthZ; ++k) { + + std::array xyz; + xyz[0] = P::amrBoxCenterX + 0.5 * (0.5 + i - P::amrBoxHalfWidthX) * P::dx_ini; + xyz[1] = P::amrBoxCenterY + 0.5 * (0.5 + j - P::amrBoxHalfWidthY) * P::dy_ini; + xyz[2] = P::amrBoxCenterZ + 0.5 * (0.5 + k - P::amrBoxHalfWidthZ) * P::dz_ini; + + CellID myCell = mpiGrid.get_existing_cell(xyz); + if (mpiGrid.refine_completely_at(xyz)) { +#ifndef NDEBUG + std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; +#endif + } + } + } + } + + std::vector refinedCells = mpiGrid.stop_refining(true); + if(myRank == MASTER_RANK) std::cout << "Finished second level of refinement" << std::endl; +#ifndef NDEBUG + if(refinedCells.size() > 0) { + std::cout << "Refined cells produced by rank " << myRank << " are: "; + for (auto cellid : refinedCells) { + std::cout << cellid << " "; + } + std::cout << std::endl; + } +#endif + mpiGrid.balance_load(); + } + + return true; + } } // namespace projects diff --git a/projects/Fluctuations/Fluctuations.h b/projects/Fluctuations/Fluctuations.h index 26c64a3cd..5fd023ae4 100644 --- a/projects/Fluctuations/Fluctuations.h +++ b/projects/Fluctuations/Fluctuations.h @@ -68,6 +68,7 @@ namespace projects { creal& vx, creal& vy, creal& vz, creal& dvx, creal& dvy, creal& dvz,const uint popID ) const; + bool refineSpatialCells( dccrg::Dccrg& mpiGrid ) const; Real BX0; 
Real BY0; From 80c9fa8155025717ff8bdb11d0ebce409e0d2ea0 Mon Sep 17 00:00:00 2001 From: ykempf Date: Tue, 21 May 2019 15:12:13 +0300 Subject: [PATCH 475/602] Moved Fluctuations/Flowthrough refineSpatialCells to projects.cpp. --- projects/Flowthrough/Flowthrough.cpp | 79 ------------------------ projects/Flowthrough/Flowthrough.h | 1 - projects/Fluctuations/Fluctuations.cpp | 80 ------------------------ projects/Fluctuations/Fluctuations.h | 1 - projects/project.cpp | 85 ++++++++++++++++++++++++-- 5 files changed, 79 insertions(+), 167 deletions(-) diff --git a/projects/Flowthrough/Flowthrough.cpp b/projects/Flowthrough/Flowthrough.cpp index a1541ca0d..9deb584d8 100644 --- a/projects/Flowthrough/Flowthrough.cpp +++ b/projects/Flowthrough/Flowthrough.cpp @@ -227,83 +227,4 @@ namespace projects { return centerPoints; } - bool Flowthrough::refineSpatialCells( dccrg::Dccrg& mpiGrid ) const { - - int myRank; - MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - - if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; - - std::vector refineSuccess; - - for (int i = 0; i < 2 * P::amrBoxHalfWidthX; ++i) { - for (int j = 0; j < 2 * P::amrBoxHalfWidthY; ++j) { - for (int k = 0; k < 2 * P::amrBoxHalfWidthZ; ++k) { - - std::array xyz; - xyz[0] = P::amrBoxCenterX + (0.5 + i - P::amrBoxHalfWidthX) * P::dx_ini; - xyz[1] = P::amrBoxCenterY + (0.5 + j - P::amrBoxHalfWidthY) * P::dy_ini; - xyz[2] = P::amrBoxCenterZ + (0.5 + k - P::amrBoxHalfWidthZ) * P::dz_ini; - - CellID myCell = mpiGrid.get_existing_cell(xyz); - if (mpiGrid.refine_completely_at(xyz)) { -#ifndef NDEBUG - std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; -#endif - } - } - } - } - std::vector refinedCells = mpiGrid.stop_refining(true); - if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << endl; -#ifndef NDEBUG - if(refinedCells.size() > 0) { - std::cout << "Refined cells produced by rank " << myRank << " are: "; - for (auto cellid : refinedCells) { - std::cout << cellid << " "; - } - std::cout << endl; - } -#endif - - mpiGrid.balance_load(); - - if(mpiGrid.get_maximum_refinement_level() > 1) { - - for (int i = 0; i < 2 * P::amrBoxHalfWidthX; ++i) { - for (int j = 0; j < 2 * P::amrBoxHalfWidthY; ++j) { - for (int k = 0; k < 2 * P::amrBoxHalfWidthZ; ++k) { - - std::array xyz; - xyz[0] = P::amrBoxCenterX + 0.5 * (0.5 + i - P::amrBoxHalfWidthX) * P::dx_ini; - xyz[1] = P::amrBoxCenterY + 0.5 * (0.5 + j - P::amrBoxHalfWidthY) * P::dy_ini; - xyz[2] = P::amrBoxCenterZ + 0.5 * (0.5 + k - P::amrBoxHalfWidthZ) * P::dz_ini; - - CellID myCell = mpiGrid.get_existing_cell(xyz); - if (mpiGrid.refine_completely_at(xyz)) { -#ifndef NDEBUG - std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; -#endif - } - } - } - } - - std::vector refinedCells = mpiGrid.stop_refining(true); - if(myRank == MASTER_RANK) std::cout << "Finished second level of refinement" << endl; -#ifndef NDEBUG - if(refinedCells.size() > 0) { - std::cout << "Refined cells produced by rank " << myRank << " are: "; - for (auto cellid : refinedCells) { - std::cout << cellid << " "; - } - std::cout << endl; - } -#endif - mpiGrid.balance_load(); - } - - return true; - } - } //namespace projects diff --git a/projects/Flowthrough/Flowthrough.h b/projects/Flowthrough/Flowthrough.h index e58976a55..99ed4d73a 100644 --- a/projects/Flowthrough/Flowthrough.h +++ b/projects/Flowthrough/Flowthrough.h @@ -60,7 +60,6 @@ namespace projects { creal& dvx, creal& dvy, 
creal& dvz, const uint popID ) const; - bool refineSpatialCells( dccrg::Dccrg& mpiGrid ) const; virtual void calcCellParameters(spatial_cell::SpatialCell* cell,creal& t); virtual Real calcPhaseSpaceDensity( creal& x, creal& y, creal& z, diff --git a/projects/Fluctuations/Fluctuations.cpp b/projects/Fluctuations/Fluctuations.cpp index 7cc5dab64..b891aa382 100644 --- a/projects/Fluctuations/Fluctuations.cpp +++ b/projects/Fluctuations/Fluctuations.cpp @@ -212,85 +212,5 @@ namespace projects { centerPoints.push_back(V0); return centerPoints; } - - bool Fluctuations::refineSpatialCells( dccrg::Dccrg& mpiGrid ) const { - - int myRank; - MPI_Comm_rank(MPI_COMM_WORLD,&myRank); - - if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << -std::endl; - - std::vector refineSuccess; - - for (int i = 0; i < 2 * P::amrBoxHalfWidthX; ++i) { - for (int j = 0; j < 2 * P::amrBoxHalfWidthY; ++j) { - for (int k = 0; k < 2 * P::amrBoxHalfWidthZ; ++k) { - - std::array xyz; - xyz[0] = P::amrBoxCenterX + (0.5 + i - P::amrBoxHalfWidthX) * P::dx_ini; - xyz[1] = P::amrBoxCenterY + (0.5 + j - P::amrBoxHalfWidthY) * P::dy_ini; - xyz[2] = P::amrBoxCenterZ + (0.5 + k - P::amrBoxHalfWidthZ) * P::dz_ini; - - CellID myCell = mpiGrid.get_existing_cell(xyz); - if (mpiGrid.refine_completely_at(xyz)) { -#ifndef NDEBUG - std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; -#endif - } - } - } - } - std::vector refinedCells = mpiGrid.stop_refining(true); - if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << std::endl; -#ifndef NDEBUG - if(refinedCells.size() > 0) { - std::cout << "Refined cells produced by rank " << myRank << " are: "; - for (auto cellid : refinedCells) { - std::cout << cellid << " "; - } - std::cout << std::endl; - } -#endif - - mpiGrid.balance_load(); - - if(mpiGrid.get_maximum_refinement_level() > 1) { - - for (int i = 0; i < 2 * P::amrBoxHalfWidthX; ++i) { - for (int j = 0; j < 2 * P::amrBoxHalfWidthY; ++j) { - for (int k = 0; k < 2 * P::amrBoxHalfWidthZ; ++k) { - - std::array xyz; - xyz[0] = P::amrBoxCenterX + 0.5 * (0.5 + i - P::amrBoxHalfWidthX) * P::dx_ini; - xyz[1] = P::amrBoxCenterY + 0.5 * (0.5 + j - P::amrBoxHalfWidthY) * P::dy_ini; - xyz[2] = P::amrBoxCenterZ + 0.5 * (0.5 + k - P::amrBoxHalfWidthZ) * P::dz_ini; - - CellID myCell = mpiGrid.get_existing_cell(xyz); - if (mpiGrid.refine_completely_at(xyz)) { -#ifndef NDEBUG - std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; -#endif - } - } - } - } - - std::vector refinedCells = mpiGrid.stop_refining(true); - if(myRank == MASTER_RANK) std::cout << "Finished second level of refinement" << std::endl; -#ifndef NDEBUG - if(refinedCells.size() > 0) { - std::cout << "Refined cells produced by rank " << myRank << " are: "; - for (auto cellid : refinedCells) { - std::cout << cellid << " "; - } - std::cout << std::endl; - } -#endif - mpiGrid.balance_load(); - } - - return true; - } } // namespace projects diff --git a/projects/Fluctuations/Fluctuations.h b/projects/Fluctuations/Fluctuations.h index 5fd023ae4..26c64a3cd 100644 --- a/projects/Fluctuations/Fluctuations.h +++ b/projects/Fluctuations/Fluctuations.h @@ -68,7 +68,6 @@ namespace projects { creal& vx, creal& vy, creal& vz, creal& dvx, creal& dvy, creal& dvz,const uint popID ) const; - bool refineSpatialCells( dccrg::Dccrg& mpiGrid ) const; Real BX0; Real BY0; diff --git a/projects/project.cpp b/projects/project.cpp index dc14e1b0e..150a59fb6 100644 --- 
a/projects/project.cpp +++ b/projects/project.cpp @@ -515,13 +515,86 @@ namespace projects { Base class function prints a warning and does nothing. */ bool Project::refineSpatialCells( dccrg::Dccrg& mpiGrid ) const { - int rank; - MPI_Comm_rank(MPI_COMM_WORLD,&rank); - if (rank == MASTER_RANK) { - cerr << "(Project.cpp) Base class 'refineSpatialCells' in " << __FILE__ << ":" << __LINE__ << " called. This function does nothing." << endl; + int myRank; + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + if (myRank == MASTER_RANK) { + cerr << "(Project.cpp) Base class 'refineSpatialCells' in " << __FILE__ << ":" << __LINE__ << " called. Make sure that this is correct." << endl; } - - return false; + + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + + if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; + + std::vector refineSuccess; + + for (int i = 0; i < 2 * P::amrBoxHalfWidthX; ++i) { + for (int j = 0; j < 2 * P::amrBoxHalfWidthY; ++j) { + for (int k = 0; k < 2 * P::amrBoxHalfWidthZ; ++k) { + + std::array xyz; + xyz[0] = P::amrBoxCenterX + (0.5 + i - P::amrBoxHalfWidthX) * P::dx_ini; + xyz[1] = P::amrBoxCenterY + (0.5 + j - P::amrBoxHalfWidthY) * P::dy_ini; + xyz[2] = P::amrBoxCenterZ + (0.5 + k - P::amrBoxHalfWidthZ) * P::dz_ini; + + CellID myCell = mpiGrid.get_existing_cell(xyz); + if (mpiGrid.refine_completely_at(xyz)) { + #ifndef NDEBUG + std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; + #endif + } + } + } + } + std::vector refinedCells = mpiGrid.stop_refining(true); + if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << endl; + #ifndef NDEBUG + if(refinedCells.size() > 0) { + std::cout << "Refined cells produced by rank " << myRank << " are: "; + for (auto cellid : refinedCells) { + std::cout << cellid << " "; + } + std::cout << endl; + } + #endif + + mpiGrid.balance_load(); + + if(mpiGrid.get_maximum_refinement_level() > 1) { + + for (int i = 0; i < 2 * P::amrBoxHalfWidthX; ++i) { + for (int j = 0; j < 2 * P::amrBoxHalfWidthY; ++j) { + for (int k = 0; k < 2 * P::amrBoxHalfWidthZ; ++k) { + + std::array xyz; + xyz[0] = P::amrBoxCenterX + 0.5 * (0.5 + i - P::amrBoxHalfWidthX) * P::dx_ini; + xyz[1] = P::amrBoxCenterY + 0.5 * (0.5 + j - P::amrBoxHalfWidthY) * P::dy_ini; + xyz[2] = P::amrBoxCenterZ + 0.5 * (0.5 + k - P::amrBoxHalfWidthZ) * P::dz_ini; + + CellID myCell = mpiGrid.get_existing_cell(xyz); + if (mpiGrid.refine_completely_at(xyz)) { + #ifndef NDEBUG + std::cout << "Rank " << myRank << " is refining cell " << myCell << std::endl; + #endif + } + } + } + } + + std::vector refinedCells = mpiGrid.stop_refining(true); + if(myRank == MASTER_RANK) std::cout << "Finished second level of refinement" << endl; + #ifndef NDEBUG + if(refinedCells.size() > 0) { + std::cout << "Refined cells produced by rank " << myRank << " are: "; + for (auto cellid : refinedCells) { + std::cout << cellid << " "; + } + std::cout << endl; + } + #endif + mpiGrid.balance_load(); + } + + return true; } Project* createProject() { From 0a2e7bdd0c1ed8ef6429e2926a2e11c0b9621ef0 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Tue, 21 May 2019 15:37:29 +0300 Subject: [PATCH 476/602] replaced backstreaming with nonthermal and nonbackstreaming with thermal. Consolidated output variable names, made them all lowercase. 
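The two passes consolidated into Project::refineSpatialCells above share one control flow: sweep a box of cell-center coordinates, queue full refinement at each point with refine_completely_at, commit the whole queue with stop_refining(true), then rebalance. On the second pass the offsets shrink by a factor of 0.5 because the level-1 cells are half as wide. A minimal standalone sketch of that pattern, with a hypothetical Grid stub standing in for dccrg::Dccrg (the stub's method bodies and the box parameters are illustrative, not the Vlasiator or dccrg API):

#include <array>
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-in exposing only the calls the pattern needs.
struct Grid {
   bool refine_completely_at(const std::array<double,3>&) { return true; }
   std::vector<std::uint64_t> stop_refining(bool) { return {}; }
   void balance_load() {}
   int get_maximum_refinement_level() const { return 2; }
};

// One refinement pass over a box of half-widths hw around center;
// scale = 1.0 on the first pass, 0.5 on the second (finer cells).
void refineBox(Grid& grid, const std::array<double,3>& center,
               const std::array<int,3>& hw, const std::array<double,3>& d,
               double scale) {
   for (int i = 0; i < 2*hw[0]; ++i)
      for (int j = 0; j < 2*hw[1]; ++j)
         for (int k = 0; k < 2*hw[2]; ++k) {
            const std::array<double,3> xyz{{
               center[0] + scale*(0.5 + i - hw[0])*d[0],
               center[1] + scale*(0.5 + j - hw[1])*d[1],
               center[2] + scale*(0.5 + k - hw[2])*d[2]}};
            grid.refine_completely_at(xyz);   // queue, do not commit yet
         }
   grid.stop_refining(true);   // commit all queued refines at once
   grid.balance_load();        // redistribute cells after the mesh changed
}

int main() {
   Grid grid;
   const std::array<double,3> center{{0,0,0}}, d{{1,1,1}};
   const std::array<int,3> hw{{2,2,2}};
   refineBox(grid, center, hw, d, 1.0);
   if (grid.get_maximum_refinement_level() > 1) refineBox(grid, center, hw, d, 0.5);
   std::cout << "two-pass refinement done\n";
}

Queueing every request and committing once per pass lets the grid library resolve all induced refinements in one step before the load balance, which is the apparent intent of calling stop_refining(true) only after each full box sweep.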
--- datareduction/datareducer.cpp | 264 ++++++++--------- datareduction/datareductionoperator.cpp | 367 ++++++++++++------------ datareduction/datareductionoperator.h | 56 ++-- object_wrapper.cpp | 20 +- particle_species.h | 4 +- 5 files changed, 357 insertions(+), 354 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index afc862123..92b1ba119 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -26,6 +26,7 @@ #include "datareducer.h" #include "../common.h" #include "dro_populations.h" +#include using namespace std; void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosticReducer) @@ -42,8 +43,11 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti unit, unit in LaTeX formulation, variable in LaTeX formulation, conversion factor */ - if(*it == "fg_B" || *it == "B") { // Bulk magnetic field at Yee-Lattice locations - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_B",[]( + // Sidestep mixed case errors + const std::string lowercase = boost::algorithm::to_lower_copy(*it); + + if(lowercase == "fg_b" || lowercase == "b") { // Bulk magnetic field at Yee-Lattice locations + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_b",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -77,8 +81,8 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B$","1.0"); continue; } - if(*it == "fg_BackgroundB" || *it == "BackgroundB") { // Static (typically dipole) magnetic field part - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_background_B",[]( + if(lowercase == "fg_backgroundb" || lowercase == "backgroundb" || lowercase == "fg_b_background") { // Static (typically dipole) magnetic field part + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_b_background",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -109,8 +113,8 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{bg}$","1.0"); continue; } - if(*it == "fg_PerturbedB" || *it == "PerturbedB") { // Fluctuating magnetic field part - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_perturbed_B",[]( + if(lowercase == "fg_perturbedb" || lowercase == "perturbedb" || lowercase == "fg_b_perturbed") { // Fluctuating magnetic field part + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_b_perturbed",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -141,8 +145,8 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{per}$)","1.0"); continue; } - if(*it == "fg_E" || *it== "E") { // Bulk electric field at Yee-lattice locations - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_E",[]( + if(lowercase == "fg_e" || lowercase == "e") { // Bulk electric field at Yee-lattice locations + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_e",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -173,12 +177,12 @@ void initializeDataReducers(DataReducer * 
outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E$","1.0"); continue; } - if(*it == "vg_Rhom" || *it == "Rhom") { // Overall mass density (summed over all populations) - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("rhom",CellParams::RHOM,1)); + if(lowercase == "vg_rhom" || lowercase == "rhom") { // Overall mass density (summed over all populations) + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_rhom",CellParams::RHOM,1)); outputReducer->addMetadata(outputReducer->size()-1,"kg/m^3","$\\mathrm{kg}\\,\\mathrm{m}^{-3}$","$\\rho_\\mathrm{m}$","1.0"); continue; } - if(*it == "fg_Rhom") { // Overall mass density (summed over all populations) + if(lowercase == "fg_rhom") { // Overall mass density (summed over all populations) outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_rhom",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, @@ -208,12 +212,12 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"kg/m^3","$\\mathrm{kg}\\,\\mathrm{m}^{-3}$","$\\rho_\\mathrm{m}$","1.0"); continue; } - if(*it == "vg_Rhoq" || *it == "Rhoq") { // Overall charge density (summed over all populations) - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("rhoq",CellParams::RHOQ,1)); + if(lowercase == "vg_rhoq" || lowercase == "rhoq") { // Overall charge density (summed over all populations) + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_rhoq",CellParams::RHOQ,1)); outputReducer->addMetadata(outputReducer->size()-1,"C/m^3","$\\mathrm{C}\\,\\mathrm{m}^{-3}$","$\\rho_\\mathrm{q}$","1.0"); continue; } - if(*it == "fg_Rhoq") { // Overall charge density (summed over all populations) + if(lowercase == "fg_rhoq") { // Overall charge density (summed over all populations) outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_rhoq",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, @@ -243,23 +247,23 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"C/m^3","$\\mathrm{C}\\,\\mathrm{m}^{-3}$","$\\rho_\\mathrm{q}$","1.0"); continue; } - if(*it == "populations_Rho") { // Per-population particle number density + if(lowercase == "populations_rho" || lowercase == "populations_vg_rho") { // Per-population particle number density for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; - outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/rho", i, offsetof(spatial_cell::Population, RHO), 1)); + outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/vg_rho", i, offsetof(spatial_cell::Population, RHO), 1)); outputReducer->addMetadata(outputReducer->size()-1,"1/m^3","$\\mathrm{m}^{-3}$","$\\n_\\mathrm{"+pop+"}$","1.0"); } continue; } - if(*it == "V" || *it == "vg_V") { // Overall effective bulk density defining the center-of-mass frame from all populations - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("V",CellParams::VX,3)); + if(lowercase == "v" || lowercase == "vg_v") { // Overall effective bulk density defining the center-of-mass frame from all populations + outputReducer->addOperator(new 
DRO::DataReductionOperatorCellParams("vg_v",CellParams::VX,3)); outputReducer->addMetadata(outputReducer->size()-1,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V$","1.0"); continue; } - if(*it == "fg_V") { // Overall effective bulk density defining the center-of-mass frame from all populations - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_V",[]( + if(lowercase == "fg_v") { // Overall effective bulk density defining the center-of-mass frame from all populations + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_v",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -290,38 +294,38 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V$","1.0"); continue; } - if(*it == "populations_V") { // Per population bulk velocities + if(lowercase == "populations_v" || lowercase == "populations_vg_v") { // Per population bulk velocities for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; - outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/V", i, offsetof(spatial_cell::Population, V), 3)); + outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/vg_v", i, offsetof(spatial_cell::Population, V), 3)); outputReducer->addMetadata(outputReducer->size()-1,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V_\\mathrm{"+pop+"}$","1.0"); } continue; } - if(*it == "populations_moments_Backstream") { // Per-population moments of the backstreaming part + if(lowercase == "populations_moments_backstream" || lowercase == "populations_moments_nonthermal" || lowercase == "populations_vg_moments_nonthermal") { // Per-population moments of the backstreaming part for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; - outputReducer->addOperator(new DRO::VariableRhoBackstream(i)); - outputReducer->addOperator(new DRO::VariableVBackstream(i)); - outputReducer->addOperator(new DRO::VariablePTensorBackstreamDiagonal(i)); - outputReducer->addOperator(new DRO::VariablePTensorBackstreamOffDiagonal(i)); - outputReducer->addMetadata(outputReducer->size()-4,"1/m^3","$\\mathrm{m}^{-3}$","$n_\\mathrm{"+pop+",st}$","1.0"); - outputReducer->addMetadata(outputReducer->size()-3,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V_\\mathrm{"+pop+",st}$","1.0"); - outputReducer->addMetadata(outputReducer->size()-2,"Pa","$\\mathrm{Pa}$","$\\mathcal{P}_\\mathrm{"+pop+",st}$","1.0"); - outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$\\mathcal{\\tilde{P}}_\\mathrm{"+pop+",st}$","1.0"); + outputReducer->addOperator(new DRO::VariableRhoNonthermal(i)); + outputReducer->addOperator(new DRO::VariableVNonthermal(i)); + outputReducer->addOperator(new DRO::VariablePTensorNonthermalDiagonal(i)); + outputReducer->addOperator(new DRO::VariablePTensorNonthermalOffDiagonal(i)); + outputReducer->addMetadata(outputReducer->size()-4,"1/m^3","$\\mathrm{m}^{-3}$","$n_\\mathrm{"+pop+",nt}$","1.0"); + outputReducer->addMetadata(outputReducer->size()-3,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V_\\mathrm{"+pop+",nt}$","1.0"); + 
outputReducer->addMetadata(outputReducer->size()-2,"Pa","$\\mathrm{Pa}$","$\\mathcal{P}_\\mathrm{"+pop+",nt}$","1.0"); + outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$\\mathcal{\\tilde{P}}_\\mathrm{"+pop+",nt}$","1.0"); } continue; } - if(*it == "populations_moments_NonBackstream") { // Per-population moments of the non-backstreaming (thermal?) part. + if(lowercase == "populations_moments_nonbackstream" || lowercase == "populations_moments_thermal" || lowercase == "populations_vg_moments_thermal") { // Per-population moments of the non-backstreaming (thermal?) part. for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; - outputReducer->addOperator(new DRO::VariableRhoNonBackstream(i)); - outputReducer->addOperator(new DRO::VariableVNonBackstream(i)); - outputReducer->addOperator(new DRO::VariablePTensorNonBackstreamDiagonal(i)); - outputReducer->addOperator(new DRO::VariablePTensorNonBackstreamOffDiagonal(i)); + outputReducer->addOperator(new DRO::VariableRhoThermal(i)); + outputReducer->addOperator(new DRO::VariableVThermal(i)); + outputReducer->addOperator(new DRO::VariablePTensorThermalDiagonal(i)); + outputReducer->addOperator(new DRO::VariablePTensorThermalOffDiagonal(i)); outputReducer->addMetadata(outputReducer->size()-4,"1/m^3","$\\mathrm{m}^{-3}$","$n_\\mathrm{"+pop+",th}$","1.0"); outputReducer->addMetadata(outputReducer->size()-3,"m/s","$\\mathrm{m}\\,\\mathrm{s}^{-1}$","$V_\\mathrm{"+pop+",th}$","1.0"); outputReducer->addMetadata(outputReducer->size()-2,"Pa","$\\mathrm{Pa}$","$\\mathcal{P}_\\mathrm{"+pop+",th}$","1.0"); @@ -329,7 +333,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - if(*it == "populations_MinValue" || *it == "populations_EffectiveSparsityThreshold") { + if(lowercase == "populations_minvalue" || lowercase == "populations_effectivesparsitythreshold" || lowercase == "populations_vg_effectivesparsitythreshold") { // Effective sparsity threshold affecting each cell, if dynamic threshould algorithm is used for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; @@ -339,55 +343,55 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - if(*it == "populations_RhoLossAdjust") { + if(lowercase == "populations_rholossadjust" || lowercase == "populations_rho_loss_adjust" || lowercase == "populations_vg_rho_loss_adjust") { // Accumulated lost particle number, per population, in each cell, since last restart for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; - outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/rho_loss_adjust", i, offsetof(spatial_cell::Population, RHOLOSSADJUST), 1)); + outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/vg_rho_loss_adjust", i, offsetof(spatial_cell::Population, RHOLOSSADJUST), 1)); outputReducer->addMetadata(outputReducer->size()-1,"1/m^3","$\\mathrm{m}^{-3}$","$\\Delta_\\mathrm{loss} n_\\mathrm{"+pop+"}$","1.0"); } continue; } - if(*it == "LBweight" || *it == "vg_LBweight") { + if(lowercase == "lbweight" || lowercase == "vg_lbweight" || lowercase == "vg_loadbalanceweight" || lowercase == "vg_loadbalance_weight") { // Load 
balance metric for LB debugging - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("LB_weight",CellParams::LBWEIGHTCOUNTER,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_loadbalance_weight",CellParams::LBWEIGHTCOUNTER,1)); outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{LB weight}$",""); continue; } - if(*it == "MaxVdt") { + if(lowercase == "maxvdt" || lowercase == "vg_maxdt_acceleration") { // Overall maximum timestep constraint as calculated by the velocity space vlasov update - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("max_v_dt",CellParams::MAXVDT,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_maxdt_acceleration",CellParams::MAXVDT,1)); outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{V,max}$","1.0"); continue; } - if(*it == "populations_MaxVdt") { + if(lowercase == "populations_maxvdt" || lowercase == "populations_vg_maxdt_acceleration" || lowercase == "populations_maxdt_acceleration") { // Per-population maximum timestep constraint as calculated by the velocity space vlasov update for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; - outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/MaxVdt", i, offsetof(spatial_cell::Population, max_dt[1]), 1)); + outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/vg_maxdt_acceleration", i, offsetof(spatial_cell::Population, max_dt[1]), 1)); outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{"+pop+",V,max}$","1.0"); } continue; } - if(*it == "MaxRdt") { + if(lowercase == "maxrdt") { // Overall maximum timestep constraint as calculated by the real space vlasov update - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("max_r_dt",CellParams::MAXRDT,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_maxdt_translation",CellParams::MAXRDT,1)); outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{R,max}$","1.0"); continue; } - if(*it == "populations_MaxRdt") { + if(lowercase == "populations_maxrdt" || lowercase == "populations_vg_maxdt_translation" || lowercase == "populations_maxdt_translation") { // Per-population maximum timestep constraint as calculated by the real space vlasov update for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; - outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/MaxRdt", i, offsetof(spatial_cell::Population, max_dt[0]), 1)); + outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/vg_maxdt_translation", i, offsetof(spatial_cell::Population, max_dt[0]), 1)); outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{"+pop+",R,max}$","1.0"); } continue; } - if(*it == "populations_EnergyDensity") { + if(lowercase == "populations_energydensity" || lowercase == "populations_vg_energydensity") { // Per-population energy density in three energy ranges for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; @@ -399,7 +403,7 @@ void initializeDataReducers(DataReducer * 
outputReducer, DataReducer * diagnosti } continue; } - if(*it == "populations_PrecipitationFlux") { + if(lowercase == "populations_precipitationflux" || lowercase == "populations_vg_precipitationdifferentialflux" || lowercase == "populations_precipitationdifferentialflux") { // Per-population precipitation differential flux for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; @@ -411,9 +415,9 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - if(*it == "MaxFieldsdt" || *it == "fg_MaxFieldsdt") { + if(lowercase == "maxfieldsdt" || lowercase == "fg_maxfieldsdt") { // Maximum timestep constraint as calculated by the fieldsolver - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("MaxFieldsdt",[]( + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_maxdt_fieldsolver",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -442,15 +446,15 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{f,max}$","1.0"); continue; } - if(*it == "MPIrank" || *it == "vg_rank") { + if(lowercase == "mpirank" || lowercase == "vg_rank") { // Map of spatial decomposition of the DCCRG grid into MPI ranks outputReducer->addOperator(new DRO::MPIrank); outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{MPI rank}$",""); continue; } - if(*it == "FsGridRank" || *it == "fg_rank") { + if(lowercase == "fsgridrank" || lowercase == "fg_rank") { // Map of spatial decomposition of the FsGrid into MPI ranks - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("FsGridRank",[]( + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_rank",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -470,15 +474,15 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{fGrid rank}$",""); continue; } - if(*it == "BoundaryType" || *it == "vg_BoundaryType") { + if(lowercase == "boundarytype" || lowercase == "vg_boundarytype") { // Type of boundarycells outputReducer->addOperator(new DRO::BoundaryType); outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{vGrid Boundary type}$",""); continue; } - if(*it == "FsGridBoundaryType" || *it == "fg_BoundaryType") { + if(lowercase == "fsgridboundarytype" || lowercase == "fg_boundarytype") { // Type of boundarycells as stored in FSGrid - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("FsGridBoundaryType",[]( + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_boundarytype",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -507,15 +511,15 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{fGrid Boundary type}$",""); continue; } - if(*it == "BoundaryLayer" || *it == "vg_BoundaryLayer") { + if(lowercase == "boundarylayer" || lowercase == "vg_boundarylayer") { // For boundaries with multiple layers: layer count per cell outputReducer->addOperator(new DRO::BoundaryLayer); outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{vGrid Boundary layer}$",""); continue; 
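// Editorial sketch: the branches above all funnel user-supplied variable
// names through one lowercase comparison so legacy and new spellings match
// alike. A minimal, std-only version of that idiom (the patch itself
// lowercases with boost::algorithm::to_lower_copy; toLower and matches are
// hypothetical helper names, not part of the Vlasiator sources):
#include <algorithm>
#include <cctype>
#include <initializer_list>
#include <string>

static std::string toLower(std::string s) {
   std::transform(s.begin(), s.end(), s.begin(),
                  [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
   return s;
}

static bool matches(const std::string& name,
                    std::initializer_list<const char*> aliases) {
   const std::string lc = toLower(name);
   for (const char* a : aliases)
      if (lc == a) return true;   // accept any listed spelling
   return false;
}
// Usage mirroring the branches above:
//   if (matches(*it, {"boundarylayer", "vg_boundarylayer"})) { ... }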
} - if(*it == "FsGridBoundaryLayer" || *it == "fg_BoundaryLayer") { + if(lowercase == "fsgridboundarylayer" || lowercase == "fg_boundarylayer") { // Type of boundarycells as stored in FSGrid - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("FsGridBoundaryLayer",[]( + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_boundarylayer",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -544,7 +548,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{fGrid Boundary layer}$",""); continue; } - if (*it == "populations_Blocks") { + if(lowercase == "populations_blocks" || lowercase == "populations_vg_blocks") { // Per-population velocity space block counts for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; @@ -554,30 +558,30 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - if(*it == "fSaved") { + if(lowercase == "fsaved" || lowercase == "vg_fsaved" || lowercase == "vg_f_saved") { // Boolean marker whether a velocity space is saved in a given spatial cell - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("fSaved",CellParams::ISCELLSAVINGF,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_f_saved",CellParams::ISCELLSAVINGF,1)); outputReducer->addMetadata(outputReducer->size()-1,"","","$f(v)_\\mathrm{saved}$",""); continue; } - if(*it == "populations_accSubcycles") { + if(lowercase == "populations_accsubcycles" || lowercase == "populations_acceleration_subcycles" || lowercase == "populations_vg_acceleration_subcycles") { // Per-population number of subcycles performed for velocity space update for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; - outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/acc_subcycles", i, offsetof(spatial_cell::Population, ACCSUBCYCLES), 1)); + outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/vg_acceleration_subcycles", i, offsetof(spatial_cell::Population, ACCSUBCYCLES), 1)); outputReducer->addMetadata(outputReducer->size()-1,"","","$\\mathrm{"+pop+" Acc subcycles}$",""); } continue; } - if(*it == "VolE" || *it == "vg_VolE") { + if(lowercase == "vole" || lowercase == "vg_vole" || lowercase == "evol" || lowercase == "vg_e_vol" || lowercase == "e_vol") { // Volume-averaged E field - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("E_vol",CellParams::EXVOL,3)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_e_vol",CellParams::EXVOL,3)); outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{vol,vg}$","1.0"); continue; } - if(*it == "fg_VolE") { - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_volB",[]( + if(lowercase == "fg_vole" || lowercase == "fg_e_vol" || lowercase == "fg_evol") { + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_e_vol",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -608,9 +612,9 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti 
outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{vol,fg}$","1.0"); continue; } - if(*it == "HallE" || *it == "fg_HallE") { + if(lowercase == "halle" || lowercase == "fg_halle" || lowercase == "fg_e_hall") { for(int index=0; indexaddOperator(new DRO::DataReductionOperatorFsGrid(reducer_name,[index]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, @@ -637,24 +641,24 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti return retval; } )); - outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{Hall}$","1.0"); + outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{Hall,"+std::to_string(index)+"}$","1.0"); } continue; } - if(*it =="GradPeE") { + if(lowercase =="gradpee" || lowercase == "e_gradpe" || lowercase == "vg_e_gradpe") { // Electron pressure gradient contribution to the generalized ohm's law - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("EGRADPE",CellParams::EXGRADPE,3)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_e_gradpe",CellParams::EXGRADPE,3)); outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_{\\del P_\\mathrm{e}}$","1.0"); continue; } - if(*it == "VolB" || *it == "vg_VolB") { + if(lowercase == "volb" || lowercase == "vg_volb" || lowercase == "b_vol" || lowercase == "bvol" || lowercase == "vg_bvol" || lowercase == "vg_b_vol") { // Volume-averaged magnetic field outputReducer->addOperator(new DRO::VariableBVol); outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,vg}$","1.0"); continue; } - if(*it == "fg_VolB") { // Static (typically dipole) magnetic field part - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_volB",[]( + if(lowercase == "fg_volb" || lowercase == "fg_bvol" || lowercase == "fg_b_vol") { // Static (typically dipole) magnetic field part + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_b_vol",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -688,25 +692,25 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,fg}$","1.0"); continue; } - if(*it == "BackgroundVolB") { - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("BGB_vol",CellParams::BGBXVOL,3)); + if(lowercase == "backgroundvolb" || lowercase == "vg_b_background_vol") { + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_b_background_vol",CellParams::BGBXVOL,3)); outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,vg,bg}$","1.0"); continue; } - if(*it == "PerturbedVolB") { - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("PERB_vol",CellParams::PERBXVOL,3)); + if(lowercase == "perturbedvolb" || lowercase == "vg_b_perturbed_vol") { + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_b_perturbed_vol",CellParams::PERBXVOL,3)); outputReducer->addMetadata(outputReducer->size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,vg,per}$","1.0"); continue; } - if(*it == "Pressure" || *it== "vg_Pressure") { + if(lowercase == "pressure" || lowercase == "vg_pressure") { // Overall scalar pressure from all populations outputReducer->addOperator(new 
DRO::VariablePressureSolver); outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$P_\\mathrm{solver}$","1.0"); continue; } - if(*it == "fg_Pressure") { + if(lowercase == "fg_pressure") { // Overall scalar pressure from all populations - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_Pressure",[]( + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_pressure",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -736,7 +740,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$P_\\mathrm{fg}$","1.0"); continue; } - if(*it == "populations_PTensor") { + if(lowercase == "populations_ptensor") { // Per-population pressure tensor, stored as diagonal and offdiagonal components for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; @@ -748,14 +752,14 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - if(*it == "BVOLderivs") { + if(lowercase == "bvolderivs" || lowercase == "b_vol_derivatives" || lowercase == "b_vol_derivs") { // Volume-averaged derivatives - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdy",bvolderivatives::dPERBXVOLdy,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBXVOLdz",bvolderivatives::dPERBXVOLdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdx",bvolderivatives::dPERBYVOLdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBYVOLdz",bvolderivatives::dPERBYVOLdz,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdx",bvolderivatives::dPERBZVOLdx,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("dPERBZVOLdy",bvolderivatives::dPERBZVOLdy,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("vg_dperbxvoldy",bvolderivatives::dPERBXVOLdy,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("vg_dperbxvoldz",bvolderivatives::dPERBXVOLdz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("vg_dperbyvoldx",bvolderivatives::dPERBYVOLdx,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("vg_dperbyvoldz",bvolderivatives::dPERBYVOLdz,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("vg_dperbzvoldx",bvolderivatives::dPERBZVOLdx,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("vg_dperbzvoldy",bvolderivatives::dPERBZVOLdy,1)); outputReducer->addMetadata(outputReducer->size()-6,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{X,\\mathrm{per,vol,vg}} (\\Delta Y)^{-1}$","1.0"); outputReducer->addMetadata(outputReducer->size()-5,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{X,\\mathrm{per,vol,vg}} (\\Delta Z)^{-1}$","1.0"); outputReducer->addMetadata(outputReducer->size()-4,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{Y,\\mathrm{per,vol,vg}} (\\Delta X)^{-1}$","1.0"); @@ -764,14 +768,14 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"T/m","$\\mathrm{T}\\,\\mathrm{m}^{-1}$","$\\Delta B_{Z,\\mathrm{per,vol,vg}} (\\Delta Y)^{-1}$","1.0"); continue; } - 
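// Editorial sketch: the BVOLderivs hunk just above registers six derivative
// operators and then attaches unit metadata by walking back from the end of
// the reducer list (size()-6 ... size()-1). A toy stand-in showing why those
// indices line up (MiniReducer is hypothetical, not the DataReducer API):
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

struct MiniReducer {
   std::vector<std::string> names, units;
   void addOperator(const std::string& n) { names.push_back(n); units.push_back(""); }
   void addMetadata(std::size_t i, const std::string& u) { units.at(i) = u; }
   std::size_t size() const { return names.size(); }
};

int main() {
   MiniReducer r;
   const char* derivs[] = {"vg_dperbxvoldy","vg_dperbxvoldz","vg_dperbyvoldx",
                           "vg_dperbyvoldz","vg_dperbzvoldx","vg_dperbzvoldy"};
   for (const char* n : derivs) r.addOperator(n);
   for (std::size_t k = 6; k >= 1; --k)
      r.addMetadata(r.size() - k, "T/m");   // size()-6 ... size()-1
   assert(r.units.front() == "T/m" && r.units.back() == "T/m");
}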
if(*it == "vg_GridCoordinates") { + if(lowercase == "vg_gridcoordinates") { // Spatial coordinates for each cell - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_X",CellParams::XCRD,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_Y",CellParams::YCRD,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_Z",CellParams::ZCRD,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_DX",CellParams::DX,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_DY",CellParams::DY,1)); - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_DZ",CellParams::DZ,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_x",CellParams::XCRD,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_y",CellParams::YCRD,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_z",CellParams::ZCRD,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_dx",CellParams::DX,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_dy",CellParams::DY,1)); + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_dz",CellParams::DZ,1)); outputReducer->addMetadata(outputReducer->size()-6,"m","$\\mathrm{m}$","$X_\\mathrm{vg}$","1.0"); outputReducer->addMetadata(outputReducer->size()-5,"m","$\\mathrm{m}$","$Y_\\mathrm{vg}$","1.0"); outputReducer->addMetadata(outputReducer->size()-4,"m","$\\mathrm{m}$","$Z_\\mathrm{vg}$","1.0"); @@ -780,8 +784,8 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta Z_\\mathrm{vg}$","1.0"); continue; } - if(*it == "fg_GridCoordinates") { - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_X",[]( + if(lowercase == "fg_gridcoordinates") { + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_x",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -808,7 +812,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } )); outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$X_\\mathrm{fg}$","1.0"); - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_Y",[]( + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_y",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -835,7 +839,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } )); outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$Y_\\mathrm{fg}$","1.0"); - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_Z",[]( + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_z",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -862,7 +866,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } )); outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$Z_\\mathrm{fg}$","1.0"); - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_DX",[]( + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_dx",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -880,7 +884,7 @@ void 
initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } )); outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta X_\\mathrm{fg}$","1.0"); - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_DY",[]( + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_dy",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -898,7 +902,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } )); outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta Y_\\mathrm{fg}$","1.0"); - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_DZ",[]( + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_dz",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, @@ -918,7 +922,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"m","$\\mathrm{m}$","$\\delta Z_\\mathrm{fg}$","1.0"); continue; } - if (*it == "MeshData") { + if(lowercase == "meshdata") { outputReducer->addOperator(new DRO::VariableMeshData); outputReducer->addMetadata(outputReducer->size()-1,"","","\\mathrm{Mesh data}$",""); continue; @@ -936,19 +940,23 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti for (it = P::diagnosticVariableList.begin(); it != P::diagnosticVariableList.end(); it++) { - if (*it == "populations_Blocks") { + + // Sidestep mixed case errors + const std::string lowercase = boost::algorithm::to_lower_copy(*it); + + if(lowercase == "populations_blocks") { // Per-population total block counts for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { diagnosticReducer->addOperator(new DRO::Blocks(i)); } continue; } - if(*it == "Rhom") { + if(lowercase == "rhom") { // Overall mass density - diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("rho",CellParams::RHOM,1)); + diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("rhom",CellParams::RHOM,1)); continue; } - if(*it == "populations_RhoLossAdjust") { + if(lowercase == "populations_rholossadjust" || lowercase == "populations_vg_rho_loss_adjust" || lowercase == "populations_rho_loss_adjust") { // Per-particle overall lost particle number for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; @@ -957,51 +965,51 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - //if(*it == "RhoLossVelBoundary") { + //if(lowercase == "rholossvelboundary") { // diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("rho_loss_velocity_boundary",CellParams::RHOLOSSVELBOUNDARY,1)); // continue; //} - if(*it == "LBweight") { - diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("LB_weight",CellParams::LBWEIGHTCOUNTER,1)); + if(lowercase == "lbweight" || lowercase == "") { + diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("loadbalance_weight",CellParams::LBWEIGHTCOUNTER,1)); continue; } - if(*it == "MaxVdt") { - diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("max_v_dt",CellParams::MAXVDT,1)); + if(lowercase == "maxvdt") { + diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("maxdt_acceleration",CellParams::MAXVDT,1)); continue; } - 
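// Editorial sketch: the per-population reducers in these hunks all rely on
// one mechanism: DataReductionOperatorPopulations stores a byte offset into
// spatial_cell::Population (via offsetof, including array elements such as
// max_dt[1]) plus an element count, and copies the member out generically at
// write time. A minimal sketch of that trick (this Population is a
// hypothetical POD stand-in, not the Vlasiator struct):
#include <cstddef>
#include <cstring>
#include <iostream>

struct Population { double RHO; double max_dt[2]; };

static void reduce(const Population& p, std::size_t byteOffset,
                   std::size_t nValues, double* out) {
   const char* base = reinterpret_cast<const char*>(&p);
   std::memcpy(out, base + byteOffset, nValues * sizeof(double));
}

int main() {
   const Population p{1.5e6, {0.01, 0.002}};
   double v = 0;
   reduce(p, offsetof(Population, max_dt[1]), 1, &v);  // "maxdt_acceleration"
   std::cout << v << "\n";                             // prints 0.002
}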
if(*it == "MaxRdt") { - diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("max_r_dt",CellParams::MAXRDT,1)); + if(lowercase == "maxrdt") { + diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("maxdt_translation",CellParams::MAXRDT,1)); continue; } - if(*it == "MaxFieldsdt") { - diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("max_fields_dt",CellParams::MAXFDT,1)); + if(lowercase == "maxfieldsdt") { + diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("maxdt_fieldsolver",CellParams::MAXFDT,1)); continue; } - if(*it == "populations_MaxDistributionFunction") { + if(lowercase == "populations_maxdistributionfunction") { for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { diagnosticReducer->addOperator(new DRO::MaxDistributionFunction(i)); } continue; } - if(*it == "populations_MinDistributionFunction") { + if(lowercase == "populations_mindistributionfunction") { for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { diagnosticReducer->addOperator(new DRO::MinDistributionFunction(i)); } continue; } - if(*it == "populations_MaxRdt") { + if(lowercase == "populations_maxrdt") { for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; - diagnosticReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/Blocks", i, offsetof(spatial_cell::Population, max_dt[0]), 1)); + diagnosticReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/maxdt_translation", i, offsetof(spatial_cell::Population, max_dt[0]), 1)); } continue; } - if(*it == "populations_MaxVdt") { + if(lowercase == "populations_maxvdt") { for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; - diagnosticReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/Blocks", i, offsetof(spatial_cell::Population, max_dt[1]), 1)); + diagnosticReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/maxdt_acceleration", i, offsetof(spatial_cell::Population, max_dt[1]), 1)); } continue; } diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index 78cbb850a..ce2449fe6 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -185,7 +185,7 @@ namespace DRO { return true; } - std::string VariableBVol::getName() const {return "B_vol";} + std::string VariableBVol::getName() const {return "vg_b_vol";} bool VariableBVol::reduceData(const SpatialCell* cell,char* buffer) { const char* ptr = reinterpret_cast(B); @@ -218,7 +218,7 @@ namespace DRO { return true; } - std::string MPIrank::getName() const {return "MPI_rank";} + std::string MPIrank::getName() const {return "vg_rank";} bool MPIrank::reduceData(const SpatialCell* cell,char* buffer) { const char* ptr = reinterpret_cast(&mpiRank); @@ -245,7 +245,7 @@ namespace DRO { return true; } - std::string BoundaryType::getName() const {return "Boundary_type";} + std::string BoundaryType::getName() const {return "vg_boundarytype";} bool BoundaryType::reduceData(const SpatialCell* cell,char* buffer) { const char* ptr = reinterpret_cast(&boundaryType); @@ -270,7 +270,7 @@ namespace DRO { return true; } - std::string BoundaryLayer::getName() const {return "Boundary_layer";} + std::string 
BoundaryLayer::getName() const {return "vg_boundarylayer";} bool BoundaryLayer::reduceData(const SpatialCell* cell,char* buffer) { const char* ptr = reinterpret_cast(&boundaryLayer); @@ -296,7 +296,7 @@ namespace DRO { return true; } - std::string Blocks::getName() const {return popName + "/Blocks";} + std::string Blocks::getName() const {return popName + "/vg_blocks";} bool Blocks::reduceData(const SpatialCell* cell,char* buffer) { const char* ptr = reinterpret_cast(&nBlocks); @@ -318,7 +318,7 @@ namespace DRO { VariablePressureSolver::VariablePressureSolver(): DataReductionOperator() { } VariablePressureSolver::~VariablePressureSolver() { } - std::string VariablePressureSolver::getName() const {return "Pressure";} + std::string VariablePressureSolver::getName() const {return "vg_pressure";} bool VariablePressureSolver::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { dataType = "float"; @@ -349,7 +349,7 @@ namespace DRO { } VariablePTensorDiagonal::~VariablePTensorDiagonal() { } - std::string VariablePTensorDiagonal::getName() const {return popName + "/PTensorDiagonal";} + std::string VariablePTensorDiagonal::getName() const {return popName + "/vg_ptensor_diagonal";} bool VariablePTensorDiagonal::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { dataType = "float"; @@ -423,7 +423,7 @@ namespace DRO { } VariablePTensorOffDiagonal::~VariablePTensorOffDiagonal() { } - std::string VariablePTensorOffDiagonal::getName() const {return popName + "/PTensorOffDiagonal";} + std::string VariablePTensorOffDiagonal::getName() const {return popName + "/vg_ptensor_offdiagonal";} bool VariablePTensorOffDiagonal::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { dataType = "float"; @@ -492,13 +492,13 @@ namespace DRO { return true; } - // YK maximum value of the distribution function + // YK maximum value of the distribution function (diagnostic) MaxDistributionFunction::MaxDistributionFunction(cuint _popID): DataReductionOperator(),popID(_popID) { popName=getObjectWrapper().particleSpecies[popID].name; } MaxDistributionFunction::~MaxDistributionFunction() { } - std::string MaxDistributionFunction::getName() const {return popName + "/MaximumDistributionFunctionValue";} + std::string MaxDistributionFunction::getName() const {return popName + "/maximumdistributionfunctionvalue";} bool MaxDistributionFunction::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { dataType = "float"; @@ -546,13 +546,13 @@ namespace DRO { } - // YK minimum value of the distribution function + // YK minimum value of the distribution function (diagnostic) MinDistributionFunction::MinDistributionFunction(cuint _popID): DataReductionOperator(),popID(_popID) { popName=getObjectWrapper().particleSpecies[popID].name; } MinDistributionFunction::~MinDistributionFunction() { } - std::string MinDistributionFunction::getName() const {return popName + "/MinimumDistributionFunctionValue";} + std::string MinDistributionFunction::getName() const {return popName + "/minimumdistributionfunctionvalue";} bool MinDistributionFunction::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { dataType = "float"; @@ -601,20 +601,20 @@ namespace DRO { /******* Helper functions for finding the velocity cell indices or IDs within a single velocity block - either belonging to the backstreaming or the non-backstreaming population. 
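// Editorial sketch: every helper below reduces to one geometric test. A
// velocity cell belongs to the nonthermal population exactly when its center
// lies outside a sphere of radius thermalRadius around the bulk velocity
// thermalV (the renamed species parameters). A standalone version of that
// predicate (Real and the argument passing are simplified stand-ins):
#include <array>
using Real = double;

static bool isNonthermal(const std::array<Real,3>& vCellCenter,
                         const std::array<Real,3>& thermalV,
                         Real thermalRadius) {
   Real d2 = 0;
   for (int c = 0; c < 3; ++c) {
      const Real d = thermalV[c] - vCellCenter[c];
      d2 += d * d;
   }
   return d2 > thermalRadius * thermalRadius;   // strictly outside the sphere
}
// The four helpers differ only in the sign of this test (nonthermal keeps
// cells outside, thermal keeps cells inside, with boundary cells going to
// the thermal side via <=) and in whether they record flat velocity-cell IDs
// or (i,j,k) index triplets.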
+ either belonging to the thermal or the non-thermal population. There is some code duplication here, but as these helper functions are called within threads for block separately, it's preferable to have them fast even at the cost of code repetition. ********/ - //Helper function for getting the velocity cell ids that are a part of the backstream population: - static void getBackstreamVelocityCells( + //Helper function for getting the velocity cell ids that are a part of the nonthermal population: + static void getNonthermalVelocityCells( const Real* block_parameters, vector & vCellIds, cuint popID ) { creal HALF = 0.5; - const std::array backstreamV = getObjectWrapper().particleSpecies[popID].backstreamV; - creal backstreamRadius = getObjectWrapper().particleSpecies[popID].backstreamRadius; + const std::array thermalV = getObjectWrapper().particleSpecies[popID].thermalV; + creal thermalRadius = getObjectWrapper().particleSpecies[popID].thermalRadius; // Go through every velocity cell (i, j, k are indices) for (uint k = 0; k < WID; ++k) for (uint j = 0; j < WID; ++j) for (uint i = 0; i < WID; ++i) { // Get the vx, vy, vz coordinates of the velocity cell @@ -622,48 +622,48 @@ namespace DRO { const Real VY = block_parameters[BlockParams::VYCRD] + (j + HALF) * block_parameters[BlockParams::DVY]; const Real VZ = block_parameters[BlockParams::VZCRD] + (k + HALF) * block_parameters[BlockParams::DVZ]; // Compare the distance of the velocity cell from the center of the maxwellian distribution to the radius of the maxwellian distribution - if( ( (backstreamV[0] - VX) * (backstreamV[0] - VX) - + (backstreamV[1] - VY) * (backstreamV[1] - VY) - + (backstreamV[2] - VZ) * (backstreamV[2] - VZ) ) + if( ( (thermalV[0] - VX) * (thermalV[0] - VX) + + (thermalV[1] - VY) * (thermalV[1] - VY) + + (thermalV[2] - VZ) * (thermalV[2] - VZ) ) > - backstreamRadius*backstreamRadius ) { - //The velocity cell is a part of the backstream population: + thermalRadius*thermalRadius ) { + //The velocity cell is a part of the nonthermal population: vCellIds.push_back(cellIndex(i,j,k)); } } } - //Helper function for getting the velocity cell ids that are a part of the backstream population: - static void getNonBackstreamVelocityCells( + //Helper function for getting the velocity cell ids that are a part of the nonthermal population: + static void getThermalVelocityCells( const Real* block_parameters, vector & vCellIds, cuint popID ) { creal HALF = 0.5; - const std::array backstreamV = getObjectWrapper().particleSpecies[popID].backstreamV; - creal backstreamRadius = getObjectWrapper().particleSpecies[popID].backstreamRadius; + const std::array thermalV = getObjectWrapper().particleSpecies[popID].thermalV; + creal thermalRadius = getObjectWrapper().particleSpecies[popID].thermalRadius; for (uint k = 0; k < WID; ++k) for (uint j = 0; j < WID; ++j) for (uint i = 0; i < WID; ++i) { const Real VX = block_parameters[BlockParams::VXCRD] + (i + HALF) * block_parameters[BlockParams::DVX]; const Real VY = block_parameters[BlockParams::VYCRD] + (j + HALF) * block_parameters[BlockParams::DVY]; const Real VZ = block_parameters[BlockParams::VZCRD] + (k + HALF) * block_parameters[BlockParams::DVZ]; - if( ( (backstreamV[0] - VX) * (backstreamV[0] - VX) - + (backstreamV[1] - VY) * (backstreamV[1] - VY) - + (backstreamV[2] - VZ) * (backstreamV[2] - VZ) ) + if( ( (thermalV[0] - VX) * (thermalV[0] - VX) + + (thermalV[1] - VY) * (thermalV[1] - VY) + + (thermalV[2] - VZ) * (thermalV[2] - VZ) ) <= - backstreamRadius*backstreamRadius ) { - //The 
velocity cell is not a part of the backstream population: + thermalRadius*thermalRadius ) { + //The velocity cell is not a part of the nonthermal population: vCellIds.push_back(cellIndex(i,j,k)); } } } - //Helper function for getting the velocity cell indices that are a part of the backstream population: - static void getBackstreamVelocityCellIndices( + //Helper function for getting the velocity cell indices that are a part of the nonthermal population: + static void getNonthermalVelocityCellIndices( const Real* block_parameters, vector> & vCellIndices, cuint popID ) { creal HALF = 0.5; - const std::array backstreamV = getObjectWrapper().particleSpecies[popID].backstreamV; - creal backstreamRadius = getObjectWrapper().particleSpecies[popID].backstreamRadius; + const std::array thermalV = getObjectWrapper().particleSpecies[popID].thermalV; + creal thermalRadius = getObjectWrapper().particleSpecies[popID].thermalRadius; // Go through a block's every velocity cell for (uint k = 0; k < WID; ++k) for (uint j = 0; j < WID; ++j) for (uint i = 0; i < WID; ++i) { // Get the coordinates of the velocity cell (e.g. VX = block_vx_min_coordinates + (velocity_cell_indice_x+0.5)*length_of_velocity_cell_in_x_direction @@ -671,26 +671,26 @@ namespace DRO { const Real VY = block_parameters[BlockParams::VYCRD] + (j + HALF) * block_parameters[BlockParams::DVY]; const Real VZ = block_parameters[BlockParams::VZCRD] + (k + HALF) * block_parameters[BlockParams::DVZ]; // Calculate the distance of the velocity cell from the center of the maxwellian distribution and compare it to the approximate radius of the maxwellian distribution - if( ( (backstreamV[0] - VX) * (backstreamV[0] - VX) - + (backstreamV[1] - VY) * (backstreamV[1] - VY) - + (backstreamV[2] - VZ) * (backstreamV[2] - VZ) ) + if( ( (thermalV[0] - VX) * (thermalV[0] - VX) + + (thermalV[1] - VY) * (thermalV[1] - VY) + + (thermalV[2] - VZ) * (thermalV[2] - VZ) ) > - backstreamRadius*backstreamRadius ) { - //The velocity cell is a part of the backstream population because it is not within the radius: + thermalRadius*thermalRadius ) { + //The velocity cell is a part of the nonthermal population because it is not within the radius: const array indices{{i, j, k}}; vCellIndices.push_back( indices ); } } } - //Helper function for getting the velocity cell indices that are not a part of the backstream population: - static void getNonBackstreamVelocityCellIndices( + //Helper function for getting the velocity cell indices that are not a part of the nonthermal population: + static void getThermalVelocityCellIndices( const Real* block_parameters, vector> & vCellIndices, cuint popID ) { creal HALF = 0.5; - const std::array backstreamV = getObjectWrapper().particleSpecies[popID].backstreamV; - creal backstreamRadius = getObjectWrapper().particleSpecies[popID].backstreamRadius; + const std::array thermalV = getObjectWrapper().particleSpecies[popID].thermalV; + creal thermalRadius = getObjectWrapper().particleSpecies[popID].thermalRadius; // Go through a block's every velocity cell for (uint k = 0; k < WID; ++k) for (uint j = 0; j < WID; ++j) for (uint i = 0; i < WID; ++i) { // Get the coordinates of the velocity cell (e.g. 
VX = block_vx_min_coordinates + (velocity_cell_indice_x+0.5)*length_of_velocity_cell_in_x_direction @@ -698,12 +698,12 @@ namespace DRO { const Real VY = block_parameters[BlockParams::VYCRD] + (j + HALF) * block_parameters[BlockParams::DVY]; const Real VZ = block_parameters[BlockParams::VZCRD] + (k + HALF) * block_parameters[BlockParams::DVZ]; // Calculate the distance of the velocity cell from the center of the maxwellian distribution and compare it to the approximate radius of the maxwellian distribution - if( ( (backstreamV[0] - VX) * (backstreamV[0] - VX) - + (backstreamV[1] - VY) * (backstreamV[1] - VY) - + (backstreamV[2] - VZ) * (backstreamV[2] - VZ) ) + if( ( (thermalV[0] - VX) * (thermalV[0] - VX) + + (thermalV[1] - VY) * (thermalV[1] - VY) + + (thermalV[2] - VZ) * (thermalV[2] - VZ) ) <= - backstreamRadius*backstreamRadius ) { - //The velocity cell is not a part of the backstream population because it is within the radius: + thermalRadius*thermalRadius ) { + //The velocity cell is part of the thermal population because it is within the radius: const array indices{{i, j, k}}; vCellIndices.push_back( indices ); } @@ -713,10 +713,10 @@ namespace DRO { /******** Next level of helper functions - these include threading and calculate zeroth or first velocity moments or the diagonal / off-diagonal pressure tensor components for - backstreaming or non-backstreaming populations ********/ + thermal or non-thermal populations ********/ - //Calculates rho backstream or rho non backstream - static void rhoBackstreamCalculation( const SpatialCell * cell, const bool calculateBackstream, cuint popID, Real & rho ) { + //Calculates rho thermal or rho non-thermal + static void rhoNonthermalCalculation( const SpatialCell * cell, const bool calculateNonthermal, cuint popID, Real & rho ) { const Real HALF = 0.5; # pragma omp parallel { @@ -733,10 +733,10 @@ namespace DRO { * parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::DVZ]; vector< uint64_t > vCells; //Velocity cell ids vCells.clear(); - if ( calculateBackstream == true ) { - getBackstreamVelocityCells(¶meters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS], vCells, popID); + if ( calculateNonthermal == true ) { + getNonthermalVelocityCells(¶meters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS], vCells, popID); } else { - getNonBackstreamVelocityCells(¶meters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS], vCells, popID); + getThermalVelocityCells(¶meters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS], vCells, popID); } for( vector< uint64_t >::const_iterator it = vCells.begin(); it != vCells.end(); ++it ) { //velocity cell id = *it @@ -755,7 +755,7 @@ namespace DRO { return; } - static void VBackstreamCalculation( const SpatialCell * cell, const bool calculateBackstream, cuint popID, Real * V ) { + static void VNonthermalCalculation( const SpatialCell * cell, const bool calculateNonthermal, cuint popID, Real * V ) { const Real HALF = 0.5; // Make sure the V is initialized V[0] = 0; @@ -779,14 +779,14 @@ namespace DRO { = parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::DVX] * parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::DVY] * parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::DVZ]; - // Get the velocity cell indices of the cells that are a part of the backstream population + // Get the velocity cell indices of the cells that are a part of the nonthermal population vector< array > vCellIndices; vCellIndices.clear(); // Save indices to the std::vector - if( calculateBackstream == true ) { - 
getBackstreamVelocityCellIndices(&parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS], vCellIndices, popID); + if( calculateNonthermal == true ) { + getNonthermalVelocityCellIndices(&parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS], vCellIndices, popID); } else { - getNonBackstreamVelocityCellIndices(&parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS], vCellIndices, popID); + getThermalVelocityCellIndices(&parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS], vCellIndices, popID); } // We have now fetched all of the needed velocity cell indices, so now go through them: for( vector< array<uint, 3> >::const_iterator it = vCellIndices.begin(); it != vCellIndices.end(); ++it ) { @@ -826,8 +826,8 @@ return; } - static void PTensorDiagonalBackstreamCalculations( const SpatialCell * cell, - const bool calculateBackstream, + static void PTensorDiagonalNonthermalCalculations( const SpatialCell * cell, + const bool calculateNonthermal, const Real averageVX, const Real averageVY, const Real averageVZ, @@ -851,10 +851,10 @@ * parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::DVZ]; vector< array<uint, 3> > vCellIndices; vCellIndices.clear(); - if( calculateBackstream == true ) { - getBackstreamVelocityCellIndices(&parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS], vCellIndices, popID); + if( calculateNonthermal == true ) { + getNonthermalVelocityCellIndices(&parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS], vCellIndices, popID); } else { - getNonBackstreamVelocityCellIndices(&parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS], vCellIndices, popID); + getThermalVelocityCellIndices(&parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS], vCellIndices, popID); } for( vector< array<uint, 3> >::const_iterator it = vCellIndices.begin(); it != vCellIndices.end(); ++it ) { //Go through every velocity cell: @@ -887,8 +887,8 @@ return; } - static void PTensorOffDiagonalBackstreamCalculations( const SpatialCell * cell, - const bool calculateBackstream, + static void PTensorOffDiagonalNonthermalCalculations( const SpatialCell * cell, + const bool calculateNonthermal, const Real averageVX, const Real averageVY, const Real averageVZ, @@ -911,10 +911,10 @@ * parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::DVY] * parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS + BlockParams::DVZ]; vector< array<uint, 3> > vCellIndices; - if( calculateBackstream == true ) { - getBackstreamVelocityCellIndices(&parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS], vCellIndices, popID); + if( calculateNonthermal == true ) { + getNonthermalVelocityCellIndices(&parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS], vCellIndices, popID); } else { - getNonBackstreamVelocityCellIndices(&parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS], vCellIndices, popID); + getThermalVelocityCellIndices(&parameters[n * BlockParams::N_VELOCITY_BLOCK_PARAMS], vCellIndices, popID); } for( vector< array<uint, 3> >::const_iterator it = vCellIndices.begin(); it != vCellIndices.end(); ++it ) { //Go through every velocity cell: @@ -947,14 +947,14 @@ } /********* - End velocity moment / backstream/non-backstreamn helper functions + End velocity moment / thermal/non-thermal helper functions *********/ VariableMeshData::VariableMeshData(): DataReductionOperatorHandlesWriting() { } VariableMeshData::~VariableMeshData() { } - std::string VariableMeshData::getName() const {return "MeshData";} + std::string VariableMeshData::getName() const {return "vg_meshdata";} bool VariableMeshData::getDataVectorInfo(std::string&
dataType,unsigned int& dataSize,unsigned int& vectorSize) const { return true; @@ -992,162 +992,157 @@ namespace DRO { return success; } - // Rho backstream: - VariableRhoBackstream::VariableRhoBackstream(cuint _popID): DataReductionOperator(),popID(_popID) { + // Rho nonthermal: + VariableRhoNonthermal::VariableRhoNonthermal(cuint _popID): DataReductionOperator(),popID(_popID) { popName = getObjectWrapper().particleSpecies[popID].name; - doSkip = (getObjectWrapper().particleSpecies[popID].backstreamRadius == 0.0) ? true : false; + doSkip = (getObjectWrapper().particleSpecies[popID].thermalRadius == 0.0) ? true : false; } - VariableRhoBackstream::~VariableRhoBackstream() { } + VariableRhoNonthermal::~VariableRhoNonthermal() { } - std::string VariableRhoBackstream::getName() const {return popName + "/RhoBackstream";} + std::string VariableRhoNonthermal::getName() const {return popName + "/vg_rho_nonthermal";} - bool VariableRhoBackstream::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { + bool VariableRhoNonthermal::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { dataType = "float"; dataSize = sizeof(Real); vectorSize = (doSkip == true) ? 0 : 1; return true; } - // Adding rho backstream calculations to Vlasiator. - bool VariableRhoBackstream::reduceData(const SpatialCell* cell,char* buffer) { - const bool calculateBackstream = true; - rhoBackstreamCalculation( cell, calculateBackstream, popID, RhoBackstream ); - const char* ptr = reinterpret_cast(&RhoBackstream); + bool VariableRhoNonthermal::reduceData(const SpatialCell* cell,char* buffer) { + const bool calculateNonthermal = true; + rhoNonthermalCalculation( cell, calculateNonthermal, popID, RhoNonthermal ); + const char* ptr = reinterpret_cast(&RhoNonthermal); for (uint i = 0; i < sizeof(Real); ++i) buffer[i] = ptr[i]; return true; } - bool VariableRhoBackstream::setSpatialCell(const SpatialCell* cell) { - RhoBackstream = 0.0; + bool VariableRhoNonthermal::setSpatialCell(const SpatialCell* cell) { + RhoNonthermal = 0.0; return true; } - - // Rho non backstream: - VariableRhoNonBackstream::VariableRhoNonBackstream(cuint _popID): DataReductionOperator(),popID(_popID) { + // Rho thermal: + VariableRhoThermal::VariableRhoThermal(cuint _popID): DataReductionOperator(),popID(_popID) { popName = getObjectWrapper().particleSpecies[popID].name; - doSkip = (getObjectWrapper().particleSpecies[popID].backstreamRadius == 0.0) ? true : false; + doSkip = (getObjectWrapper().particleSpecies[popID].thermalRadius == 0.0) ? true : false; } - VariableRhoNonBackstream::~VariableRhoNonBackstream() { } + VariableRhoThermal::~VariableRhoThermal() { } - std::string VariableRhoNonBackstream::getName() const {return popName + "/RhoNonBackstream";} + std::string VariableRhoThermal::getName() const {return popName + "/vg_rho_thermal";} - bool VariableRhoNonBackstream::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { + bool VariableRhoThermal::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { dataType = "float"; dataSize = sizeof(Real); vectorSize = (doSkip == true) ? 0 : 1; return true; } - // Rho non backstream calculation. 
- bool VariableRhoNonBackstream::reduceData(const SpatialCell* cell,char* buffer) { - const bool calculateBackstream = false; //We don't want backstream - rhoBackstreamCalculation( cell, calculateBackstream, popID, Rho ); - const char* ptr = reinterpret_cast(&Rho); + bool VariableRhoThermal::reduceData(const SpatialCell* cell,char* buffer) { + const bool calculateNonthermal = false; //We don't want nonthermal + rhoNonthermalCalculation( cell, calculateNonthermal, popID, RhoThermal ); + const char* ptr = reinterpret_cast(&RhoThermal); for (uint i = 0; i < sizeof(Real); ++i) buffer[i] = ptr[i]; return true; } - bool VariableRhoNonBackstream::setSpatialCell(const SpatialCell* cell) { - Rho = 0.0; + bool VariableRhoThermal::setSpatialCell(const SpatialCell* cell) { + RhoThermal = 0.0; return true; } - // v backstream: - VariableVBackstream::VariableVBackstream(cuint _popID): DataReductionOperator(),popID(_popID) { + // v nonthermal: + VariableVNonthermal::VariableVNonthermal(cuint _popID): DataReductionOperator(),popID(_popID) { popName = getObjectWrapper().particleSpecies[popID].name; - doSkip = (getObjectWrapper().particleSpecies[popID].backstreamRadius == 0.0) ? true : false; + doSkip = (getObjectWrapper().particleSpecies[popID].thermalRadius == 0.0) ? true : false; } - VariableVBackstream::~VariableVBackstream() { } + VariableVNonthermal::~VariableVNonthermal() { } - std::string VariableVBackstream::getName() const {return popName + "/VBackstream";} + std::string VariableVNonthermal::getName() const {return popName + "/vg_v_nonthermal";} - bool VariableVBackstream::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { + bool VariableVNonthermal::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { dataType = "float"; dataSize = sizeof(Real); vectorSize = (doSkip == true) ? 0 : 3; return true; } - // Adding v backstream calculations to Vlasiator. - bool VariableVBackstream::reduceData(const SpatialCell* cell,char* buffer) { - const bool calculateBackstream = true; - //Calculate v backstream - VBackstreamCalculation( cell, calculateBackstream, popID, VBackstream ); - const uint VBackstreamSize = 3; - const char* ptr = reinterpret_cast(&VBackstream); - for (uint i = 0; i < VBackstreamSize*sizeof(Real); ++i) buffer[i] = ptr[i]; + bool VariableVNonthermal::reduceData(const SpatialCell* cell,char* buffer) { + const bool calculateNonthermal = true; + //Calculate v nonthermal + VNonthermalCalculation( cell, calculateNonthermal, popID, VNonthermal ); + const uint VNonthermalSize = 3; + const char* ptr = reinterpret_cast(&VNonthermal); + for (uint i = 0; i < VNonthermalSize*sizeof(Real); ++i) buffer[i] = ptr[i]; return true; } - bool VariableVBackstream::setSpatialCell(const SpatialCell* cell) { + bool VariableVNonthermal::setSpatialCell(const SpatialCell* cell) { // Initialize values for( uint i = 0; i < 3; ++i ) { - VBackstream[i] = 0.0; + VNonthermal[i] = 0.0; } return true; } - //v non backstream: - VariableVNonBackstream::VariableVNonBackstream(cuint _popID): DataReductionOperator(),popID(_popID) { + //v thermal: + VariableVThermal::VariableVThermal(cuint _popID): DataReductionOperator(),popID(_popID) { popName = getObjectWrapper().particleSpecies[popID].name; - doSkip = (getObjectWrapper().particleSpecies[popID].backstreamRadius == 0.0) ? true : false; + doSkip = (getObjectWrapper().particleSpecies[popID].thermalRadius == 0.0) ? 
true : false; } - VariableVNonBackstream::~VariableVNonBackstream() { } + VariableVThermal::~VariableVThermal() { } - std::string VariableVNonBackstream::getName() const {return popName + "/VNonBackstream";} + std::string VariableVThermal::getName() const {return popName + "/vg_v_thermal";} - bool VariableVNonBackstream::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { + bool VariableVThermal::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { dataType = "float"; dataSize = sizeof(Real); vectorSize = (doSkip == true) ? 0 : 3; return true; } - // Adding v non backstream calculations to Vlasiator. - bool VariableVNonBackstream::reduceData(const SpatialCell* cell,char* buffer) { - const bool calculateBackstream = false; - //Calculate v backstream - VBackstreamCalculation( cell, calculateBackstream, popID, V ); + bool VariableVThermal::reduceData(const SpatialCell* cell,char* buffer) { + const bool calculateNonthermal = false; + //Calculate v nonthermal + VNonthermalCalculation( cell, calculateNonthermal, popID, VThermal ); const uint vectorSize = 3; - const char* ptr = reinterpret_cast(&V); + const char* ptr = reinterpret_cast(&VThermal); for (uint i = 0; i < vectorSize*sizeof(Real); ++i) buffer[i] = ptr[i]; return true; } - bool VariableVNonBackstream::setSpatialCell(const SpatialCell* cell) { + bool VariableVThermal::setSpatialCell(const SpatialCell* cell) { // Initialize values for( uint i = 0; i < 3; ++i ) { - V[i] = 0.0; + VThermal[i] = 0.0; } return true; } - // Adding pressure calculations for backstream population to Vlasiator. + // Adding pressure calculations for nonthermal population to Vlasiator. // p_ij = m/3 * integral((v - )_i(v - )_j * f(r,v) dV) // Pressure tensor 6 components (11, 22, 33, 23, 13, 12) added by YK - // Split into VariablePTensorBackstreamDiagonal (11, 22, 33) - // and VariablePTensorOffDiagonal (23, 13, 12) - VariablePTensorBackstreamDiagonal::VariablePTensorBackstreamDiagonal(cuint _popID): DataReductionOperator(),popID(_popID) { + // Split into VariablePTensorNonthermalDiagonal (11, 22, 33) + // and VariablePTensorNonthermalOffDiagonal (23, 13, 12) + VariablePTensorNonthermalDiagonal::VariablePTensorNonthermalDiagonal(cuint _popID): DataReductionOperator(),popID(_popID) { popName = getObjectWrapper().particleSpecies[popID].name; - doSkip = (getObjectWrapper().particleSpecies[popID].backstreamRadius == 0.0) ? true : false; + doSkip = (getObjectWrapper().particleSpecies[popID].thermalRadius == 0.0) ? true : false; } - VariablePTensorBackstreamDiagonal::~VariablePTensorBackstreamDiagonal() { } + VariablePTensorNonthermalDiagonal::~VariablePTensorNonthermalDiagonal() { } - std::string VariablePTensorBackstreamDiagonal::getName() const {return popName + "/PTensorBackstreamDiagonal";} + std::string VariablePTensorNonthermalDiagonal::getName() const {return popName + "/vg_ptensor_nonthermal_diagonal";} - bool VariablePTensorBackstreamDiagonal::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { + bool VariablePTensorNonthermalDiagonal::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { dataType = "float"; dataSize = sizeof(Real); vectorSize = (doSkip == true) ? 
0 : 3; return true; } - bool VariablePTensorBackstreamDiagonal::reduceData(const SpatialCell* cell,char* buffer) { - const bool calculateBackstream = true; + bool VariablePTensorNonthermalDiagonal::reduceData(const SpatialCell* cell,char* buffer) { + const bool calculateNonthermal = true; //Calculate PTensor and save it in PTensorArray: - PTensorDiagonalBackstreamCalculations( cell, calculateBackstream, averageVX, averageVY, averageVZ, popID, PTensor ); + PTensorDiagonalNonthermalCalculations( cell, calculateNonthermal, averageVX, averageVY, averageVZ, popID, PTensor ); const uint vectorSize = 3; //Save the data into buffer: const char* ptr = reinterpret_cast(&PTensor); @@ -1155,11 +1150,11 @@ namespace DRO { return true; } - bool VariablePTensorBackstreamDiagonal::setSpatialCell(const SpatialCell* cell) { - //Get v of the backstream: + bool VariablePTensorNonthermalDiagonal::setSpatialCell(const SpatialCell* cell) { + //Get v of the nonthermal: Real V[3] = {0}; - const bool calculateBackstream = true; //We are calculating backstream - VBackstreamCalculation( cell, calculateBackstream, popID, V ); + const bool calculateNonthermal = true; //We are calculating nonthermal + VNonthermalCalculation( cell, calculateNonthermal, popID, V ); //Set the average velocities: averageVX = V[0]; averageVY = V[1]; @@ -1169,31 +1164,31 @@ namespace DRO { return true; } - // Adding pressure calculations for backstream population to Vlasiator. + // Adding pressure calculations for thermal population to Vlasiator. // p_ij = m/3 * integral((v - )_i(v - )_j * f(r,v) dV) // Pressure tensor 6 components (11, 22, 33, 23, 13, 12) added by YK - // Split into VariablePTensorNonBackstreamDiagonal (11, 22, 33) - // and VariablePTensorOffDiagonal (23, 13, 12) - VariablePTensorNonBackstreamDiagonal::VariablePTensorNonBackstreamDiagonal(cuint _popID): DataReductionOperator(),popID(_popID) { + // Split into VariablePTensorThermalDiagonal (11, 22, 33) + // and VariablePTensorThermalOffDiagonal (23, 13, 12) + VariablePTensorThermalDiagonal::VariablePTensorThermalDiagonal(cuint _popID): DataReductionOperator(),popID(_popID) { popName = getObjectWrapper().particleSpecies[popID].name; - doSkip = (getObjectWrapper().particleSpecies[popID].backstreamRadius == 0.0) ? true : false; + doSkip = (getObjectWrapper().particleSpecies[popID].thermalRadius == 0.0) ? true : false; } - VariablePTensorNonBackstreamDiagonal::~VariablePTensorNonBackstreamDiagonal() { } + VariablePTensorThermalDiagonal::~VariablePTensorThermalDiagonal() { } - std::string VariablePTensorNonBackstreamDiagonal::getName() const {return popName + "/PTensorNonBackstreamDiagonal";} + std::string VariablePTensorThermalDiagonal::getName() const {return popName + "/vg_ptensor_thermal_diagonal";} - bool VariablePTensorNonBackstreamDiagonal::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { + bool VariablePTensorThermalDiagonal::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { dataType = "float"; dataSize = sizeof(Real); vectorSize = (doSkip == true) ? 
0 : 3; return true; } - bool VariablePTensorNonBackstreamDiagonal::reduceData(const SpatialCell* cell,char* buffer) { - const bool calculateBackstream = false; + bool VariablePTensorThermalDiagonal::reduceData(const SpatialCell* cell,char* buffer) { + const bool calculateNonthermal = false; //Calculate PTensor and save it in PTensorArray: - PTensorDiagonalBackstreamCalculations( cell, calculateBackstream, averageVX, averageVY, averageVZ, popID, PTensor ); + PTensorDiagonalNonthermalCalculations( cell, calculateNonthermal, averageVX, averageVY, averageVZ, popID, PTensor ); const uint vectorSize = 3; //Save the data into buffer: const char* ptr = reinterpret_cast(&PTensor); @@ -1201,11 +1196,11 @@ namespace DRO { return true; } - bool VariablePTensorNonBackstreamDiagonal::setSpatialCell(const SpatialCell* cell) { - //Get v of the backstream: + bool VariablePTensorThermalDiagonal::setSpatialCell(const SpatialCell* cell) { + //Get v of the thermal: Real V[3] = {0}; - const bool calculateBackstream = false; //We are not calculating backstream - VBackstreamCalculation( cell, calculateBackstream, popID, V ); + const bool calculateNonthermal = false; //We are not calculating nonthermal + VNonthermalCalculation( cell, calculateNonthermal, popID, V ); //Set the average velocities: averageVX = V[0]; averageVY = V[1]; @@ -1215,26 +1210,26 @@ namespace DRO { return true; } - VariablePTensorBackstreamOffDiagonal::VariablePTensorBackstreamOffDiagonal(cuint _popID): DataReductionOperator(),popID(_popID) { + VariablePTensorNonthermalOffDiagonal::VariablePTensorNonthermalOffDiagonal(cuint _popID): DataReductionOperator(),popID(_popID) { popName = getObjectWrapper().particleSpecies[popID].name; - doSkip = (getObjectWrapper().particleSpecies[popID].backstreamRadius == 0.0) ? true : false; + doSkip = (getObjectWrapper().particleSpecies[popID].thermalRadius == 0.0) ? true : false; } - VariablePTensorBackstreamOffDiagonal::~VariablePTensorBackstreamOffDiagonal() { } + VariablePTensorNonthermalOffDiagonal::~VariablePTensorNonthermalOffDiagonal() { } - std::string VariablePTensorBackstreamOffDiagonal::getName() const {return popName + "/PTensorBackstreamOffDiagonal";} + std::string VariablePTensorNonthermalOffDiagonal::getName() const {return popName + "/vg_ptensor_nonthermal_offdiagonal";} - bool VariablePTensorBackstreamOffDiagonal::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { + bool VariablePTensorNonthermalOffDiagonal::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { dataType = "float"; dataSize = sizeof(Real); vectorSize = (doSkip == true) ? 
0 : 3; return true; } - bool VariablePTensorBackstreamOffDiagonal::reduceData(const SpatialCell* cell,char* buffer) { + bool VariablePTensorNonthermalOffDiagonal::reduceData(const SpatialCell* cell,char* buffer) { //Calculate PTensor for PTensorArray: - const bool calculateBackstream = true; + const bool calculateNonthermal = true; //Calculate and save: - PTensorOffDiagonalBackstreamCalculations( cell, calculateBackstream, averageVX, averageVY, averageVZ, popID, PTensor ); + PTensorOffDiagonalNonthermalCalculations( cell, calculateNonthermal, averageVX, averageVY, averageVZ, popID, PTensor ); const uint vectorSize = 3; //Input data into buffer const char* ptr = reinterpret_cast(&PTensor); @@ -1242,11 +1237,11 @@ namespace DRO { return true; } - bool VariablePTensorBackstreamOffDiagonal::setSpatialCell(const SpatialCell* cell) { - //Get v of the backstream: + bool VariablePTensorNonthermalOffDiagonal::setSpatialCell(const SpatialCell* cell) { + //Get v of the nonthermal: Real V[3] = {0}; - const bool calculateBackstream = true; //We are calculating backstream - VBackstreamCalculation( cell, calculateBackstream, popID, V ); + const bool calculateNonthermal = true; //We are calculating nonthermal + VNonthermalCalculation( cell, calculateNonthermal, popID, V ); //Set the average velocities: averageVX = V[0]; averageVY = V[1]; @@ -1255,26 +1250,26 @@ namespace DRO { return true; } - VariablePTensorNonBackstreamOffDiagonal::VariablePTensorNonBackstreamOffDiagonal(cuint _popID): DataReductionOperator(),popID(_popID) { + VariablePTensorThermalOffDiagonal::VariablePTensorThermalOffDiagonal(cuint _popID): DataReductionOperator(),popID(_popID) { popName = getObjectWrapper().particleSpecies[popID].name; - doSkip = (getObjectWrapper().particleSpecies[popID].backstreamRadius == 0.0) ? true : false; + doSkip = (getObjectWrapper().particleSpecies[popID].thermalRadius == 0.0) ? true : false; } - VariablePTensorNonBackstreamOffDiagonal::~VariablePTensorNonBackstreamOffDiagonal() { } + VariablePTensorThermalOffDiagonal::~VariablePTensorThermalOffDiagonal() { } - std::string VariablePTensorNonBackstreamOffDiagonal::getName() const {return popName + "/PTensorNonBackstreamOffDiagonal";} + std::string VariablePTensorThermalOffDiagonal::getName() const {return popName + "/vg_ptensor_thermal_offdiagonal";} - bool VariablePTensorNonBackstreamOffDiagonal::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { + bool VariablePTensorThermalOffDiagonal::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { dataType = "float"; dataSize = sizeof(Real); vectorSize = (doSkip == true) ? 
0 : 3; return true; } - bool VariablePTensorNonBackstreamOffDiagonal::reduceData(const SpatialCell* cell,char* buffer) { + bool VariablePTensorThermalOffDiagonal::reduceData(const SpatialCell* cell,char* buffer) { //Calculate PTensor for PTensorArray: - const bool calculateBackstream = false; + const bool calculateNonthermal = false; //Calculate and save: - PTensorOffDiagonalBackstreamCalculations( cell, calculateBackstream, averageVX, averageVY, averageVZ, popID, PTensor ); + PTensorOffDiagonalNonthermalCalculations( cell, calculateNonthermal, averageVX, averageVY, averageVZ, popID, PTensor ); const uint vectorSize = 3; //Input data into buffer const char* ptr = reinterpret_cast(&PTensor); @@ -1282,11 +1277,11 @@ namespace DRO { return true; } - bool VariablePTensorNonBackstreamOffDiagonal::setSpatialCell(const SpatialCell* cell) { - //Get v of the backstream: + bool VariablePTensorThermalOffDiagonal::setSpatialCell(const SpatialCell* cell) { + //Get v of the nonthermal: Real V[3] = {0}; - const bool calculateBackstream = false; //We are not calculating backstream - VBackstreamCalculation( cell, calculateBackstream, popID, V ); + const bool calculateNonthermal = false; //We are not calculating nonthermal + VNonthermalCalculation( cell, calculateNonthermal, popID, V ); //Set the average velocities: averageVX = V[0]; averageVY = V[1]; @@ -1308,7 +1303,7 @@ namespace DRO { return true; } - std::string VariableEffectiveSparsityThreshold::getName() const {return popName + "/EffectiveSparsityThreshold";} + std::string VariableEffectiveSparsityThreshold::getName() const {return popName + "/vg_effectivesparsitythreshold";} bool VariableEffectiveSparsityThreshold::reduceData(const spatial_cell::SpatialCell* cell,char* buffer) { Real dummy; @@ -1348,7 +1343,7 @@ namespace DRO { } VariablePrecipitationDiffFlux::~VariablePrecipitationDiffFlux() { } - std::string VariablePrecipitationDiffFlux::getName() const {return popName + "/PrecipitationDiffFlux";} + std::string VariablePrecipitationDiffFlux::getName() const {return popName + "/vg_precipitationdifferentialflux";} bool VariablePrecipitationDiffFlux::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { dataType = "float"; @@ -1485,7 +1480,7 @@ namespace DRO { } VariableEnergyDensity::~VariableEnergyDensity() { } - std::string VariableEnergyDensity::getName() const {return popName + "/EnergyDensity";} + std::string VariableEnergyDensity::getName() const {return popName + "/vg_energydensity";} bool VariableEnergyDensity::getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const { dataType = "float"; diff --git a/datareduction/datareductionoperator.h b/datareduction/datareductionoperator.h index a5ec54ccc..15cfd2392 100644 --- a/datareduction/datareductionoperator.h +++ b/datareduction/datareductionoperator.h @@ -370,10 +370,10 @@ namespace DRO { }; - class VariableRhoBackstream: public DataReductionOperator { + class VariableRhoThermal: public DataReductionOperator { public: - VariableRhoBackstream(cuint popID); - virtual ~VariableRhoBackstream(); + VariableRhoThermal(cuint popID); + virtual ~VariableRhoThermal(); virtual bool getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const; virtual std::string getName() const; @@ -381,16 +381,16 @@ namespace DRO { virtual bool setSpatialCell(const SpatialCell* cell); protected: - Real RhoBackstream; + Real RhoThermal; uint popID; std::string popName; bool doSkip; }; - class 
VariableRhoNonBackstream: public DataReductionOperator { + class VariableRhoNonthermal: public DataReductionOperator { public: - VariableRhoNonBackstream(cuint popID); - virtual ~VariableRhoNonBackstream(); + VariableRhoNonthermal(cuint popID); + virtual ~VariableRhoNonthermal(); virtual bool getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const; virtual std::string getName() const; @@ -398,16 +398,16 @@ namespace DRO { virtual bool setSpatialCell(const SpatialCell* cell); protected: - Real Rho; + Real RhoNonthermal; uint popID; std::string popName; bool doSkip; }; - class VariableVBackstream: public DataReductionOperator { + class VariableVThermal: public DataReductionOperator { public: - VariableVBackstream(cuint popID); - virtual ~VariableVBackstream(); + VariableVThermal(cuint popID); + virtual ~VariableVThermal(); virtual bool getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const; virtual std::string getName() const; @@ -415,16 +415,16 @@ namespace DRO { virtual bool setSpatialCell(const SpatialCell* cell); protected: - Real VBackstream[3]; + Real VThermal[3]; uint popID; std::string popName; bool doSkip; }; - class VariableVNonBackstream: public DataReductionOperator { + class VariableVNonthermal: public DataReductionOperator { public: - VariableVNonBackstream(cuint popID); - virtual ~VariableVNonBackstream(); + VariableVNonthermal(cuint popID); + virtual ~VariableVNonthermal(); virtual bool getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const; virtual std::string getName() const; @@ -432,16 +432,16 @@ namespace DRO { virtual bool setSpatialCell(const SpatialCell* cell); protected: - Real V[3]; + Real VNonthermal[3]; uint popID; std::string popName; bool doSkip; }; - class VariablePTensorBackstreamDiagonal: public DataReductionOperator { + class VariablePTensorThermalDiagonal: public DataReductionOperator { public: - VariablePTensorBackstreamDiagonal(cuint popID); - virtual ~VariablePTensorBackstreamDiagonal(); + VariablePTensorThermalDiagonal(cuint popID); + virtual ~VariablePTensorThermalDiagonal(); virtual bool getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const; virtual std::string getName() const; @@ -456,10 +456,10 @@ namespace DRO { bool doSkip; }; - class VariablePTensorNonBackstreamDiagonal: public DataReductionOperator { + class VariablePTensorNonthermalDiagonal: public DataReductionOperator { public: - VariablePTensorNonBackstreamDiagonal(cuint popID); - virtual ~VariablePTensorNonBackstreamDiagonal(); + VariablePTensorNonthermalDiagonal(cuint popID); + virtual ~VariablePTensorNonthermalDiagonal(); virtual bool getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const; virtual std::string getName() const; @@ -474,10 +474,10 @@ namespace DRO { bool doSkip; }; - class VariablePTensorBackstreamOffDiagonal: public DataReductionOperator { + class VariablePTensorThermalOffDiagonal: public DataReductionOperator { public: - VariablePTensorBackstreamOffDiagonal(cuint popID); - virtual ~VariablePTensorBackstreamOffDiagonal(); + VariablePTensorThermalOffDiagonal(cuint popID); + virtual ~VariablePTensorThermalOffDiagonal(); virtual bool getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const; virtual std::string getName() const; @@ -492,10 +492,10 @@ namespace DRO { bool doSkip; }; - class VariablePTensorNonBackstreamOffDiagonal: public 
DataReductionOperator { + class VariablePTensorNonthermalOffDiagonal: public DataReductionOperator { public: - VariablePTensorNonBackstreamOffDiagonal(cuint popID); - virtual ~VariablePTensorNonBackstreamOffDiagonal(); + VariablePTensorNonthermalOffDiagonal(cuint popID); + virtual ~VariablePTensorNonthermalOffDiagonal(); virtual bool getDataVectorInfo(std::string& dataType,unsigned int& dataSize,unsigned int& vectorSize) const; virtual std::string getName() const; diff --git a/object_wrapper.cpp b/object_wrapper.cpp index ed3456f81..1e58512d6 100644 --- a/object_wrapper.cpp +++ b/object_wrapper.cpp @@ -65,11 +65,11 @@ bool ObjectWrapper::addPopulationParameters() { RP::add(pop + "_vspace.vz_length","Initial number of velocity blocks in vz-direction.",1); RP::add(pop + "_vspace.max_refinement_level","Maximum allowed mesh refinement level.", 1); - // Backstreaming parameters - Readparameters::add(pop + "_backstream.vx", "Center coordinate for the maxwellian distribution. Used for calculating the backstream moments.", -500000.0); - Readparameters::add(pop + "_backstream.vy", "Center coordinate for the maxwellian distribution. Used for calculating the backstream moments.", 0.0); - Readparameters::add(pop + "_backstream.vz", "Center coordinate for the maxwellian distribution. Used for calculating the backstream moments.", 0.0); - Readparameters::add(pop + "_backstream.radius", "Radius of the maxwellian distribution. Used for calculating the backstream moments. If set to 0 (default), the backstream/non-backstream DROs are skipped.", 0.0); + // Thermal / suprathermal parameters + Readparameters::add(pop + "_thermal.vx", "Center coordinate for the maxwellian distribution. Used for calculating the suprathermal moments.", -500000.0); + Readparameters::add(pop + "_thermal.vy", "Center coordinate for the maxwellian distribution. Used for calculating the suprathermal moments.", 0.0); + Readparameters::add(pop + "_thermal.vz", "Center coordinate for the maxwellian distribution. Used for calculating the suprathermal moments.", 0.0); + Readparameters::add(pop + "_thermal.radius", "Radius of the maxwellian distribution. Used for calculating the suprathermal moments. 
If set to 0 (default), the thermal/suprathermal DROs are skipped.", 0.0); // Precipitation parameters Readparameters::add(pop + "_precipitation.nChannels", "Number of energy channels for precipitation differential flux evaluation", 16); @@ -161,11 +161,11 @@ bool ObjectWrapper::getParameters() { vMesh.refLevelMaxAllowed = maxRefLevel; - //Get backstream/non-backstream moments parameters - Readparameters::get(pop + "_backstream.radius", species.backstreamRadius); - Readparameters::get(pop + "_backstream.vx", species.backstreamV[0]); - Readparameters::get(pop + "_backstream.vy", species.backstreamV[1]); - Readparameters::get(pop + "_backstream.vz", species.backstreamV[2]); + //Get thermal / suprathermal moments parameters + Readparameters::get(pop + "_thermal.radius", species.thermalRadius); + Readparameters::get(pop + "_thermal.vx", species.thermalV[0]); + Readparameters::get(pop + "_thermal.vy", species.thermalV[1]); + Readparameters::get(pop + "_thermal.vz", species.thermalV[2]); //Get energy density parameters Readparameters::get(pop + "_energydensity.limit1", species.EnergyDensityLimit1); diff --git a/particle_species.h b/particle_species.h index 7eba2f2d2..da89510ef 100644 --- a/particle_species.h +++ b/particle_species.h @@ -57,8 +57,8 @@ namespace species { Real sparseDynamicMinValue1; /*!< The minimum value for the minValue*/ Real sparseDynamicMinValue2; /*!< The maximum value for the minValue*/ - Real backstreamRadius; /*!< Radius of sphere to split the distribution into backstreaming and non-backstreaming. 0 (default in cfg) disables the DRO. */ - std::array<Real, 3> backstreamV; /*!< Centre of sphere to split the distribution into backstreaming and non-backstreaming. 0 (default in cfg) disables the DRO. */ + Real thermalRadius; /*!< Radius of sphere to split the distribution into thermal and suprathermal. 0 (default in cfg) disables the DRO. */ + std::array<Real, 3> thermalV; /*!< Centre of sphere to split the distribution into thermal and suprathermal. 0 (default in cfg) disables the DRO. */ Real EnergyDensityLimit1; /*!< Lower bound for second Energy density bin in units of solar wind ram energy. Default 5. */ Real EnergyDensityLimit2; /*!< Lower bound for third Energy density bin in units of solar wind ram energy. Default 10. */ From d91ce9e0fe709b04ff9a722f17898022cdea2386 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Tue, 21 May 2019 16:10:21 +0300 Subject: [PATCH 477/602] updated parameters.cpp help output. Lists current and deprecated forms separately.
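For example, a run configuration could request output under the current names as follows (an illustrative sketch assuming the usual section/key cfg syntax described in the help strings; the variable names are taken from the lists in this patch and are matched case-insensitively):

    [variables]
    output = vg_rhom
    output = populations_vg_ptensor
    output = fg_maxdt_fieldsolver
    diagnostic = populations_blocks

Deprecated spellings such as Rhom, populations_PTensor and MaxFieldsdt remain accepted, but the help output now lists them separately.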
--- datareduction/datareducer.cpp | 20 +++++----- parameters.cpp | 74 ++++++++++++++++++++++++----------- 2 files changed, 62 insertions(+), 32 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 92b1ba119..214158a87 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -415,7 +415,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - if(lowercase == "maxfieldsdt" || lowercase == "fg_maxfieldsdt") { + if(lowercase == "maxfieldsdt" || lowercase == "fg_maxfieldsdt" || lowercase == "fg_maxdt_fieldsolver") { // Maximum timestep constraint as calculated by the fieldsolver outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_maxdt_fieldsolver",[]( FsGrid< std::array, 2>& perBGrid, @@ -740,7 +740,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti outputReducer->addMetadata(outputReducer->size()-1,"Pa","$\\mathrm{Pa}$","$P_\\mathrm{fg}$","1.0"); continue; } - if(lowercase == "populations_ptensor") { + if(lowercase == "populations_ptensor" || lowercase == "populations_vg_ptensor") { // Per-population pressure tensor, stored as diagonal and offdiagonal components for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; @@ -752,7 +752,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - if(lowercase == "bvolderivs" || lowercase == "b_vol_derivatives" || lowercase == "b_vol_derivs") { + if(lowercase == "bvolderivs" || lowercase == "b_vol_derivs" || lowercase == "b_vol_derivatives") { // Volume-averaged derivatives outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("vg_dperbxvoldy",bvolderivatives::dPERBXVOLdy,1)); outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("vg_dperbxvoldz",bvolderivatives::dPERBXVOLdz,1)); @@ -956,7 +956,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("rhom",CellParams::RHOM,1)); continue; } - if(lowercase == "populations_rholossadjust" || lowercase == "populations_vg_rho_loss_adjust" || lowercase == "populations_rho_loss_adjust") { + if(lowercase == "populations_rholossadjust" || lowercase == "populations_rho_loss_adjust") { // Per-particle overall lost particle number for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; @@ -969,19 +969,19 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti // diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("rho_loss_velocity_boundary",CellParams::RHOLOSSVELBOUNDARY,1)); // continue; //} - if(lowercase == "lbweight" || lowercase == "") { + if(lowercase == "lbweight" || lowercase == "loadbalance_weight") { diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("loadbalance_weight",CellParams::LBWEIGHTCOUNTER,1)); continue; } - if(lowercase == "maxvdt") { + if(lowercase == "maxvdt" || lowercase == "maxdt_acceleration") { diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("maxdt_acceleration",CellParams::MAXVDT,1)); continue; } - if(lowercase == "maxrdt") { + if(lowercase == "maxrdt" || lowercase == "maxdt_translation") { diagnosticReducer->addOperator(new 
DRO::DataReductionOperatorCellParams("maxdt_translation",CellParams::MAXRDT,1)); continue; } - if(lowercase == "maxfieldsdt") { + if(lowercase == "maxfieldsdt" || lowercase == "maxdt_fieldsolver") { diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("maxdt_fieldsolver",CellParams::MAXFDT,1)); continue; } @@ -997,7 +997,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - if(lowercase == "populations_maxrdt") { + if(lowercase == "populations_maxrdt" || lowercase == "populations_maxdt_translation") { for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; @@ -1005,7 +1005,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - if(lowercase == "populations_maxvdt") { + if(lowercase == "populations_maxvdt" || lowercase == "populations_maxdt_acceleration") { for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; diff --git a/parameters.cpp b/parameters.cpp index 54328db91..eaf87ce07 100644 --- a/parameters.cpp +++ b/parameters.cpp @@ -215,34 +215,64 @@ bool Parameters::addParameters(){ // Output variable parameters // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh - Readparameters::addComposing("variables.output", std::string()+"List of data reduction operators (DROs) to add to the grid file output. Each variable to be added has to be on a new line output = XXX. "+ + Readparameters::addComposing("variables.output", std::string()+"List of data reduction operators (DROs) to add to the grid file output. Each variable to be added has to be on a new line output = XXX. Names are case insensitive. "+ "Available (20190521): "+ - "B fg_B BackgroundB fg_BackgroundB PerturbedB fg_PerturbedB "+ - "E fg_E "+ - "Rhom vg_Rhom fg_Rhom Rhoq vg_Rhoq fg_Rhoq populations_Rho "+ - "V vg_V fg_V populations_V "+ + "fg_b fg_b_background fg_b_perturbed fg_e "+ + "vg_rhom vg_rhoq populations_vg_rho "+ + "fg_rhom fg_rhoq "+ + "vg_v fg_v populations_vg_v "+ + "populations_vg_moments_thermal populations_vg_moments_nonthermal "+ + "populations_vg_effectivesparsitythreshold populations_vg_rho_loss_adjust "+ + "populations_vg_energydensity populations_vg_precipitationdifferentialflux "+ + "vg_maxdt_acceleration vg_maxdt_translation populations_vg_maxdt_acceleration populations_vg_maxdt_translation "+ + "fg_maxdt_fieldsolver "+ + "vg_rank fg_rank vg_loadbalance_weight "+ + "vg_boundarytype fg_boundarytype vg_boundarylayer fg_boundarylayer "+ + "populations_vg_blocks vg_f_saved "+ + "populations_vg_acceleration_subcycles "+ + "vg_e_vol fg_e_vol "+ + "fg_e_hall vg_e_gradpe fg_b_vol vg_b_vol vg_b_background_vol vg_b_perturbed_vol "+ + "vg_pressure fg_pressure populations_vg_ptensor "+ + "b_vol_derivatives "+ + "vg_gridcoordinates fg_gridcoordinates meshdata"); + + Readparameters::addComposing("variables_deprecated.output", std::string()+"List of deprecated names for data reduction operators (DROs). Names are case insensitive. 
"+ + "Available (20190521): "+ + "B BackgroundB fg_BackgroundB PerturbedB fg_PerturbedB "+ + "E "+ + "Rhom Rhoq populations_Rho "+ + "V populations_V "+ "populations_moments_Backstream populations_moments_NonBackstream "+ - "populations_EffectiveSparsityThreshold populations_RhoLossAdjust "+ - "populations_EnergyDensity populations_PrecipitationFlux "+ - "LBweight MaxVdt MaxRdt populations_MaxVdt populations_MaxRdt MaxFieldsdt "+ - "MPIrank vg_rank FsGridRank fg_rank "+ - "FsGridBoundaryType BoundaryType vg_BoundaryType fg_BoundaryType FsGridBoundaryLayer BoundaryLayer vg_BoundaryLayer fg_BoundaryLayer "+ - "populations_Blocks fSaved "+ - "populations_accSubcycles "+ - "VolE vg_VolE fg_VolE HallE GradPeE VolB vg_VolB fg_VolB BackgroundVolB PerturbedVolB "+ + "populations_moments_thermal populations_moments_nonthermal "+ + "populations_minvalue populations_EffectiveSparsityThreshold populations_RhoLossAdjust populations_rho_loss_adjust"+ + "populations_EnergyDensity populations_PrecipitationFlux populations_precipitationdifferentialflux"+ + "LBweight vg_lbweight vg_loadbalanceweight MaxVdt MaxRdt populations_MaxVdt populations_MaxRdt "+ + "populations_maxdt_acceleration populations_maxdt_translation MaxFieldsdt fg_maxfieldsdt"+ + "MPIrank FsGridRank "+ + "FsGridBoundaryType BoundaryType FsGridBoundaryLayer BoundaryLayer "+ + "populations_Blocks fSaved vg_fsaved"+ + "populations_accSubcycles populations_acceleration_subcycles"+ + "VolE vg_VolE Evol E_vol fg_VolE fg_Evol "+ + "HallE fg_HallE GradPeE e_gradpe VolB vg_VolB fg_VolB B_vol Bvol vg_Bvol fg_volB fg_Bvol"+ + "BackgroundVolB PerturbedVolB "+ "Pressure vg_Pressure fg_Pressure populations_PTensor "+ - "BVOLderivs "+ - "vg_GridCoordinates fg_GridCoordinates MeshData"); + "BVOLderivs b_vol_derivs"); // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh - Readparameters::addComposing("variables.diagnostic", std::string()+"List of data reduction operators (DROs) to add to the diagnostic runtime output. Each variable to be added has to be on a new line diagnostic = XXX. "+ + Readparameters::addComposing("variables.diagnostic", std::string()+"List of data reduction operators (DROs) to add to the diagnostic runtime output. Each variable to be added has to be on a new line diagnostic = XXX. Names are case insensitive. "+ + "Available (20190320): "+ + "populations_blocks "+ + "rhom populations_rho_loss_adjust"+ + "loadbalance_weight"+ + "maxdt_acceleration maxdt_translation populations_maxdt_acceleration populations_maxdt_translation "+ + "maxdt_fieldsolver "+ + "populations_maxdistributionfunction populations_mindistributionfunction"); + + Readparameters::addComposing("variables_deprecated.diagnostic", std::string()+"List of deprecated data reduction operators (DROs) to add to the diagnostic runtime output. Names are case insensitive. "+ "Available (20190320): "+ - "FluxB FluxE "+ - "populations_Blocks "+ - "Rhom populations_RhoLossAdjust "+ - "LBweight "+ - "populations_MaxVdt MaxVdt populations_MaxRdt MaxRdt MaxFieldsdt "+ - "populations_MaxDistributionFunction populations_MinDistributionFunction"); + "populations_rholossadjust"+ + "LBweight"+ + "populations_MaxVdt MaxVdt populations_MaxRdt MaxRdt MaxFieldsdt"); // bailout parameters Readparameters::add("bailout.write_restart", "If 1, write a restart file on bailout. 
Gets reset when sending a STOP (1) or a KILL (0).", true); From 14edcc9841def2f0c65810edab37aac4c7367e19 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Tue, 14 May 2019 15:25:37 +0300 Subject: [PATCH 478/602] Write fsgrid mesh data and fields into restart files. --- iowrite.cpp | 114 ++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 110 insertions(+), 4 deletions(-) diff --git a/iowrite.cpp b/iowrite.cpp index d412a2b2e..33009da45 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -826,7 +827,7 @@ bool writeFsGridMetadata(FsGrid< fsgrids::technical, 2>& technicalGrid, vlsv::Wr //The visit plugin expects MESH_BBOX as a keyword. We only write one //from the first rank. std::array& globalSize = technicalGrid.getGlobalSize(); - std::array boundaryBox({globalSize[0], globalSize[1], globalSize[2], + std::array boundaryBox({globalSize[0], globalSize[1], globalSize[2], 1,1,1}); if(technicalGrid.getRank() == 0) { @@ -842,15 +843,15 @@ bool writeFsGridMetadata(FsGrid< fsgrids::technical, 2>& technicalGrid, vlsv::Wr // Write three 1-dimensional arrays of node coordinates (x,y,z) for // visit to create a cartesian grid out of. std::vector xNodeCoordinates(globalSize[0]+1); - for(uint64_t i=0; i yNodeCoordinates(globalSize[1]+1); - for(uint64_t i=0; i zNodeCoordinates(globalSize[2]+1); - for(uint64_t i=0; i& mpiGrid, //Write domain sizes: if( writeDomainSizes( vlsvWriter, meshName, local_cells.size(), ghost_cells.size() ) == false ) return false; + //Write FSGrid metadata + if( writeFsGridMetadata( technicalGrid, vlsvWriter ) == false ) return false; + phiprof::stop("metadataIO"); phiprof::start("reduceddataIO"); //write out DROs we need for restarts @@ -1291,6 +1295,108 @@ bool writeRestart(dccrg::Dccrg& mpiGrid, restartReducer.addOperator(new DRO::MPIrank); restartReducer.addOperator(new DRO::BoundaryType); restartReducer.addOperator(new DRO::BoundaryLayer); + + // Fsgrid Reducers + restartReducer.addOperator(new DRO::DataReductionOperatorFsGrid("fg_EFIELD",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*fsgrids::efield::N_EFIELD); + int index=0; + for(int z=0; z, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*fsgrids::bfield::N_BFIELD); + int index=0; + for(int z=0; z, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& 
technicalGrid)->std::vector { + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*fsgrids::bgbfield::N_BGB); + int index=0; + for(int z=0; z, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*fsgrids::dperb::N_DPERB); + int index=0; + for(int z=0; z Date: Wed, 15 May 2019 14:14:48 +0300 Subject: [PATCH 479/602] Fix some leftover copy'n'paste comments and superfluous debug output. --- datareduction/datareducer.cpp | 2 +- ioread.cpp | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index c96cfebe3..4bfacaef3 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -709,7 +709,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti std::array& gridSize = technicalGrid.getLocalSize(); std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]); - // Iterate through fsgrid cells and extract X coordinateL + // Iterate through fsgrid cells and extract X coordinate for(int z=0; zsecond << " domains" << endl; N_domains = atoi(it->second.c_str()); } From 35bacbc7aa739768e5cd35621a454dd0968d3b0b Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Tue, 21 May 2019 16:15:56 +0300 Subject: [PATCH 480/602] Fsgrid restart reading code, currently only for same number of nodes. --- grid.cpp | 2 +- ioread.cpp | 94 ++++++++++++++++++++++++++++++++++++++++++++++++++++- ioread.h | 6 ++++ iowrite.cpp | 5 +++ 4 files changed, 105 insertions(+), 2 deletions(-) diff --git a/grid.cpp b/grid.cpp index e567bd0ec..4951a807f 100644 --- a/grid.cpp +++ b/grid.cpp @@ -187,7 +187,7 @@ void initializeGrids( if (P::isRestart) { logFile << "Restart from "<< P::restartFileName << std::endl << writeVerbose; phiprof::start("Read restart"); - if (readGrid(mpiGrid,P::restartFileName) == false) { + if (readGrid(mpiGrid,perBGrid,EGradPeGrid,momentsGrid,BgBGrid,volGrid,technicalGrid,P::restartFileName) == false) { logFile << "(MAIN) ERROR: restarting failed" << endl; exit(1); } diff --git a/ioread.cpp b/ioread.cpp index 4d70279b2..0313a587e 100644 --- a/ioread.cpp +++ b/ioread.cpp @@ -777,6 +777,78 @@ bool readCellParamsVariable( return false; } +template bool readFsGridVariable( + vlsv::ParallelReader& file, const string& variableName, int numWritingRanks, FsGrid,2>& targetGrid) { + + uint64_t arraySize; + uint64_t vectorSize; + vlsv::datatype::type dataType; + uint64_t byteSize; + list > attribs; + + attribs.push_back(make_pair("name",variableName)); + attribs.push_back(make_pair("mesh","fsgrid")); + + if (file.getArrayInfo("VARIABLE",attribs,arraySize,vectorSize,dataType,byteSize) == false) { + logFile << "(RESTART) ERROR: Failed to read " << endl << write; + return false; + } + if(! (dataType == vlsv::datatype::type::FLOAT && byteSize == sizeof(double))) { + logFile << "(RESTART) ERROR: Attempting to read fsgrid variable " << variableName << ", but it is not a double dataset." << endl << write; + return false; + } + + // Are we restarting from the same number of tasks, or a different number? 
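+ // Note for the equal-count branch below: tasks are ordered row-major, i.e. task t
+ // covers decomposition slot (t % decomposition[0], (t / decomposition[0]) % decomposition[1],
+ // (t / decomposition[0]) / decomposition[1]), so its read offset in the file is the
+ // sum of the cell counts of all lower-numbered tasks.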
+ int size, myRank; + MPI_Comm_size(MPI_COMM_WORLD, &size); + MPI_Comm_rank(MPI_COMM_WORLD, &myRank); + if(size == numWritingRanks) { + // Easy case: same number of tasks => slurp it in. + std::array& localSize = targetGrid.getLocalSize(); + std::array& globalSize = targetGrid.getGlobalSize(); + + std::array decomposition; + targetGrid.computeDomainDecomposition(globalSize, size, decomposition); + + // Determine our tasks storage size + size_t storageSize = localSize[0]*localSize[1]*localSize[2]; + + // Determine offset in file by summing up all the previous tasks' sizes. + size_t localStartOffset = 0; + for(int task = 0; task < myRank; task++) { + std::array thatTasksSize; + thatTasksSize[0] = targetGrid.calcLocalSize(globalSize[0], decomposition[0], task%decomposition[0]); + thatTasksSize[1] = targetGrid.calcLocalSize(globalSize[1], decomposition[1], (task/decomposition[0])%decomposition[1]); + thatTasksSize[2] = targetGrid.calcLocalSize(globalSize[2], decomposition[2], (task/decomposition[0])/decomposition[1]); + localStartOffset += thatTasksSize[0] * thatTasksSize[1] * thatTasksSize[2]; + } + + // Read into buffer + std::vector buffer(storageSize*N); + + if(file.readArray("VARIABLE",attribs, localStartOffset*N, storageSize, (char*)buffer.data()) == false) { + logFile << "(RESTART) ERROR: Failed to read fsgrid variable " << variableName << endl << write; + return false; + } + + // Assign buffer into fsgrid + int index=0; + for(int z=0; z& mpiGrid, + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, const std::string& name) { vector fileCells; /*< CellIds for all cells in file*/ vector nBlocks;/*< Number of blocks for all cells in file*/ @@ -1016,6 +1094,14 @@ bool exec_readGrid(dccrg::Dccrg& mpiGrid, } phiprof::stop("readBlockData"); + // Read fsgrid data back in + int fsgridInputRanks=0; + if(readScalarParameter(file,"numWritingRanks",fsgridInputRanks, MASTER_RANK, MPI_COMM_WORLD) == false) { + exitOnError(false, "(RESTART) FSGrid writing rank number not found in restart file", MPI_COMM_WORLD); + } + success = readFsGridVariable(file, "fg_BGB", fsgridInputRanks, BgBGrid); + success = readFsGridVariable(file, "fg_PERB", fsgridInputRanks, perBGrid); + success = file.close(); phiprof::stop("readGrid"); @@ -1030,7 +1116,13 @@ bool exec_readGrid(dccrg::Dccrg& mpiGrid, \param name Name of the restart file e.g. "restart.00052.vlsv" */ bool readGrid(dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, const std::string& name){ //Check the vlsv version from the file: - return exec_readGrid(mpiGrid,name); + return exec_readGrid(mpiGrid,perBGrid,EGradPeGrid,momentsGrid,BgBGrid,volGrid,technicalGrid,name); } diff --git a/ioread.h b/ioread.h index 2560e0163..4f47966ae 100644 --- a/ioread.h +++ b/ioread.h @@ -37,6 +37,12 @@ \param name Name of the restart file e.g. 
"restart.00052.vlsv" */ bool readGrid(dccrg::Dccrg& mpiGrid, + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2> & EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid, const std::string& name); diff --git a/iowrite.cpp b/iowrite.cpp index 33009da45..aee11c729 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -893,6 +893,11 @@ bool writeFsGridMetadata(FsGrid< fsgrids::technical, 2>& technicalGrid, vlsv::Wr std::array meshDomainSize({globalIds.size(), 0}); vlsvWriter.writeArray("MESH_DOMAIN_SIZES", xmlAttributes, 1, 2, &meshDomainSize[0]); + // how many MPI ranks we wrote from + int size; + MPI_Comm_size(MPI_COMM_WORLD, &size); + vlsvWriter.writeParameter("numWritingRanks", &size); + // Finally, write mesh object itself. xmlAttributes.clear(); xmlAttributes["name"] = meshName; From 2fd355f406bd7836e481346edad62d4412896cfc Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Wed, 22 May 2019 14:27:49 +0300 Subject: [PATCH 481/602] Removed reserve statements, moved the size to the vector constructor argument. --- vlasovsolver/cpu_trans_map_amr.cpp | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index a35bea699..4e9873ad7 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -521,8 +521,7 @@ void propagatePencil(Vec* dz, Vec* values, const uint dimension, const uint nTargetNeighborsPerPencil = 1; // Vector buffer where we write data, initialized to 0*/ - std::vector> targetValues; - targetValues.reserve((lengthOfPencil + 2 * nTargetNeighborsPerPencil) * WID3 / VECL); + std::vector> targetValues((lengthOfPencil + 2 * nTargetNeighborsPerPencil) * WID3 / VECL); for (uint i = 0; i < (lengthOfPencil + 2 * nTargetNeighborsPerPencil) * WID3 / VECL; i++) { @@ -1102,8 +1101,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& std::vector targetBlockData((pencils.sumOfLengths + 2 * pencils.N) * WID3); // Allocate vectorized targetvecdata sum(lengths of pencils)*WID3 / VECL) // Add padding by 2 for each pencil - std::vector> targetVecData; - targetVecData.reserve((pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL); + std::vector> targetVecData((pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL); // Initialize targetvecdata to 0 for( uint i = 0; i < (pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL; i++ ) { @@ -1135,8 +1133,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& computeSpatialSourceCellsForPencil(mpiGrid, pencils, pencili, dimension, sourceCells.data()); // dz is the cell size in the direction of the pencil - std::vector> dz; - dz.reserve(sourceLength); + std::vector> dz(sourceLength); uint i = 0; for(auto cell: sourceCells) { switch (dimension) { @@ -1156,8 +1153,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Allocate source data: sourcedata> sourceVecData; - sourceVecData.reserve(sourceLength * WID3 / VECL); + std::vector> sourceVecData(sourceLength * WID3 / VECL); // load data(=> sourcedata) / (proper xy reconstruction in future) copy_trans_block_data_amr(sourceCells.data(), blockGID, L, sourceVecData.data(), From 5f24ab70cd259dc554a79cb507a09359d4b13c76 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Thu, 23 May 2019 12:11:51 +0300 Subject: [PATCH 482/602] Removed an useless temporary data array from trans_map_1d_amr. 
Target data was being temporarily stored in a vector type array before being copied into a regular array and then copied again into the data fields of the SpatialCell. The vector array is now gone and the data is directly written from the pencil into a regular array before it gets copied into the cells. --- vlasovsolver/cpu_trans_map_amr.cpp | 106 ++++++++++------------------- 1 file changed, 35 insertions(+), 71 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 8fbcba984..da211e1ad 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1098,19 +1098,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& phiprof::start(t1); std::vector targetBlockData((pencils.sumOfLengths + 2 * pencils.N) * WID3); - // Allocate vectorized targetvecdata sum(lengths of pencils)*WID3 / VECL) - // Add padding by 2 for each pencil - Vec targetVecData[(pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL]; - - // Initialize targetvecdata to 0 - for( uint i = 0; i < (pencils.sumOfLengths + 2 * pencils.N) * WID3 / VECL; i++ ) { - targetVecData[i] = Vec(0.0); - } - - // TODO: There's probably a smarter way to keep track of where we are writing - // in the target data array. - uint targetDataIndex = 0; - + // Compute spatial neighbors for target cells. // For targets we need the local cells, plus a padding of 1 cell at both ends std::vector targetCells(pencils.sumOfLengths + pencils.N * 2 ); @@ -1161,46 +1149,55 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Dz and sourceVecData are both padded by VLASOV_STENCIL_WIDTH // Dz has 1 value/cell, sourceVecData has WID3 values/cell propagatePencil(dz, sourceVecData, dimension, blockGID, dt, vmesh, L); + - if (printTargets) std::cout << "Target cells for pencil " << pencili << ", rank " << myRank << ": "; - // sourcedata => targetdata[this pencil]) - for (uint i = 0; i < targetLength; i++) { - if (printTargets) { - if( targetCells[i + totalTargetLength] != NULL) { - std::cout << targetCells[i + totalTargetLength]->parameters[CellParams::CELLID] << " "; - } else { - std::cout << "NULL" << " "; - } - } + // sourceVecData => targetBlockData[this pencil]) + + // Loop over cells in pencil + for (uint icell = 0; icell < targetLength; icell++) { + // Loop over 1st vspace dimension for (uint k=0; ksysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + // Get local velocity block id const vmesh::LocalID blockLID = spatial_cell->get_velocity_block_local_id(blockGID, popID); - // Check for invalid id + + // Check for invalid block id if (blockLID != vmesh::VelocityMesh::invalidLocalID()) { + // Get a pointer to the block data Realf* blockData = spatial_cell->get_data(blockLID, popID); + // Loop over velocity block cells for(int i = 0; i < WID3; i++) { blockData[i] = 0.0; @@ -1214,39 +1211,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& totalTargetLength = 0; for(uint pencili = 0; pencili < pencils.N; pencili++){ - int L = pencils.lengthOfPencils[pencili]; - uint targetLength = L + 2; - //vector pencilIds = pencils.getIds(pencili); - - // Calculate the max and min refinement levels in this pencil. - // All cells that are not on the max refinement level will be split - // Into multiple pencils. This has to be taken into account when adding - // up the contributions from each pencil. - // The most convenient way is to just count how many refinement steps the - // pencil has taken on its path. 
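
The bookkeeping this comment describes survives in the area-ratio weight applied further down: a pencil whose path has taken n refinement steps beyond a cell's own level covers only part of that cell's cross-section, so its contribution is weighted by (2^(cellLevel - pathLevel))^2 and the sub-pencils crossing the cell sum to one. A hedged sketch of that weight (Realf stands in for Vlasiator's build-time floating-point type):

#include <cmath>

typedef float Realf; // stand-in; the real type is configured at build time

// Cross-section weight of one pencil's contribution to a target cell, as in
// the areaRatio expression below: coarser cells get a fractional weight so
// that the 4^(pathLevel - cellLevel) sub-pencils crossing them sum to 1.
Realf areaRatio(int cellRefLevel, int pencilPathLevel) {
   return std::pow(std::pow(2.0, cellRefLevel - pencilPathLevel), 2.0);
}
// Example: a level-0 cell crossed by level-1 pencils gets (2^(0-1))^2 = 1/4,
// and the four sub-pencils crossing it together contribute weight 1.
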
- int pencilRefLvl = pencils.path[pencili].size(); + uint targetLength = pencils.lengthOfPencils[pencili] + 2; - // Unpack the vector data - - // Loop over cells in pencil +- 1 padded cell - for ( uint celli = 0; celli < targetLength; ++celli ) { - - Realv vector[VECL]; - // Loop over 1st vspace dimension - for (uint k = 0; k < WID; ++k) { - // Loop over 2nd vspace dimension - for(uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { - targetVecData[i_trans_pt_blockv(planeVector, k, totalTargetLength + celli - 1)].store(vector); - // Loop over 3rd (vectorized) vspace dimension - for (uint i = 0; i < VECL; i++) { - targetBlockData[(totalTargetLength + celli) * WID3 + - cellid_transpose[i + planeVector * VECL + k * WID2]] - = vector[i]; - } - } - } - } - // store values from targetBlockData array to the actual blocks // Loop over cells in the pencil, including the padded cells of the target array for ( uint celli = 0; celli < targetLength; celli++ ) { @@ -1258,17 +1224,16 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const vmesh::LocalID blockLID = targetCell->get_velocity_block_local_id(blockGID, popID); + // Check for invalid block id if( blockLID == vmesh::VelocityMesh::invalidLocalID() ) { - // Invalid target spatial cell continue; } Realf* blockData = targetCell->get_data(blockLID, popID); // areaRatio is the reatio of the cross-section of the spatial cell to the cross-section of the pencil. - Realf areaRatio = pow(pow(2,targetCell->SpatialCell::parameters[CellParams::REFINEMENT_LEVEL] - pencils.path[pencili].size()),2);; + Realf areaRatio = pow(pow(2,targetCell->SpatialCell::parameters[CellParams::REFINEMENT_LEVEL] - pencils.path[pencili].size()),2); - // Realf checksum = 0.0; for(int i = 0; i < WID3 ; i++) { blockData[i] += targetBlockData[GID * WID3 + i] * areaRatio; } @@ -1277,13 +1242,12 @@ bool trans_map_1d_amr(const dccrg::Dccrg& totalTargetLength += targetLength; - // dealloc target data -- Should be automatic again? - } + } // closes loop over pencils phiprof::stop(t2); - } - } - } + } // Closes loop over pencil sets (inactive). targetBlockData gets implicitly deallocated here. + } // Closes loop over blocks + } // closes pragma omp parallel return true; } From 0f9bafd79f7d86e5a38beb0563cf3dc79fa422a0 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Thu, 23 May 2019 15:42:35 +0300 Subject: [PATCH 483/602] Fix fsgrid restart read rank order. MPI standard says it's column order, I had assumed row order. --- ioread.cpp | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/ioread.cpp b/ioread.cpp index 0313a587e..e0fe44966 100644 --- a/ioread.cpp +++ b/ioread.cpp @@ -793,8 +793,8 @@ template bool readFsGridVariable( logFile << "(RESTART) ERROR: Failed to read " << endl << write; return false; } - if(! (dataType == vlsv::datatype::type::FLOAT && byteSize == sizeof(double))) { - logFile << "(RESTART) ERROR: Attempting to read fsgrid variable " << variableName << ", but it is not a double dataset." << endl << write; + if(! (dataType == vlsv::datatype::type::FLOAT && byteSize == sizeof(Real))) { + logFile << "(RESTART) ERROR: Attempting to read fsgrid variable " << variableName << ", but it is not in the same floating point format as the simulation expects (" << byteSize*8 << " bits instead of " << sizeof(Real)*8 << ")." 
<< endl << write; return false; } @@ -817,16 +817,16 @@ template bool readFsGridVariable( size_t localStartOffset = 0; for(int task = 0; task < myRank; task++) { std::array thatTasksSize; - thatTasksSize[0] = targetGrid.calcLocalSize(globalSize[0], decomposition[0], task%decomposition[0]); - thatTasksSize[1] = targetGrid.calcLocalSize(globalSize[1], decomposition[1], (task/decomposition[0])%decomposition[1]); - thatTasksSize[2] = targetGrid.calcLocalSize(globalSize[2], decomposition[2], (task/decomposition[0])/decomposition[1]); + thatTasksSize[0] = targetGrid.calcLocalSize(globalSize[0], decomposition[0], task/decomposition[2]/decomposition[1]); + thatTasksSize[1] = targetGrid.calcLocalSize(globalSize[1], decomposition[1], (task/decomposition[2])%decomposition[1]); + thatTasksSize[2] = targetGrid.calcLocalSize(globalSize[2], decomposition[2], task%decomposition[2]); localStartOffset += thatTasksSize[0] * thatTasksSize[1] * thatTasksSize[2]; } // Read into buffer - std::vector buffer(storageSize*N); + std::vector buffer(storageSize*N); - if(file.readArray("VARIABLE",attribs, localStartOffset*N, storageSize, (char*)buffer.data()) == false) { + if(file.readArray("VARIABLE",attribs, localStartOffset, storageSize, (char*)buffer.data()) == false) { logFile << "(RESTART) ERROR: Failed to read fsgrid variable " << variableName << endl << write; return false; } @@ -836,7 +836,7 @@ template bool readFsGridVariable( for(int z=0; z& mpiGrid, if(readScalarParameter(file,"numWritingRanks",fsgridInputRanks, MASTER_RANK, MPI_COMM_WORLD) == false) { exitOnError(false, "(RESTART) FSGrid writing rank number not found in restart file", MPI_COMM_WORLD); } - success = readFsGridVariable(file, "fg_BGB", fsgridInputRanks, BgBGrid); success = readFsGridVariable(file, "fg_PERB", fsgridInputRanks, perBGrid); success = file.close(); From c71e0d9d0fafc5b76e7f63e0f05234d217f0c29e Mon Sep 17 00:00:00 2001 From: tkoskela Date: Fri, 24 May 2019 11:38:57 +0300 Subject: [PATCH 484/602] Changed to loop over index to make the code a bit shorter. --- vlasovsolver/cpu_trans_map_amr.cpp | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index c8c1d5fe4..04dbad0b1 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -1122,21 +1122,18 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // dz is the cell size in the direction of the pencil std::vector> dz(sourceLength); - uint i = 0; - for(auto cell: sourceCells) { + for(uint i = 0; i < sourceCells.size(); ++i) { switch (dimension) { case(0): - dz[i] = cell->SpatialCell::parameters[CellParams::DX]; + dz[i] = sourceCells[i]->SpatialCell::parameters[CellParams::DX]; break; case(1): - dz[i] = cell->SpatialCell::parameters[CellParams::DY]; + dz[i] = sourceCells[i]->SpatialCell::parameters[CellParams::DY]; break; case(2): - dz[i] = cell->SpatialCell::parameters[CellParams::DZ]; + dz[i] = sourceCells[i]->SpatialCell::parameters[CellParams::DZ]; break; - } - - i++; + } } // Allocate source data: sourcedata Date: Wed, 29 May 2019 11:21:41 +0300 Subject: [PATCH 485/602] Multiple fixes. Fixed B initialisation for restart. Fixed needed fsgrid restart fields (perB and E). 
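
The recurring change across the project files in this patch is a restart guard: the background field is always set up, but the perturbed field is only filled on a fresh start, because on restart fg_PERB is read back from the file instead. A minimal sketch of the pattern with stand-in types (illustrative, not the real fsgrid interface):

#include <array>
#include <vector>

// Stand-ins for the real types, for illustration only.
struct Params { static bool isRestart; };
bool Params::isRestart = false;
using Field = std::vector<std::array<double,3>>; // one B vector per cell

// Shape of every project's setProjectBField after this patch: background
// always, perturbation only when not restarting (restart reads fg_PERB).
void setProjectBField(Field& perB, Field& bgB) {
   for (auto& c : bgB) c = {0.0, 0.0, 0.0};        // setBackgroundFieldToZero
   if (!Params::isRestart) {
      for (auto& c : perB) c = {0.0, 0.0, 1.0e-9}; // project-specific init
   }
}
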
--- MAKE/Makefile.sisu_gcc | 2 +- datareduction/datareducer.cpp | 24 ++++- datareduction/datareducer.h | 1 + datareduction/datareductionoperator.cpp | 5 +- datareduction/datareductionoperator.h | 2 + fieldsolver/ldz_main.cpp | 64 ++++++++++--- ioread.cpp | 16 +++- ioread.h | 1 + iowrite.cpp | 95 +++++-------------- projects/Alfven/Alfven.cpp | 54 ++++++----- projects/Dispersion/Dispersion.cpp | 38 ++++---- projects/Distributions/Distributions.cpp | 40 ++++---- projects/Fluctuations/Fluctuations.cpp | 30 +++--- projects/Harris/Harris.cpp | 26 +++--- projects/IPShock/IPShock.cpp | 104 +++++++++++---------- projects/KHB/KHB.cpp | 56 +++++------ projects/Magnetosphere/Magnetosphere.cpp | 4 +- projects/MultiPeak/MultiPeak.cpp | 40 ++++---- projects/Riemann1/Riemann1.cpp | 58 ++++++------ projects/Shock/Shock.cpp | 26 +++--- projects/Shocktest/Shocktest.cpp | 58 ++++++------ projects/testAmr/testAmr.cpp | 44 ++++----- projects/testHall/testHall.cpp | 26 +++--- projects/test_fp/test_fp.cpp | 114 ++++++++++++----------- vlasiator.cpp | 93 +++--------------- 25 files changed, 507 insertions(+), 514 deletions(-) diff --git a/MAKE/Makefile.sisu_gcc b/MAKE/Makefile.sisu_gcc index bbfc2c8c5..24556b7ab 100644 --- a/MAKE/Makefile.sisu_gcc +++ b/MAKE/Makefile.sisu_gcc @@ -84,4 +84,4 @@ INC_PROFILE = -I$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_V INC_EIGEN = -I$(LIBRARY_PREFIX)/eigen/ INC_DCCRG = -I$(LIBRARY_PREFIX)/dccrg_new_neighbours/ INC_VECTORCLASS = -I$(LIBRARY_PREFIX)/vectorclass -INC_FSGRID = -I$(LIBRARY_PREFIX)/fsgrid +INC_FSGRID = -I$(HOME)/fsgrid diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 4bfacaef3..5dc2a4ec4 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -39,6 +39,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "fg_B" || *it == "B") { // Bulk magnetic field at Yee-Lattice locations outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_B",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -73,6 +74,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "fg_BackgroundB" || *it == "BackgroundB") { // Static (typically dipole) magnetic field part outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_background_B",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -104,6 +106,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "fg_PerturbedB" || *it == "PerturbedB") { // Fluctuating magnetic field part outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_perturbed_B",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -135,6 +138,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "fg_E" || *it== "E") { // Bulk electric field at Yee-lattice locations outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_E",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ 
-170,6 +174,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "fg_Rhom") { // Overall mass density (summed over all populations) outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_rhom",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -203,6 +208,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "fg_Rhoq") { // Overall charge density (summed over all populations) outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_rhoq",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -245,6 +251,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "fg_V") { // Overall effective bulk density defining the center-of-mass frame from all populations outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_V",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -366,6 +373,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti // Maximum timestep constraint as calculated by the fieldsolver outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("MaxFieldsdt",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -401,6 +409,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti // Map of spatial decomposition of the FsGrid into MPI ranks outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("FsGridRank",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -427,6 +436,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti // Type of boundarycells as stored in FSGrid outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("FsGridBoundaryType",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -462,6 +472,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti // Type of boundarycells as stored in FSGrid outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("FsGridBoundaryLayer",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -517,6 +528,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "fg_VolE") { outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_volB",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -550,6 +562,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti std::string reducer_name = "fg_HallE" + std::to_string(index); 
outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid(reducer_name,[index]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -590,6 +603,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "fg_VolB") { // Static (typically dipole) magnetic field part outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_volB",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -638,6 +652,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti // Overall scalar pressure from all populations outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_Pressure",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -696,6 +711,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "fg_GridCoordinates") { outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_X",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -722,6 +738,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_Y",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -748,6 +765,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_Z",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -774,6 +792,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_DX",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -791,6 +810,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_DY",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -808,6 +828,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_DZ",[]( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -1061,6 +1082,7 @@ bool DataReducer::writeParameters(const unsigned int& operatorID, vlsv::Writer& */ bool DataReducer::writeFsGridData( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< 
std::array, 2>& EGradPeGrid, @@ -1076,6 +1098,6 @@ bool DataReducer::writeFsGridData( if(!DROf) { return false; } else { - return DROf->writeFsGridData(perBGrid, EGrid, EHallGrid, EGradPeGrid, momentsGrid, dPerBGrid, dMomentsGrid, BgBGrid, volGrid, technicalGrid, meshName, vlsvWriter); + return DROf->writeFsGridData(perBGrid, perBDt2Grid, EGrid, EHallGrid, EGradPeGrid, momentsGrid, dPerBGrid, dMomentsGrid, BgBGrid, volGrid, technicalGrid, meshName, vlsvWriter); } } diff --git a/datareduction/datareducer.h b/datareduction/datareducer.h index ac37d9546..8cf5fabcf 100644 --- a/datareduction/datareducer.h +++ b/datareduction/datareducer.h @@ -57,6 +57,7 @@ class DataReducer { bool writeParameters(const unsigned int& operatorID, vlsv::Writer& vlsvWriter); bool writeFsGridData( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index 640beacc0..fa89b919f 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -133,6 +133,7 @@ namespace DRO { bool DataReductionOperatorFsGrid::writeFsGridData( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -148,12 +149,12 @@ namespace DRO { attribs["name"]=variableName; std::vector varBuffer = - lambda(perBGrid,EGrid,EHallGrid,EGradPeGrid,momentsGrid,dPerBGrid,dMomentsGrid,BgBGrid,volGrid,technicalGrid); + lambda(perBGrid,perBDt2Grid,EGrid,EHallGrid,EGradPeGrid,momentsGrid,dPerBGrid,dMomentsGrid,BgBGrid,volGrid,technicalGrid); std::array& gridSize = technicalGrid.getLocalSize(); int vectorSize = varBuffer.size() / (gridSize[0]*gridSize[1]*gridSize[2]); if(vlsvWriter.writeArray("VARIABLE",attribs, "float", gridSize[0]*gridSize[1]*gridSize[2], vectorSize, sizeof(double), reinterpret_cast(varBuffer.data())) == false) { - string message = "The DataReductionOperator " + this->getName() + " failed to write it's data."; + string message = "The DataReductionOperator " + this->getName() + " failed to write its data."; bailout(true, message, __FILE__, __LINE__); } diff --git a/datareduction/datareductionoperator.h b/datareduction/datareductionoperator.h index 67b54b184..cdd80aeca 100644 --- a/datareduction/datareductionoperator.h +++ b/datareduction/datareductionoperator.h @@ -85,6 +85,7 @@ namespace DRO { public: typedef std::function( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -107,6 +108,7 @@ namespace DRO { virtual bool reduceDiagnostic(const SpatialCell* cell,Real * result); virtual bool writeFsGridData( FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, diff --git a/fieldsolver/ldz_main.cpp b/fieldsolver/ldz_main.cpp index 7fbb8a0f3..986801b4f 100644 --- a/fieldsolver/ldz_main.cpp +++ b/fieldsolver/ldz_main.cpp @@ -84,14 +84,57 @@ bool initializeFieldPropagator( // Assuming B is known, calculate derivatives and upwinded edge-E. Exchange derivatives // and edge-E:s between neighbouring processes and calculate volume-averaged E,B fields. 
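
The hunk below splits this initialisation into a fresh-start path, which derives derivatives, E and the volume-averaged fields from the just-initialised B with the first-order flag, and a restart path, which recomputes derivatives and E from the fields read back from file with the second Runge-Kutta step flag. A runnable skeleton of just the branch structure, with the real grid and boundary arguments replaced by stubs:

#include <iostream>

enum RKCase { RK_ORDER1, RK_ORDER2_STEP2 };
struct P { static bool isRestart; static int ohmGradPeTerm, ohmHallTerm; static bool propagateField; };
bool P::isRestart = false; int P::ohmGradPeTerm = 0; int P::ohmHallTerm = 1; bool P::propagateField = true;

// Stubs standing in for the real field-solver calls, which take the grids.
void calculateDerivativesSimple(RKCase rk)           { std::cout << "derivatives " << rk << "\n"; }
void calculateGradPeTermSimple(RKCase rk)            { std::cout << "gradPe " << rk << "\n"; }
void calculateHallTermSimple(RKCase rk)              { std::cout << "hall " << rk << "\n"; }
void calculateUpwindedElectricFieldSimple(RKCase rk) { std::cout << "upwinded E " << rk << "\n"; }
void calculateVolumeAveragedFields()                 { std::cout << "volume fields\n"; }
void calculateBVOLDerivativesSimple()                { std::cout << "BVOL derivatives\n"; }

bool initializeFieldPropagatorSketch() {
   if (!P::isRestart) {
      // Fresh start: first-order pass, then build the volume-averaged fields.
      calculateDerivativesSimple(RK_ORDER1);
      if (P::ohmGradPeTerm > 0) calculateGradPeTermSimple(RK_ORDER1);
      if (P::propagateField) {
         if (P::ohmHallTerm > 0) calculateHallTermSimple(RK_ORDER1);
         calculateUpwindedElectricFieldSimple(RK_ORDER1);
      }
      calculateVolumeAveragedFields();
      calculateBVOLDerivativesSimple();
   } else {
      // Restart: recompute from the fields restored from the restart file.
      calculateDerivativesSimple(RK_ORDER2_STEP2);
      if (P::ohmGradPeTerm > 0) calculateGradPeTermSimple(RK_ORDER2_STEP2);
      if (P::ohmHallTerm > 0) calculateHallTermSimple(RK_ORDER2_STEP2);
      calculateUpwindedElectricFieldSimple(RK_ORDER2_STEP2);
   }
   return true;
}
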
bool communicateMomentsDerivatives = true; - calculateDerivativesSimple(perBGrid, perBDt2Grid, momentsGrid, momentsDt2Grid, dPerBGrid, dMomentsGrid, technicalGrid, sysBoundaries, RK_ORDER1, true); - if(P::ohmGradPeTerm > 0) { - calculateGradPeTermSimple(EGradPeGrid, momentsGrid, momentsDt2Grid, dMomentsGrid, technicalGrid, sysBoundaries, RK_ORDER1); - communicateMomentsDerivatives = false; - } - // derivatives, gradPe and volume B are needed also in cases where propagateFields is false. - if(P::propagateField) { + if(!P::isRestart) { + calculateDerivativesSimple(perBGrid, perBDt2Grid, momentsGrid, momentsDt2Grid, dPerBGrid, dMomentsGrid, technicalGrid, sysBoundaries, RK_ORDER1, true); + + if(P::ohmGradPeTerm > 0) { + calculateGradPeTermSimple(EGradPeGrid, momentsGrid, momentsDt2Grid, dMomentsGrid, technicalGrid, sysBoundaries, RK_ORDER1); + communicateMomentsDerivatives = false; + } + // derivatives, gradPe and volume B are needed also in cases where propagateFields is false. + if(P::propagateField) { + if(P::ohmHallTerm > 0) { + calculateHallTermSimple( + perBGrid, + perBDt2Grid, + EHallGrid, + momentsGrid, + momentsDt2Grid, + dPerBGrid, + dMomentsGrid, + BgBGrid, + technicalGrid, + sysBoundaries, + RK_ORDER1, + communicateMomentsDerivatives + ); + } + calculateUpwindedElectricFieldSimple( + perBGrid, + perBDt2Grid, + EGrid, + EDt2Grid, + EHallGrid, + EGradPeGrid, + momentsGrid, + momentsDt2Grid, + dPerBGrid, + dMomentsGrid, + BgBGrid, + technicalGrid, + sysBoundaries, + RK_ORDER1 + ); + } + calculateVolumeAveragedFields(perBGrid,EGrid,dPerBGrid,volGrid,technicalGrid); + calculateBVOLDerivativesSimple(volGrid, technicalGrid, sysBoundaries); + } else { + calculateDerivativesSimple(perBGrid, perBDt2Grid, momentsGrid, momentsDt2Grid, dPerBGrid, dMomentsGrid, technicalGrid, sysBoundaries, RK_ORDER2_STEP2, true); + if(P::ohmGradPeTerm > 0) { + calculateGradPeTermSimple(EGradPeGrid, momentsGrid, momentsDt2Grid, dMomentsGrid, technicalGrid, sysBoundaries, RK_ORDER2_STEP2); + communicateMomentsDerivatives = false; + } if(P::ohmHallTerm > 0) { calculateHallTermSimple( perBGrid, @@ -104,7 +147,7 @@ bool initializeFieldPropagator( BgBGrid, technicalGrid, sysBoundaries, - RK_ORDER1, + RK_ORDER2_STEP2, communicateMomentsDerivatives ); } @@ -122,12 +165,9 @@ bool initializeFieldPropagator( BgBGrid, technicalGrid, sysBoundaries, - RK_ORDER1 + RK_ORDER2_STEP2 ); } - calculateVolumeAveragedFields(perBGrid,EGrid,dPerBGrid,volGrid,technicalGrid); - calculateBVOLDerivativesSimple(volGrid, technicalGrid, sysBoundaries); - return true; } diff --git a/ioread.cpp b/ioread.cpp index e0fe44966..c746cbb31 100644 --- a/ioread.cpp +++ b/ioread.cpp @@ -822,7 +822,7 @@ template bool readFsGridVariable( thatTasksSize[2] = targetGrid.calcLocalSize(globalSize[2], decomposition[2], task%decomposition[2]); localStartOffset += thatTasksSize[0] * thatTasksSize[1] * thatTasksSize[2]; } - + // Read into buffer std::vector buffer(storageSize*N); @@ -830,7 +830,7 @@ template bool readFsGridVariable( logFile << "(RESTART) ERROR: Failed to read fsgrid variable " << variableName << endl << write; return false; } - + // Assign buffer into fsgrid int index=0; for(int z=0; z bool readFsGridVariable( logFile << "(RESTART) ERROR: Attempting to restart from different number of tasks, this is not supported yet." 
<< endl << write; return false; } + + targetGrid.updateGhostCells(); return true; } @@ -901,6 +903,7 @@ bool checkScalarParameter(vlsv::ParallelReader& file,const string& name,T correc */ bool exec_readGrid(dccrg::Dccrg& mpiGrid, FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EGradPeGrid, FsGrid< std::array, 2>& momentsGrid, FsGrid< std::array, 2>& BgBGrid, @@ -1094,13 +1097,17 @@ bool exec_readGrid(dccrg::Dccrg& mpiGrid, } phiprof::stop("readBlockData"); + mpiGrid.update_copies_of_remote_neighbors(FULL_NEIGHBORHOOD_ID); + // Read fsgrid data back in int fsgridInputRanks=0; if(readScalarParameter(file,"numWritingRanks",fsgridInputRanks, MASTER_RANK, MPI_COMM_WORLD) == false) { exitOnError(false, "(RESTART) FSGrid writing rank number not found in restart file", MPI_COMM_WORLD); } + success = readFsGridVariable(file, "fg_PERB", fsgridInputRanks, perBGrid); - + success = readFsGridVariable(file, "fg_E", fsgridInputRanks, EGrid); + success = file.close(); phiprof::stop("readGrid"); @@ -1116,6 +1123,7 @@ bool exec_readGrid(dccrg::Dccrg& mpiGrid, */ bool readGrid(dccrg::Dccrg& mpiGrid, FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EGradPeGrid, FsGrid< std::array, 2>& momentsGrid, FsGrid< std::array, 2>& BgBGrid, @@ -1123,5 +1131,5 @@ bool readGrid(dccrg::Dccrg& mpiGrid, FsGrid< fsgrids::technical, 2>& technicalGrid, const std::string& name){ //Check the vlsv version from the file: - return exec_readGrid(mpiGrid,perBGrid,EGradPeGrid,momentsGrid,BgBGrid,volGrid,technicalGrid,name); + return exec_readGrid(mpiGrid,perBGrid,EGrid,EGradPeGrid,momentsGrid,BgBGrid,volGrid,technicalGrid,name); } diff --git a/ioread.h b/ioread.h index 4f47966ae..ee1ccde4d 100644 --- a/ioread.h +++ b/ioread.h @@ -38,6 +38,7 @@ */ bool readGrid(dccrg::Dccrg& mpiGrid, FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2> & EGradPeGrid, FsGrid< std::array, 2>& momentsGrid, FsGrid< std::array, 2>& BgBGrid, diff --git a/iowrite.cpp b/iowrite.cpp index aee11c729..7fffb304c 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -1302,31 +1302,6 @@ bool writeRestart(dccrg::Dccrg& mpiGrid, restartReducer.addOperator(new DRO::BoundaryLayer); // Fsgrid Reducers - restartReducer.addOperator(new DRO::DataReductionOperatorFsGrid("fg_EFIELD",[]( - FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& EGrid, - FsGrid< std::array, 2>& EHallGrid, - FsGrid< std::array, 2>& EGradPeGrid, - FsGrid< std::array, 2>& momentsGrid, - FsGrid< std::array, 2>& dPerBGrid, - FsGrid< std::array, 2>& dMomentsGrid, - FsGrid< std::array, 2>& BgBGrid, - FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*fsgrids::efield::N_EFIELD); - int index=0; - for(int z=0; z, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, @@ -1352,55 +1327,31 @@ bool writeRestart(dccrg::Dccrg& mpiGrid, return retval; } )); - restartReducer.addOperator(new DRO::DataReductionOperatorFsGrid("fg_BGB",[]( - FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& EGrid, - FsGrid< std::array, 2>& EHallGrid, - FsGrid< std::array, 2>& EGradPeGrid, - FsGrid< std::array, 2>& momentsGrid, - FsGrid< std::array, 2>& dPerBGrid, - FsGrid< std::array, 2>& dMomentsGrid, - FsGrid< std::array, 2>& BgBGrid, - FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - std::array& gridSize = 
technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*fsgrids::bgbfield::N_BGB); - int index=0; - for(int z=0; z, 2>& perBGrid, - FsGrid< std::array, 2>& EGrid, - FsGrid< std::array, 2>& EHallGrid, - FsGrid< std::array, 2>& EGradPeGrid, - FsGrid< std::array, 2>& momentsGrid, - FsGrid< std::array, 2>& dPerBGrid, - FsGrid< std::array, 2>& dMomentsGrid, - FsGrid< std::array, 2>& BgBGrid, - FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*fsgrids::dperb::N_DPERB); - int index=0; - for(int z=0; z, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*fsgrids::efield::N_EFIELD); + int index=0; + for(int z=0; z, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { - auto localSize = perBGrid.getLocalSize(); + setBackgroundFieldToZero(BgBGrid); + if (!P::isRestart) { + auto localSize = perBGrid.getLocalSize(); + #pragma omp parallel for collapse(3) - for (int x = 0; x < localSize[0]; ++x) { - for (int y = 0; y < localSize[1]; ++y) { - for (int z = 0; z < localSize[2]; ++z) { - const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); - std::array* cell = perBGrid.get(x, y, z); - - Real dBxavg, dByavg, dBzavg; - dBxavg = dByavg = dBzavg = 0.0; - Real d_x = perBGrid.DX / (this->nSpaceSamples - 1); - Real d_y = perBGrid.DY / (this->nSpaceSamples - 1); - - for (uint i=0; inSpaceSamples; ++i) { - for (uint j=0; jnSpaceSamples; ++j) { - for (uint k=0; knSpaceSamples; ++k) { - Real ksi = ((xyz[0] + i * d_x) * cos(this->ALPHA) + (xyz[1] + j * d_y) * sin(this->ALPHA)) / this->WAVELENGTH; - dBxavg += sin(2.0 * M_PI * ksi); - dByavg += sin(2.0 * M_PI * ksi); - dBzavg += cos(2.0 * M_PI * ksi); + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + Real dBxavg, dByavg, dBzavg; + dBxavg = dByavg = dBzavg = 0.0; + Real d_x = perBGrid.DX / (this->nSpaceSamples - 1); + Real d_y = perBGrid.DY / (this->nSpaceSamples - 1); + + for (uint i=0; inSpaceSamples; ++i) { + for (uint j=0; jnSpaceSamples; ++j) { + for (uint k=0; knSpaceSamples; ++k) { + Real ksi = ((xyz[0] + i * d_x) * cos(this->ALPHA) + (xyz[1] + j * d_y) * sin(this->ALPHA)) / this->WAVELENGTH; + dBxavg += sin(2.0 * M_PI * ksi); + dByavg += sin(2.0 * M_PI * ksi); + dBzavg += cos(2.0 * M_PI * ksi); + } } } + + cuint nPts = pow(this->nSpaceSamples, 3.0); + cell->at(fsgrids::bfield::PERBX) = this->B0 * cos(this->ALPHA) - this->A_MAG * this->B0 * sin(this->ALPHA) * dBxavg / nPts; + cell->at(fsgrids::bfield::PERBY) = this->B0 * sin(this->ALPHA) + this->A_MAG * this->B0 * cos(this->ALPHA) * dByavg / nPts; + cell->at(fsgrids::bfield::PERBZ) = this->B0 * this->A_MAG * dBzavg / nPts; + } - - cuint nPts = pow(this->nSpaceSamples, 3.0); - cell->at(fsgrids::bfield::PERBX) = this->B0 * cos(this->ALPHA) - this->A_MAG * this->B0 * sin(this->ALPHA) * dBxavg / nPts; - 
cell->at(fsgrids::bfield::PERBY) = this->B0 * sin(this->ALPHA) + this->A_MAG * this->B0 * cos(this->ALPHA) * dByavg / nPts; - cell->at(fsgrids::bfield::PERBZ) = this->B0 * this->A_MAG * dBzavg / nPts; - } } } diff --git a/projects/Dispersion/Dispersion.cpp b/projects/Dispersion/Dispersion.cpp index a7ac4aab6..04499a5e8 100644 --- a/projects/Dispersion/Dispersion.cpp +++ b/projects/Dispersion/Dispersion.cpp @@ -216,25 +216,27 @@ namespace projects { setBackgroundField(bgField, BgBGrid); - const auto localSize = BgBGrid.getLocalSize(); - + if(!P::isRestart) { + const auto localSize = BgBGrid.getLocalSize(); + #pragma omp parallel for collapse(3) - for (int x = 0; x < localSize[0]; ++x) { - for (int y = 0; y < localSize[1]; ++y) { - for (int z = 0; z < localSize[2]; ++z) { - std::array* cell = perBGrid.get(x, y, z); - const int64_t cellid = perBGrid.GlobalIDForCoords(x, y, z); - - setRandomSeed(cellid); - - Real rndBuffer[3]; - rndBuffer[0]=getRandomNumber(); - rndBuffer[1]=getRandomNumber(); - rndBuffer[2]=getRandomNumber(); - - cell->at(fsgrids::bfield::PERBX) = this->magXPertAbsAmp * (0.5 - rndBuffer[0]); - cell->at(fsgrids::bfield::PERBY) = this->magYPertAbsAmp * (0.5 - rndBuffer[1]); - cell->at(fsgrids::bfield::PERBZ) = this->magZPertAbsAmp * (0.5 - rndBuffer[2]); + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + std::array* cell = perBGrid.get(x, y, z); + const int64_t cellid = perBGrid.GlobalIDForCoords(x, y, z); + + setRandomSeed(cellid); + + Real rndBuffer[3]; + rndBuffer[0]=getRandomNumber(); + rndBuffer[1]=getRandomNumber(); + rndBuffer[2]=getRandomNumber(); + + cell->at(fsgrids::bfield::PERBX) = this->magXPertAbsAmp * (0.5 - rndBuffer[0]); + cell->at(fsgrids::bfield::PERBY) = this->magYPertAbsAmp * (0.5 - rndBuffer[1]); + cell->at(fsgrids::bfield::PERBZ) = this->magZPertAbsAmp * (0.5 - rndBuffer[2]); + } } } } diff --git a/projects/Distributions/Distributions.cpp b/projects/Distributions/Distributions.cpp index b4d7d13e4..7422be8ad 100644 --- a/projects/Distributions/Distributions.cpp +++ b/projects/Distributions/Distributions.cpp @@ -168,27 +168,29 @@ namespace projects { setBackgroundField(bgField, BgBGrid); - const auto localSize = BgBGrid.getLocalSize(); - + if(!P::isRestart) { + const auto localSize = BgBGrid.getLocalSize(); + #pragma omp parallel for collapse(3) - for (int x = 0; x < localSize[0]; ++x) { - for (int y = 0; y < localSize[1]; ++y) { - for (int z = 0; z < localSize[2]; ++z) { - std::array* cell = perBGrid.get(x, y, z); - const int64_t cellid = perBGrid.GlobalIDForCoords(x, y, z); - const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); - - setRandomSeed(cellid); - - if (this->lambda != 0.0) { - cell->at(fsgrids::bfield::PERBX) = this->dBx*cos(2.0 * M_PI * xyz[0] / this->lambda); - cell->at(fsgrids::bfield::PERBY) = this->dBy*sin(2.0 * M_PI * xyz[0] / this->lambda); - cell->at(fsgrids::bfield::PERBZ) = this->dBz*cos(2.0 * M_PI * xyz[0] / this->lambda); + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + std::array* cell = perBGrid.get(x, y, z); + const int64_t cellid = perBGrid.GlobalIDForCoords(x, y, z); + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + + setRandomSeed(cellid); + + if (this->lambda != 0.0) { + cell->at(fsgrids::bfield::PERBX) = this->dBx*cos(2.0 * M_PI * xyz[0] / this->lambda); + cell->at(fsgrids::bfield::PERBY) = this->dBy*sin(2.0 * M_PI * xyz[0] / this->lambda); + 
cell->at(fsgrids::bfield::PERBZ) = this->dBz*cos(2.0 * M_PI * xyz[0] / this->lambda); + } + + cell->at(fsgrids::bfield::PERBX) += this->magXPertAbsAmp * (0.5 - getRandomNumber()); + cell->at(fsgrids::bfield::PERBY) += this->magYPertAbsAmp * (0.5 - getRandomNumber()); + cell->at(fsgrids::bfield::PERBZ) += this->magZPertAbsAmp * (0.5 - getRandomNumber()); } - - cell->at(fsgrids::bfield::PERBX) += this->magXPertAbsAmp * (0.5 - getRandomNumber()); - cell->at(fsgrids::bfield::PERBY) += this->magYPertAbsAmp * (0.5 - getRandomNumber()); - cell->at(fsgrids::bfield::PERBZ) += this->magZPertAbsAmp * (0.5 - getRandomNumber()); } } } diff --git a/projects/Fluctuations/Fluctuations.cpp b/projects/Fluctuations/Fluctuations.cpp index b891aa382..ad47a8582 100644 --- a/projects/Fluctuations/Fluctuations.cpp +++ b/projects/Fluctuations/Fluctuations.cpp @@ -182,20 +182,22 @@ namespace projects { setBackgroundField(bgField, BgBGrid); - const auto localSize = BgBGrid.getLocalSize(); - - #pragma omp parallel for collapse(3) - for (int x = 0; x < localSize[0]; ++x) { - for (int y = 0; y < localSize[1]; ++y) { - for (int z = 0; z < localSize[2]; ++z) { - std::array* cell = perBGrid.get(x, y, z); - const int64_t cellid = perBGrid.GlobalIDForCoords(x, y, z); - - setRandomSeed(cellid); - - cell->at(fsgrids::bfield::PERBX) = this->magXPertAbsAmp * (0.5 - getRandomNumber()); - cell->at(fsgrids::bfield::PERBY) = this->magYPertAbsAmp * (0.5 - getRandomNumber()); - cell->at(fsgrids::bfield::PERBZ) = this->magZPertAbsAmp * (0.5 - getRandomNumber()); + if(!P::isRestart) { + const auto localSize = BgBGrid.getLocalSize(); + + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + std::array* cell = perBGrid.get(x, y, z); + const int64_t cellid = perBGrid.GlobalIDForCoords(x, y, z); + + setRandomSeed(cellid); + + cell->at(fsgrids::bfield::PERBX) = this->magXPertAbsAmp * (0.5 - getRandomNumber()); + cell->at(fsgrids::bfield::PERBY) = this->magYPertAbsAmp * (0.5 - getRandomNumber()); + cell->at(fsgrids::bfield::PERBZ) = this->magZPertAbsAmp * (0.5 - getRandomNumber()); + } } } } diff --git a/projects/Harris/Harris.cpp b/projects/Harris/Harris.cpp index b23e23881..1da458a65 100644 --- a/projects/Harris/Harris.cpp +++ b/projects/Harris/Harris.cpp @@ -155,18 +155,20 @@ namespace projects { ) { setBackgroundFieldToZero(BgBGrid); - auto localSize = perBGrid.getLocalSize(); - - #pragma omp parallel for collapse(3) - for (int x = 0; x < localSize[0]; ++x) { - for (int y = 0; y < localSize[1]; ++y) { - for (int z = 0; z < localSize[2]; ++z) { - const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); - std::array* cell = perBGrid.get(x, y, z); - - cell->at(fsgrids::bfield::PERBX) = this->BX0 * tanh((xyz[1] + 0.5 * perBGrid.DY) / this->SCA_LAMBDA); - cell->at(fsgrids::bfield::PERBY) = this->BY0 * tanh((xyz[2] + 0.5 * perBGrid.DZ) / this->SCA_LAMBDA); - cell->at(fsgrids::bfield::PERBZ) = this->BZ0 * tanh((xyz[0] + 0.5 * perBGrid.DX) / this->SCA_LAMBDA); + if(!P::isRestart) { + auto localSize = perBGrid.getLocalSize(); + + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + cell->at(fsgrids::bfield::PERBX) = this->BX0 * tanh((xyz[1] + 0.5 * perBGrid.DY) / this->SCA_LAMBDA); + 
cell->at(fsgrids::bfield::PERBY) = this->BY0 * tanh((xyz[2] + 0.5 * perBGrid.DZ) / this->SCA_LAMBDA); + cell->at(fsgrids::bfield::PERBZ) = this->BZ0 * tanh((xyz[0] + 0.5 * perBGrid.DX) / this->SCA_LAMBDA); + } } } } diff --git a/projects/IPShock/IPShock.cpp b/projects/IPShock/IPShock.cpp index 123dd770c..26c54042b 100644 --- a/projects/IPShock/IPShock.cpp +++ b/projects/IPShock/IPShock.cpp @@ -442,56 +442,58 @@ namespace projects { FsGrid< std::array, 2>& BgBGrid, FsGrid< fsgrids::technical, 2>& technicalGrid ) { - setBackgroundFieldToZero(BgBGrid); - - auto localSize = perBGrid.getLocalSize(); - - #pragma omp parallel for collapse(3) - for (int x = 0; x < localSize[0]; ++x) { - for (int y = 0; y < localSize[1]; ++y) { - for (int z = 0; z < localSize[2]; ++z) { - const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); - std::array* cell = perBGrid.get(x, y, z); - - /* Maintain all values in BPERT for simplicity */ - Real KB = physicalconstants::K_B; - Real mu0 = physicalconstants::MU_0; - Real adiab = 5./3.; - - // Interpolate density between upstream and downstream - // All other values are calculated from jump conditions - Real MassDensity = 0.; - Real MassDensityU = 0.; - Real EffectiveVu0 = 0.; - for(uint i=0; i< getObjectWrapper().particleSpecies.size(); i++) { - const IPShockSpeciesParameters& sP = speciesParams[i]; - Real mass = getObjectWrapper().particleSpecies[i].mass; - - MassDensity += mass * interpolate(sP.DENSITYu,sP.DENSITYd, xyz[0]); - MassDensityU += mass * sP.DENSITYu; - EffectiveVu0 += sP.V0u[0] * mass * sP.DENSITYu; - } - EffectiveVu0 /= MassDensityU; - - // Solve tangential components for B and V - Real VX = MassDensityU * EffectiveVu0 / MassDensity; - Real BX = this->B0u[0]; - Real MAsq = std::pow((EffectiveVu0/this->B0u[0]), 2) * MassDensityU * mu0; - Real Btang = this->B0utangential * (MAsq - 1.0)/(MAsq*VX/EffectiveVu0 -1.0); - Real Vtang = VX * Btang / BX; - - /* Reconstruct Y and Z components using cos(phi) values and signs. Tangential variables are always positive. */ - Real BY = abs(Btang) * this->Bucosphi * this->Byusign; - Real BZ = abs(Btang) * sqrt(1. - this->Bucosphi * this->Bucosphi) * this->Bzusign; - //Real VY = Vtang * this->Vucosphi * this->Vyusign; - //Real VZ = Vtang * sqrt(1. 
- this->Vucosphi * this->Vucosphi) * this->Vzusign; - - cell->at(fsgrids::bfield::PERBX) = BX; - cell->at(fsgrids::bfield::PERBY) = BY; - cell->at(fsgrids::bfield::PERBZ) = BZ; - } - } - } - } + setBackgroundFieldToZero(BgBGrid); + + if(!P::isRestart) { + auto localSize = perBGrid.getLocalSize(); + +#pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + /* Maintain all values in BPERT for simplicity */ + Real KB = physicalconstants::K_B; + Real mu0 = physicalconstants::MU_0; + Real adiab = 5./3.; + + // Interpolate density between upstream and downstream + // All other values are calculated from jump conditions + Real MassDensity = 0.; + Real MassDensityU = 0.; + Real EffectiveVu0 = 0.; + for(uint i=0; i< getObjectWrapper().particleSpecies.size(); i++) { + const IPShockSpeciesParameters& sP = speciesParams[i]; + Real mass = getObjectWrapper().particleSpecies[i].mass; + + MassDensity += mass * interpolate(sP.DENSITYu,sP.DENSITYd, xyz[0]); + MassDensityU += mass * sP.DENSITYu; + EffectiveVu0 += sP.V0u[0] * mass * sP.DENSITYu; + } + EffectiveVu0 /= MassDensityU; + + // Solve tangential components for B and V + Real VX = MassDensityU * EffectiveVu0 / MassDensity; + Real BX = this->B0u[0]; + Real MAsq = std::pow((EffectiveVu0/this->B0u[0]), 2) * MassDensityU * mu0; + Real Btang = this->B0utangential * (MAsq - 1.0)/(MAsq*VX/EffectiveVu0 -1.0); + Real Vtang = VX * Btang / BX; + + /* Reconstruct Y and Z components using cos(phi) values and signs. Tangential variables are always positive. */ + Real BY = abs(Btang) * this->Bucosphi * this->Byusign; + Real BZ = abs(Btang) * sqrt(1. - this->Bucosphi * this->Bucosphi) * this->Bzusign; + //Real VY = Vtang * this->Vucosphi * this->Vyusign; + //Real VZ = Vtang * sqrt(1. 
- this->Vucosphi * this->Vucosphi) * this->Vzusign; + + cell->at(fsgrids::bfield::PERBX) = BX; + cell->at(fsgrids::bfield::PERBY) = BY; + cell->at(fsgrids::bfield::PERBZ) = BZ; + } + } + } + } + } }//namespace projects diff --git a/projects/KHB/KHB.cpp b/projects/KHB/KHB.cpp index e64d0a496..c748bb77b 100644 --- a/projects/KHB/KHB.cpp +++ b/projects/KHB/KHB.cpp @@ -160,35 +160,37 @@ namespace projects { ) { setBackgroundFieldToZero(BgBGrid); - auto localSize = perBGrid.getLocalSize(); - - #pragma omp parallel for collapse(3) - for (int x = 0; x < localSize[0]; ++x) { - for (int y = 0; y < localSize[1]; ++y) { - for (int z = 0; z < localSize[2]; ++z) { - const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); - std::array* cell = perBGrid.get(x, y, z); - - Real Bxavg, Byavg, Bzavg; - Bxavg = Byavg = Bzavg = 0.0; - if(this->nSpaceSamples > 1) { - Real d_x = perBGrid.DX / (this->nSpaceSamples - 1); - Real d_z = perBGrid.DZ / (this->nSpaceSamples - 1); - for (uint i=0; inSpaceSamples; ++i) { - for (uint k=0; knSpaceSamples; ++k) { - Bxavg += profile(this->Bx[this->BOTTOM], this->Bx[this->TOP], xyz[0]+i*d_x, xyz[2]+k*d_z); - Byavg += profile(this->By[this->BOTTOM], this->By[this->TOP], xyz[0]+i*d_x, xyz[2]+k*d_z); - Bzavg += profile(this->Bz[this->BOTTOM], this->Bz[this->TOP], xyz[0]+i*d_x, xyz[2]+k*d_z); + if(!P::isRestart) { + auto localSize = perBGrid.getLocalSize(); + + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + Real Bxavg, Byavg, Bzavg; + Bxavg = Byavg = Bzavg = 0.0; + if(this->nSpaceSamples > 1) { + Real d_x = perBGrid.DX / (this->nSpaceSamples - 1); + Real d_z = perBGrid.DZ / (this->nSpaceSamples - 1); + for (uint i=0; inSpaceSamples; ++i) { + for (uint k=0; knSpaceSamples; ++k) { + Bxavg += profile(this->Bx[this->BOTTOM], this->Bx[this->TOP], xyz[0]+i*d_x, xyz[2]+k*d_z); + Byavg += profile(this->By[this->BOTTOM], this->By[this->TOP], xyz[0]+i*d_x, xyz[2]+k*d_z); + Bzavg += profile(this->Bz[this->BOTTOM], this->Bz[this->TOP], xyz[0]+i*d_x, xyz[2]+k*d_z); + } } + cuint nPts = pow(this->nSpaceSamples, 2.0); + cell->at(fsgrids::bfield::PERBX) = Bxavg / nPts; + cell->at(fsgrids::bfield::PERBY) = Byavg / nPts; + cell->at(fsgrids::bfield::PERBZ) = Bzavg / nPts; + } else { + cell->at(fsgrids::bfield::PERBX) = profile(this->Bx[this->BOTTOM], this->Bx[this->TOP], xyz[0]+0.5*perBGrid.DX, xyz[2]+0.5*perBGrid.DZ); + cell->at(fsgrids::bfield::PERBY) = profile(this->By[this->BOTTOM], this->By[this->TOP], xyz[0]+0.5*perBGrid.DX, xyz[2]+0.5*perBGrid.DZ); + cell->at(fsgrids::bfield::PERBZ) = profile(this->Bz[this->BOTTOM], this->Bz[this->TOP], xyz[0]+0.5*perBGrid.DX, xyz[2]+0.5*perBGrid.DZ); } - cuint nPts = pow(this->nSpaceSamples, 2.0); - cell->at(fsgrids::bfield::PERBX) = Bxavg / nPts; - cell->at(fsgrids::bfield::PERBY) = Byavg / nPts; - cell->at(fsgrids::bfield::PERBZ) = Bzavg / nPts; - } else { - cell->at(fsgrids::bfield::PERBX) = profile(this->Bx[this->BOTTOM], this->Bx[this->TOP], xyz[0]+0.5*perBGrid.DX, xyz[2]+0.5*perBGrid.DZ); - cell->at(fsgrids::bfield::PERBY) = profile(this->By[this->BOTTOM], this->By[this->TOP], xyz[0]+0.5*perBGrid.DX, xyz[2]+0.5*perBGrid.DZ); - cell->at(fsgrids::bfield::PERBZ) = profile(this->Bz[this->BOTTOM], this->Bz[this->TOP], xyz[0]+0.5*perBGrid.DX, xyz[2]+0.5*perBGrid.DZ); } } } diff --git a/projects/Magnetosphere/Magnetosphere.cpp 
b/projects/Magnetosphere/Magnetosphere.cpp index c8c8c5b07..bcc2d1357 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -385,8 +385,8 @@ namespace projects { setBackgroundField(bgConstantField, BgBGrid, true); } } - - + + Real Magnetosphere::getDistribValue( creal& x,creal& y,creal& z, creal& vx,creal& vy,creal& vz, diff --git a/projects/MultiPeak/MultiPeak.cpp b/projects/MultiPeak/MultiPeak.cpp index 27c937e82..3bca5de52 100644 --- a/projects/MultiPeak/MultiPeak.cpp +++ b/projects/MultiPeak/MultiPeak.cpp @@ -229,26 +229,28 @@ namespace projects { setBackgroundField(bgField, BgBGrid); - auto localSize = perBGrid.getLocalSize(); - - #pragma omp parallel for collapse(3) - for (int x = 0; x < localSize[0]; ++x) { - for (int y = 0; y < localSize[1]; ++y) { - for (int z = 0; z < localSize[2]; ++z) { - const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); - std::array* cell = perBGrid.get(x, y, z); - const int64_t cellid = perBGrid.GlobalIDForCoords(x, y, z); - setRandomSeed(cellid); - - if (this->lambda != 0.0) { - cell->at(fsgrids::bfield::PERBX) = this->dBx*cos(2.0 * M_PI * xyz[0] / this->lambda); - cell->at(fsgrids::bfield::PERBY) = this->dBy*sin(2.0 * M_PI * xyz[0] / this->lambda); - cell->at(fsgrids::bfield::PERBZ) = this->dBz*cos(2.0 * M_PI * xyz[0] / this->lambda); + if(!P::isRestart) { + auto localSize = perBGrid.getLocalSize(); + +#pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + const int64_t cellid = perBGrid.GlobalIDForCoords(x, y, z); + setRandomSeed(cellid); + + if (this->lambda != 0.0) { + cell->at(fsgrids::bfield::PERBX) = this->dBx*cos(2.0 * M_PI * xyz[0] / this->lambda); + cell->at(fsgrids::bfield::PERBY) = this->dBy*sin(2.0 * M_PI * xyz[0] / this->lambda); + cell->at(fsgrids::bfield::PERBZ) = this->dBz*cos(2.0 * M_PI * xyz[0] / this->lambda); + } + + cell->at(fsgrids::bfield::PERBX) += this->magXPertAbsAmp * (0.5 - getRandomNumber()); + cell->at(fsgrids::bfield::PERBY) += this->magYPertAbsAmp * (0.5 - getRandomNumber()); + cell->at(fsgrids::bfield::PERBZ) += this->magZPertAbsAmp * (0.5 - getRandomNumber()); } - - cell->at(fsgrids::bfield::PERBX) += this->magXPertAbsAmp * (0.5 - getRandomNumber()); - cell->at(fsgrids::bfield::PERBY) += this->magYPertAbsAmp * (0.5 - getRandomNumber()); - cell->at(fsgrids::bfield::PERBZ) += this->magZPertAbsAmp * (0.5 - getRandomNumber()); } } } diff --git a/projects/Riemann1/Riemann1.cpp b/projects/Riemann1/Riemann1.cpp index edcc813c0..86a36e904 100644 --- a/projects/Riemann1/Riemann1.cpp +++ b/projects/Riemann1/Riemann1.cpp @@ -126,36 +126,38 @@ namespace projects { ) { setBackgroundFieldToZero(BgBGrid); - auto localSize = perBGrid.getLocalSize(); - - #pragma omp parallel for collapse(3) - for (int x = 0; x < localSize[0]; ++x) { - for (int y = 0; y < localSize[1]; ++y) { - for (int z = 0; z < localSize[2]; ++z) { - const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); - std::array* cell = perBGrid.get(x, y, z); - - Real Bxavg, Byavg, Bzavg; - Bxavg = Byavg = Bzavg = 0.0; - if(this->nSpaceSamples > 1) { - Real d_x = perBGrid.DX / (this->nSpaceSamples - 1); - Real d_z = perBGrid.DZ / (this->nSpaceSamples - 1); - for (uint i=0; inSpaceSamples; ++i) { - for (uint k=0; knSpaceSamples; ++k) { - Bxavg += ((xyz[0] + i * d_x) < 0.0) ? 
this->Bx[this->LEFT] : this->Bx[this->RIGHT]; - Byavg += ((xyz[0] + i * d_x) < 0.0) ? this->By[this->LEFT] : this->By[this->RIGHT]; - Bzavg += ((xyz[0] + i * d_x) < 0.0) ? this->Bz[this->LEFT] : this->Bz[this->RIGHT]; + if(!P::isRestart) { + auto localSize = perBGrid.getLocalSize(); + + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + Real Bxavg, Byavg, Bzavg; + Bxavg = Byavg = Bzavg = 0.0; + if(this->nSpaceSamples > 1) { + Real d_x = perBGrid.DX / (this->nSpaceSamples - 1); + Real d_z = perBGrid.DZ / (this->nSpaceSamples - 1); + for (uint i=0; inSpaceSamples; ++i) { + for (uint k=0; knSpaceSamples; ++k) { + Bxavg += ((xyz[0] + i * d_x) < 0.0) ? this->Bx[this->LEFT] : this->Bx[this->RIGHT]; + Byavg += ((xyz[0] + i * d_x) < 0.0) ? this->By[this->LEFT] : this->By[this->RIGHT]; + Bzavg += ((xyz[0] + i * d_x) < 0.0) ? this->Bz[this->LEFT] : this->Bz[this->RIGHT]; + } } + cuint nPts = pow(this->nSpaceSamples, 3.0); + + cell->at(fsgrids::bfield::PERBX) = Bxavg / nPts; + cell->at(fsgrids::bfield::PERBY) = Byavg / nPts; + cell->at(fsgrids::bfield::PERBZ) = Bzavg / nPts; + } else { + cell->at(fsgrids::bfield::PERBX) = (xyz[0] < 0.0) ? this->Bx[this->LEFT] : this->Bx[this->RIGHT]; + cell->at(fsgrids::bfield::PERBY) = (xyz[0] < 0.0) ? this->By[this->LEFT] : this->By[this->RIGHT]; + cell->at(fsgrids::bfield::PERBZ) = (xyz[0] < 0.0) ? this->Bz[this->LEFT] : this->Bz[this->RIGHT]; } - cuint nPts = pow(this->nSpaceSamples, 3.0); - - cell->at(fsgrids::bfield::PERBX) = Bxavg / nPts; - cell->at(fsgrids::bfield::PERBY) = Byavg / nPts; - cell->at(fsgrids::bfield::PERBZ) = Bzavg / nPts; - } else { - cell->at(fsgrids::bfield::PERBX) = (xyz[0] < 0.0) ? this->Bx[this->LEFT] : this->Bx[this->RIGHT]; - cell->at(fsgrids::bfield::PERBY) = (xyz[0] < 0.0) ? this->By[this->LEFT] : this->By[this->RIGHT]; - cell->at(fsgrids::bfield::PERBZ) = (xyz[0] < 0.0) ? 
this->Bz[this->LEFT] : this->Bz[this->RIGHT]; } } } diff --git a/projects/Shock/Shock.cpp b/projects/Shock/Shock.cpp index dd9eb567d..5038cc341 100644 --- a/projects/Shock/Shock.cpp +++ b/projects/Shock/Shock.cpp @@ -151,18 +151,20 @@ namespace projects { ) { setBackgroundFieldToZero(BgBGrid); - auto localSize = perBGrid.getLocalSize(); - - #pragma omp parallel for collapse(3) - for (int x = 0; x < localSize[0]; ++x) { - for (int y = 0; y < localSize[1]; ++y) { - for (int z = 0; z < localSize[2]; ++z) { - const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); - std::array* cell = perBGrid.get(x, y, z); - - cell->at(fsgrids::bfield::PERBX) = 0.0; - cell->at(fsgrids::bfield::PERBY) = 0.0; - cell->at(fsgrids::bfield::PERBZ) = this->BZ0*(3.0 + 2.0*tanh((xyz[1] - Parameters::ymax/2.0)/(this->Sharp_Y*Parameters::ymax))); + if(!P::isRestart) { + auto localSize = perBGrid.getLocalSize(); + +#pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + cell->at(fsgrids::bfield::PERBX) = 0.0; + cell->at(fsgrids::bfield::PERBY) = 0.0; + cell->at(fsgrids::bfield::PERBZ) = this->BZ0*(3.0 + 2.0*tanh((xyz[1] - Parameters::ymax/2.0)/(this->Sharp_Y*Parameters::ymax))); + } } } } diff --git a/projects/Shocktest/Shocktest.cpp b/projects/Shocktest/Shocktest.cpp index be44ee460..d8199fb42 100644 --- a/projects/Shocktest/Shocktest.cpp +++ b/projects/Shocktest/Shocktest.cpp @@ -198,36 +198,38 @@ namespace projects { ) { setBackgroundFieldToZero(BgBGrid); - auto localSize = perBGrid.getLocalSize(); - - #pragma omp parallel for collapse(3) - for (int x = 0; x < localSize[0]; ++x) { - for (int y = 0; y < localSize[1]; ++y) { - for (int z = 0; z < localSize[2]; ++z) { - const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); - std::array* cell = perBGrid.get(x, y, z); - - Real Bxavg, Byavg, Bzavg; - Bxavg = Byavg = Bzavg = 0.0; - if(this->nSpaceSamples > 1) { - Real d_x = perBGrid.DX / (this->nSpaceSamples - 1); - Real d_z = perBGrid.DZ / (this->nSpaceSamples - 1); - for (uint i=0; inSpaceSamples; ++i) { - for (uint k=0; knSpaceSamples; ++k) { - Bxavg += ((xyz[0] + i * d_x) < 0.0) ? this->Bx[this->LEFT] : this->Bx[this->RIGHT]; - Byavg += ((xyz[0] + i * d_x) < 0.0) ? this->By[this->LEFT] : this->By[this->RIGHT]; - Bzavg += ((xyz[0] + i * d_x) < 0.0) ? this->Bz[this->LEFT] : this->Bz[this->RIGHT]; + if(!P::isRestart) { + auto localSize = perBGrid.getLocalSize(); + + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + Real Bxavg, Byavg, Bzavg; + Bxavg = Byavg = Bzavg = 0.0; + if(this->nSpaceSamples > 1) { + Real d_x = perBGrid.DX / (this->nSpaceSamples - 1); + Real d_z = perBGrid.DZ / (this->nSpaceSamples - 1); + for (uint i=0; inSpaceSamples; ++i) { + for (uint k=0; knSpaceSamples; ++k) { + Bxavg += ((xyz[0] + i * d_x) < 0.0) ? this->Bx[this->LEFT] : this->Bx[this->RIGHT]; + Byavg += ((xyz[0] + i * d_x) < 0.0) ? this->By[this->LEFT] : this->By[this->RIGHT]; + Bzavg += ((xyz[0] + i * d_x) < 0.0) ? 
this->Bz[this->LEFT] : this->Bz[this->RIGHT]; + } } + cuint nPts = pow(this->nSpaceSamples, 3.0); + + cell->at(fsgrids::bfield::PERBX) = Bxavg / nPts; + cell->at(fsgrids::bfield::PERBY) = Byavg / nPts; + cell->at(fsgrids::bfield::PERBZ) = Bzavg / nPts; + } else { + cell->at(fsgrids::bfield::PERBX) = (xyz[0] < 0.0) ? this->Bx[this->LEFT] : this->Bx[this->RIGHT]; + cell->at(fsgrids::bfield::PERBY) = (xyz[0] < 0.0) ? this->By[this->LEFT] : this->By[this->RIGHT]; + cell->at(fsgrids::bfield::PERBZ) = (xyz[0] < 0.0) ? this->Bz[this->LEFT] : this->Bz[this->RIGHT]; } - cuint nPts = pow(this->nSpaceSamples, 3.0); - - cell->at(fsgrids::bfield::PERBX) = Bxavg / nPts; - cell->at(fsgrids::bfield::PERBY) = Byavg / nPts; - cell->at(fsgrids::bfield::PERBZ) = Bzavg / nPts; - } else { - cell->at(fsgrids::bfield::PERBX) = (xyz[0] < 0.0) ? this->Bx[this->LEFT] : this->Bx[this->RIGHT]; - cell->at(fsgrids::bfield::PERBY) = (xyz[0] < 0.0) ? this->By[this->LEFT] : this->By[this->RIGHT]; - cell->at(fsgrids::bfield::PERBZ) = (xyz[0] < 0.0) ? this->Bz[this->LEFT] : this->Bz[this->RIGHT]; } } } diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index 93a6e6830..afa7d161d 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -230,28 +230,30 @@ namespace projects { setBackgroundField(bgField, BgBGrid); - auto localSize = perBGrid.getLocalSize(); - - #pragma omp parallel for collapse(3) - for (int x = 0; x < localSize[0]; ++x) { - for (int y = 0; y < localSize[1]; ++y) { - for (int z = 0; z < localSize[2]; ++z) { - const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); - std::array* cell = perBGrid.get(x, y, z); - - const int64_t cellid = perBGrid.GlobalIDForCoords(x, y, z); - - setRandomSeed(cellid); - - if (this->lambda != 0.0) { - cell->at(fsgrids::bfield::PERBX) = this->dBx*cos(2.0 * M_PI * xyz[0] / this->lambda); - cell->at(fsgrids::bfield::PERBY) = this->dBy*sin(2.0 * M_PI * xyz[0] / this->lambda); - cell->at(fsgrids::bfield::PERBZ) = this->dBz*cos(2.0 * M_PI * xyz[0] / this->lambda); + if(!P::isRestart) { + auto localSize = perBGrid.getLocalSize(); + + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + const int64_t cellid = perBGrid.GlobalIDForCoords(x, y, z); + + setRandomSeed(cellid); + + if (this->lambda != 0.0) { + cell->at(fsgrids::bfield::PERBX) = this->dBx*cos(2.0 * M_PI * xyz[0] / this->lambda); + cell->at(fsgrids::bfield::PERBY) = this->dBy*sin(2.0 * M_PI * xyz[0] / this->lambda); + cell->at(fsgrids::bfield::PERBZ) = this->dBz*cos(2.0 * M_PI * xyz[0] / this->lambda); + } + + cell->at(fsgrids::bfield::PERBX) += this->magXPertAbsAmp * (0.5 - getRandomNumber()); + cell->at(fsgrids::bfield::PERBY) += this->magYPertAbsAmp * (0.5 - getRandomNumber()); + cell->at(fsgrids::bfield::PERBZ) += this->magZPertAbsAmp * (0.5 - getRandomNumber()); } - - cell->at(fsgrids::bfield::PERBX) += this->magXPertAbsAmp * (0.5 - getRandomNumber()); - cell->at(fsgrids::bfield::PERBY) += this->magYPertAbsAmp * (0.5 - getRandomNumber()); - cell->at(fsgrids::bfield::PERBZ) += this->magZPertAbsAmp * (0.5 - getRandomNumber()); } } } diff --git a/projects/testHall/testHall.cpp b/projects/testHall/testHall.cpp index 24bef05a9..02a193633 100644 --- a/projects/testHall/testHall.cpp +++ b/projects/testHall/testHall.cpp @@ -137,18 +137,20 @@ namespace projects { ) { 
setBackgroundFieldToZero(BgBGrid); - auto localSize = perBGrid.getLocalSize(); - - #pragma omp parallel for collapse(3) - for (int x = 0; x < localSize[0]; ++x) { - for (int y = 0; y < localSize[1]; ++y) { - for (int z = 0; z < localSize[2]; ++z) { - const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); - std::array* cell = perBGrid.get(x, y, z); - - cell->at(fsgrids::bfield::PERBX) = this->BX0 * cos(2.0*M_PI * 1.0 * xyz[0] / (P::xmax - P::xmin)) * cos(2.0*M_PI * 1.0 * xyz[1] / (P::ymax - P::ymin)) * cos(2.0*M_PI * 1.0 * xyz[2] / (P::zmax - P::zmin)); - cell->at(fsgrids::bfield::PERBY) = this->BY0 * cos(2.0*M_PI * 1.0 * xyz[0] / (P::xmax - P::xmin)) * cos(2.0*M_PI * 1.0 * xyz[1] / (P::ymax - P::ymin)) * cos(2.0*M_PI * 1.0 * xyz[2] / (P::zmax - P::zmin)); - cell->at(fsgrids::bfield::PERBZ) = this->BZ0 * cos(2.0*M_PI * 1.0 * xyz[0] / (P::xmax - P::xmin)) * cos(2.0*M_PI * 1.0 * xyz[1] / (P::ymax - P::ymin)) * cos(2.0*M_PI * 1.0 * xyz[2] / (P::zmax - P::zmin)); + if(!P::isRestart) { + auto localSize = perBGrid.getLocalSize(); + +#pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + cell->at(fsgrids::bfield::PERBX) = this->BX0 * cos(2.0*M_PI * 1.0 * xyz[0] / (P::xmax - P::xmin)) * cos(2.0*M_PI * 1.0 * xyz[1] / (P::ymax - P::ymin)) * cos(2.0*M_PI * 1.0 * xyz[2] / (P::zmax - P::zmin)); + cell->at(fsgrids::bfield::PERBY) = this->BY0 * cos(2.0*M_PI * 1.0 * xyz[0] / (P::xmax - P::xmin)) * cos(2.0*M_PI * 1.0 * xyz[1] / (P::ymax - P::ymin)) * cos(2.0*M_PI * 1.0 * xyz[2] / (P::zmax - P::zmin)); + cell->at(fsgrids::bfield::PERBZ) = this->BZ0 * cos(2.0*M_PI * 1.0 * xyz[0] / (P::xmax - P::xmin)) * cos(2.0*M_PI * 1.0 * xyz[1] / (P::ymax - P::ymin)) * cos(2.0*M_PI * 1.0 * xyz[2] / (P::zmax - P::zmin)); + } } } } diff --git a/projects/test_fp/test_fp.cpp b/projects/test_fp/test_fp.cpp index 80030e5a1..96140c473 100644 --- a/projects/test_fp/test_fp.cpp +++ b/projects/test_fp/test_fp.cpp @@ -112,64 +112,66 @@ namespace projects { ) { setBackgroundFieldToZero(BgBGrid); - auto localSize = perBGrid.getLocalSize(); - - creal dx = perBGrid.DX * 3.5; - creal dy = perBGrid.DY * 3.5; - creal dz = perBGrid.DZ * 3.5; - - Real areaFactor = 1.0; - - #pragma omp parallel for collapse(3) - for (int x = 0; x < localSize[0]; ++x) { - for (int y = 0; y < localSize[1]; ++y) { - for (int z = 0; z < localSize[2]; ++z) { - const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); - std::array* cell = perBGrid.get(x, y, z); - - creal x = xyz[0] + 0.5 * perBGrid.DX; - creal y = xyz[1] + 0.5 * perBGrid.DY; - creal z = xyz[2] + 0.5 * perBGrid.DZ; - - switch (this->CASE) { - case BXCASE: - cell->at(fsgrids::bfield::PERBX) = 0.1 * this->B0 * areaFactor; - //areaFactor = (CellParams::DY * CellParams::DZ) / (dy * dz); - if (y >= -dy && y <= dy) - if (z >= -dz && z <= dz) - cell->at(fsgrids::bfield::PERBX) = this->B0 * areaFactor; - break; - case BYCASE: - cell->at(fsgrids::bfield::PERBY) = 0.1 * this->B0 * areaFactor; - //areaFactor = (CellParams::DX * CellParams::DZ) / (dx * dz); - if (x >= -dx && x <= dx) - if (z >= -dz && z <= dz) - cell->at(fsgrids::bfield::PERBY) = this->B0 * areaFactor; - break; - case BZCASE: - cell->at(fsgrids::bfield::PERBZ) = 0.1 * this->B0 * areaFactor; - //areaFactor = (CellParams::DX * CellParams::DY) / (dx * dy); - if (x >= -dx && x <= dx) + if(!P::isRestart) { + auto localSize = 
perBGrid.getLocalSize(); + + creal dx = perBGrid.DX * 3.5; + creal dy = perBGrid.DY * 3.5; + creal dz = perBGrid.DZ * 3.5; + + Real areaFactor = 1.0; + + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + const std::array xyz = perBGrid.getPhysicalCoords(x, y, z); + std::array* cell = perBGrid.get(x, y, z); + + creal x = xyz[0] + 0.5 * perBGrid.DX; + creal y = xyz[1] + 0.5 * perBGrid.DY; + creal z = xyz[2] + 0.5 * perBGrid.DZ; + + switch (this->CASE) { + case BXCASE: + cell->at(fsgrids::bfield::PERBX) = 0.1 * this->B0 * areaFactor; + //areaFactor = (CellParams::DY * CellParams::DZ) / (dy * dz); if (y >= -dy && y <= dy) - cell->at(fsgrids::bfield::PERBZ) = this->B0 * areaFactor; - break; - case BALLCASE: - cell->at(fsgrids::bfield::PERBX) = 0.1 * this->B0 * areaFactor; - cell->at(fsgrids::bfield::PERBY) = 0.1 * this->B0 * areaFactor; - cell->at(fsgrids::bfield::PERBZ) = 0.1 * this->B0 * areaFactor; - - //areaFactor = (CellParams::DX * CellParams::DY) / (dx * dy); - - if (y >= -dy && y <= dy) - if (z >= -dz && z <= dz) - cell->at(fsgrids::bfield::PERBX) = this->B0 * areaFactor; - if (x >= -dx && x <= dx) - if (z >= -dz && z <= dz) - cell->at(fsgrids::bfield::PERBY) = this->B0 * areaFactor; - if (x >= -dx && x <= dx) + if (z >= -dz && z <= dz) + cell->at(fsgrids::bfield::PERBX) = this->B0 * areaFactor; + break; + case BYCASE: + cell->at(fsgrids::bfield::PERBY) = 0.1 * this->B0 * areaFactor; + //areaFactor = (CellParams::DX * CellParams::DZ) / (dx * dz); + if (x >= -dx && x <= dx) + if (z >= -dz && z <= dz) + cell->at(fsgrids::bfield::PERBY) = this->B0 * areaFactor; + break; + case BZCASE: + cell->at(fsgrids::bfield::PERBZ) = 0.1 * this->B0 * areaFactor; + //areaFactor = (CellParams::DX * CellParams::DY) / (dx * dy); + if (x >= -dx && x <= dx) + if (y >= -dy && y <= dy) + cell->at(fsgrids::bfield::PERBZ) = this->B0 * areaFactor; + break; + case BALLCASE: + cell->at(fsgrids::bfield::PERBX) = 0.1 * this->B0 * areaFactor; + cell->at(fsgrids::bfield::PERBY) = 0.1 * this->B0 * areaFactor; + cell->at(fsgrids::bfield::PERBZ) = 0.1 * this->B0 * areaFactor; + + //areaFactor = (CellParams::DX * CellParams::DY) / (dx * dy); + if (y >= -dy && y <= dy) - cell->at(fsgrids::bfield::PERBZ) = this->B0 * areaFactor; - break; + if (z >= -dz && z <= dz) + cell->at(fsgrids::bfield::PERBX) = this->B0 * areaFactor; + if (x >= -dx && x <= dx) + if (z >= -dz && z <= dz) + cell->at(fsgrids::bfield::PERBY) = this->B0 * areaFactor; + if (x >= -dx && x <= dx) + if (y >= -dy && y <= dy) + cell->at(fsgrids::bfield::PERBZ) = this->B0 * areaFactor; + break; + } } } } diff --git a/vlasiator.cpp b/vlasiator.cpp index 0f5b38499..3c139459e 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -285,9 +285,6 @@ int main(int argn,char* args[]) { Real newDt; bool dtIsChanged; - const bool printCells = false; - const bool printSums = false; - // Init MPI: int required=MPI_THREAD_FUNNELED; int provided; @@ -448,9 +445,11 @@ int main(int argn,char* args[]) { mpiGrid, perBGrid, perBDt2Grid, + dPerBGrid, BgBGrid, momentsGrid, momentsDt2Grid, + EGrid, EGradPeGrid, volGrid, technicalGrid, @@ -470,29 +469,10 @@ int main(int argn,char* args[]) { initializeDataReducers(&outputReducer, &diagnosticReducer); phiprof::stop("Init DROs"); - phiprof::start("Init field propagator"); - if ( - initializeFieldPropagator( - perBGrid, - perBDt2Grid, - EGrid, - EDt2Grid, - EHallGrid, - EGradPeGrid, - momentsGrid, - momentsDt2Grid, - dPerBGrid, - 
dMomentsGrid, - BgBGrid, - volGrid, - technicalGrid, - sysBoundaries - ) == false - ) { - logFile << "(MAIN): Field propagator did not initialize correctly!" << endl << writeVerbose; - exit(1); - } - phiprof::stop("Init field propagator"); + phiprof::start("getFieldsFromFsGrid"); + volGrid.updateGhostCells(); + getFieldsFromFsGrid(volGrid, BgBGrid, EGradPeGrid, technicalGrid, mpiGrid, cells); + phiprof::stop("getFieldsFromFsGrid"); // Free up memory: readparameters.finalize(); @@ -639,32 +619,11 @@ int main(int argn,char* args[]) { double beforeTime = MPI_Wtime(); double beforeSimulationTime=P::t_min; double beforeStep=P::tstep_min; - - Real nSum = 0.0; - for(auto cell: cells) { - creal rho = mpiGrid[cell]->parameters[CellParams::RHOM_R]; - creal dx = mpiGrid[cell]->parameters[CellParams::DX]; - creal dy = mpiGrid[cell]->parameters[CellParams::DY]; - creal dz = mpiGrid[cell]->parameters[CellParams::DZ]; - creal x = mpiGrid[cell]->parameters[CellParams::XCRD]; - creal y = mpiGrid[cell]->parameters[CellParams::YCRD]; - creal z = mpiGrid[cell]->parameters[CellParams::ZCRD]; - - nSum += rho*dx*dy*dz; - if(printCells) cout << "Cell " << cell << " rho = " << rho << " x: " << x << " y: " << y << " z: " << z << endl; - } - if(printSums) { - cout << "Rank " << myRank << ", Local sum = " << nSum << endl; - Real globalSum = 0.0; - MPI_Reduce(&nSum, &globalSum, 1, MPI_DOUBLE, MPI_SUM, MASTER_RANK, MPI_COMM_WORLD); - MPI_Barrier(MPI_COMM_WORLD); - if(myRank == MASTER_RANK) cout << " Global sum = " << globalSum << endl; - } while(P::tstep <= P::tstep_max && P::t-P::dt <= P::t_max+DT_EPSILON && wallTimeRestartCounter <= P::exitAfterRestarts) { - + addTimedBarrier("barrier-loop-start"); phiprof::start("IO"); @@ -716,7 +675,6 @@ int main(int argn,char* args[]) { phiprof::stop("diagnostic-io"); } - bool extractFsGridFields = true; // write system, loop through write classes for (uint i = 0; i < P::systemWriteTimeInterval.size(); i++) { if (P::systemWriteTimeInterval[i] >= 0.0 && @@ -891,7 +849,7 @@ int main(int argn,char* args[]) { mpiGrid[cells[c]]->get_cell_parameters()[CellParams::LBWEIGHTCOUNTER] = 0; } } - + phiprof::start("Propagate"); //Propagate the state of simulation forward in time by dt: if (P::propagateVlasovTranslation || P::propagateVlasovAcceleration ) { @@ -908,30 +866,9 @@ int main(int argn,char* args[]) { } else { calculateSpatialTranslation(mpiGrid,0.0); } - - Real nSum = 0.0; - for(auto cell: cells) { - creal rho = mpiGrid[cell]->parameters[CellParams::RHOM_R]; - creal dx = mpiGrid[cell]->parameters[CellParams::DX]; - creal dy = mpiGrid[cell]->parameters[CellParams::DY]; - creal dz = mpiGrid[cell]->parameters[CellParams::DZ]; - creal x = mpiGrid[cell]->parameters[CellParams::XCRD]; - creal y = mpiGrid[cell]->parameters[CellParams::YCRD]; - creal z = mpiGrid[cell]->parameters[CellParams::ZCRD]; - - nSum += rho*dx*dy*dz; - if(printCells) cout << "Cell " << cell << " rho = " << rho << " x: " << x << " y: " << y << " z: " << z << endl; - } - if(printSums) { - cout << "Rank " << myRank << ", Local sum = " << nSum << endl; - Real globalSum = 0.0; - MPI_Reduce(&nSum, &globalSum, 1, MPI_DOUBLE, MPI_SUM, MASTER_RANK, MPI_COMM_WORLD); - MPI_Barrier(MPI_COMM_WORLD); - if(printSums && myRank == MASTER_RANK) cout << " Global sum = " << globalSum << endl; - } phiprof::stop("Spatial-space",computedCells,"Cells"); - + phiprof::start("Compute interp moments"); calculateInterpolatedVelocityMoments( mpiGrid, @@ -945,7 +882,7 @@ int main(int argn,char* args[]) { CellParams::P_33_DT2 ); 
phiprof::stop("Compute interp moments"); - + // Apply boundary conditions if (P::propagateVlasovTranslation || P::propagateVlasovAcceleration ) { phiprof::start("Update system boundaries (Vlasov post-translation)"); @@ -953,7 +890,7 @@ int main(int argn,char* args[]) { phiprof::stop("Update system boundaries (Vlasov post-translation)"); addTimedBarrier("barrier-boundary-conditions"); } - + // Propagate fields forward in time by dt. This needs to be done before the // moments for t + dt are computed (field uses t and t+0.5dt) if (P::propagateField) { @@ -965,7 +902,7 @@ int main(int argn,char* args[]) { feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,false); feedMomentsIntoFsGrid(mpiGrid, cells, momentsDt2Grid,true); phiprof::stop("fsgrid-coupling-in"); - + propagateFields( perBGrid, perBDt2Grid, @@ -989,12 +926,12 @@ int main(int argn,char* args[]) { // Copy results back from fsgrid. volGrid.updateGhostCells(); technicalGrid.updateGhostCells(); - getFieldsFromFsGrid(volGrid, BgBGrid, EGradPeGrid, technicalGrid, mpiGrid, cells); - phiprof::stop("getFieldsFromFsGrid"); + getFieldsFromFsGrid(volGrid, BgBGrid, EGradPeGrid, technicalGrid, mpiGrid, cells); + phiprof::stop("getFieldsFromFsGrid"); phiprof::stop("Propagate Fields",cells.size(),"SpatialCells"); addTimedBarrier("barrier-after-field-solver"); } - + phiprof::start("Velocity-space"); if ( P::propagateVlasovAcceleration ) { calculateAcceleration(mpiGrid,P::dt); From 8b39e659e8eef1e1695242b75c3f9b89f2820ff5 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Wed, 29 May 2019 11:23:34 +0300 Subject: [PATCH 486/602] Restart read and write PERB and EFIELD. Also change "double" to "real". Another step towards working and bit-identical fsgrid restarts. --- grid.cpp | 4 ++-- grid.h | 2 +- ioread.cpp | 13 ++++------- ioread.h | 5 +---- iowrite.cpp | 62 ++++++----------------------------------------------- 5 files changed, 14 insertions(+), 72 deletions(-) diff --git a/grid.cpp b/grid.cpp index 4951a807f..9d1a39813 100644 --- a/grid.cpp +++ b/grid.cpp @@ -87,7 +87,7 @@ void initializeGrids( char **argc, dccrg::Dccrg& mpiGrid, FsGrid< std::array, 2> & perBGrid, - FsGrid< std::array, 2> & perBDt2Grid, + FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2> & momentsGrid, FsGrid< std::array, 2> & momentsDt2Grid, @@ -187,7 +187,7 @@ void initializeGrids( if (P::isRestart) { logFile << "Restart from "<< P::restartFileName << std::endl << writeVerbose; phiprof::start("Read restart"); - if (readGrid(mpiGrid,perBGrid,EGradPeGrid,momentsGrid,BgBGrid,volGrid,technicalGrid,P::restartFileName) == false) { + if (readGrid(mpiGrid,perBGrid,EGrid,technicalGrid,P::restartFileName) == false) { logFile << "(MAIN) ERROR: restarting failed" << endl; exit(1); } diff --git a/grid.h b/grid.h index e457d8154..2abf9d0b3 100644 --- a/grid.h +++ b/grid.h @@ -37,7 +37,7 @@ void initializeGrids( char **argc, dccrg::Dccrg& mpiGrid, FsGrid< std::array, 2> & perBGrid, - FsGrid< std::array, 2> & perBDt2Grid, + FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2> & momentsGrid, FsGrid< std::array, 2> & momentsDt2Grid, diff --git a/ioread.cpp b/ioread.cpp index e0fe44966..0d065498c 100644 --- a/ioread.cpp +++ b/ioread.cpp @@ -901,10 +901,7 @@ bool checkScalarParameter(vlsv::ParallelReader& file,const string& name,T correc */ bool exec_readGrid(dccrg::Dccrg& mpiGrid, FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& EGradPeGrid, - FsGrid< std::array, 2>& momentsGrid, - FsGrid< std::array, 2>& BgBGrid, - 
FsGrid< std::array, 2>& volGrid, + FsGrid< std::array, 2>& EGrid, FsGrid< fsgrids::technical, 2>& technicalGrid, const std::string& name) { vector fileCells; /*< CellIds for all cells in file*/ @@ -1100,6 +1097,7 @@ bool exec_readGrid(dccrg::Dccrg& mpiGrid, exitOnError(false, "(RESTART) FSGrid writing rank number not found in restart file", MPI_COMM_WORLD); } success = readFsGridVariable(file, "fg_PERB", fsgridInputRanks, perBGrid); + success = readFsGridVariable(file, "fg_EFIELD", fsgridInputRanks, EGrid); success = file.close(); phiprof::stop("readGrid"); @@ -1116,12 +1114,9 @@ bool exec_readGrid(dccrg::Dccrg& mpiGrid, */ bool readGrid(dccrg::Dccrg& mpiGrid, FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& EGradPeGrid, - FsGrid< std::array, 2>& momentsGrid, - FsGrid< std::array, 2>& BgBGrid, - FsGrid< std::array, 2>& volGrid, + FsGrid< std::array, 2>& EGrid, FsGrid< fsgrids::technical, 2>& technicalGrid, const std::string& name){ //Check the vlsv version from the file: - return exec_readGrid(mpiGrid,perBGrid,EGradPeGrid,momentsGrid,BgBGrid,volGrid,technicalGrid,name); + return exec_readGrid(mpiGrid,perBGrid,EGrid,technicalGrid,name); } diff --git a/ioread.h b/ioread.h index 4f47966ae..eabb28cd6 100644 --- a/ioread.h +++ b/ioread.h @@ -38,10 +38,7 @@ */ bool readGrid(dccrg::Dccrg& mpiGrid, FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2> & EGradPeGrid, - FsGrid< std::array, 2>& momentsGrid, - FsGrid< std::array, 2>& BgBGrid, - FsGrid< std::array, 2>& volGrid, + FsGrid< std::array, 2>& EGrid, FsGrid< fsgrids::technical, 2>& technicalGrid, const std::string& name); diff --git a/iowrite.cpp b/iowrite.cpp index aee11c729..d50b80b74 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -1312,14 +1312,14 @@ bool writeRestart(dccrg::Dccrg& mpiGrid, FsGrid< std::array, 2>& dMomentsGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*fsgrids::efield::N_EFIELD); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*fsgrids::efield::N_EFIELD); int index=0; for(int z=0; z& mpiGrid, FsGrid< std::array, 2>& dMomentsGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*fsgrids::bfield::N_BFIELD); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*fsgrids::bfield::N_BFIELD); int index=0; for(int z=0; z& mpiGrid, return retval; } )); - restartReducer.addOperator(new DRO::DataReductionOperatorFsGrid("fg_BGB",[]( - FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& EGrid, - FsGrid< std::array, 2>& EHallGrid, - FsGrid< std::array, 2>& EGradPeGrid, - FsGrid< std::array, 2>& momentsGrid, - FsGrid< std::array, 2>& dPerBGrid, - FsGrid< std::array, 2>& dMomentsGrid, - FsGrid< std::array, 2>& BgBGrid, - FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*fsgrids::bgbfield::N_BGB); - int index=0; - for(int z=0; z, 2>& perBGrid, - FsGrid< std::array, 2>& EGrid, - FsGrid< std::array, 2>& EHallGrid, - FsGrid< std::array, 2>& 
EGradPeGrid, - FsGrid< std::array, 2>& momentsGrid, - FsGrid< std::array, 2>& dPerBGrid, - FsGrid< std::array, 2>& dMomentsGrid, - FsGrid< std::array, 2>& BgBGrid, - FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*fsgrids::dperb::N_DPERB); - int index=0; - for(int z=0; z Date: Wed, 29 May 2019 11:24:35 +0300 Subject: [PATCH 487/602] Fix template P component initialisation. --- sysboundary/ionosphere.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index 7b968bbea..1b478b14c 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -754,6 +754,9 @@ namespace SBC { templateCell.parameters[CellParams::VY_DT2] = templateCell.parameters[CellParams::VY]; templateCell.parameters[CellParams::VZ_DT2] = templateCell.parameters[CellParams::VZ]; templateCell.parameters[CellParams::RHOQ_DT2] = templateCell.parameters[CellParams::RHOQ]; + templateCell.parameters[CellParams::P_11_DT2] = templateCell.parameters[CellParams::P_11]; + templateCell.parameters[CellParams::P_22_DT2] = templateCell.parameters[CellParams::P_22]; + templateCell.parameters[CellParams::P_33_DT2] = templateCell.parameters[CellParams::P_33]; } Real Ionosphere::shiftedMaxwellianDistribution( From ee81b104ccb9a2f4cbf9d083e9e038e5ef1b251a Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 29 May 2019 11:25:52 +0300 Subject: [PATCH 488/602] Same as before for other boundary, P initialisation fix. --- sysboundary/setmaxwellian.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sysboundary/setmaxwellian.cpp b/sysboundary/setmaxwellian.cpp index 7c924ff6d..c218970f3 100644 --- a/sysboundary/setmaxwellian.cpp +++ b/sysboundary/setmaxwellian.cpp @@ -318,6 +318,9 @@ namespace SBC { templateCell.parameters[CellParams::VY_DT2] = templateCell.parameters[CellParams::VY]; templateCell.parameters[CellParams::VZ_DT2] = templateCell.parameters[CellParams::VZ]; templateCell.parameters[CellParams::RHOQ_DT2] = templateCell.parameters[CellParams::RHOQ]; + templateCell.parameters[CellParams::P_11_DT2] = templateCell.parameters[CellParams::P_11]; + templateCell.parameters[CellParams::P_22_DT2] = templateCell.parameters[CellParams::P_22]; + templateCell.parameters[CellParams::P_33_DT2] = templateCell.parameters[CellParams::P_33]; } else { cerr << "ERROR: this is not dynamic in time, please code it!" << endl; abort(); From 13e91e782432f808c2eda07569d9186e8e650422 Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 29 May 2019 11:26:30 +0300 Subject: [PATCH 489/602] Remove dangling EVOL. 
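The volume-averaged electric field (EXVOL/EYVOL/EZVOL) was left dangling after the field solver moved to fsgrid, so this removes it from the CellParams and fsgrids::volfields enums, from the VolE/fg_VolE data reducers, and from the fsgrid-to-dccrg coupling. Removing CELL_BGB from the MPI transfer selectors at the same time shifts every higher flag down by one bit so the masks stay gapless powers of two. A minimal runnable sketch of that flag pattern (illustration only, not Vlasiator code):

    #include <cstdint>
    #include <iostream>

    // Each transfer selector occupies a single bit; deleting one entry means
    // renumbering all higher flags to keep the sequence contiguous.
    const uint64_t CELL_PERB   = 1ull << 12;
    const uint64_t CELL_RHOM_V = 1ull << 14; // was 1ull << 15 before CELL_BGB was dropped

    int main() {
       uint64_t mask = CELL_PERB | CELL_RHOM_V;           // request two fields
       std::cout << ((mask & CELL_RHOM_V) != 0) << "\n";  // prints 1: flag is set
    }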
--- common.h | 6 ------ datareduction/datareducer.cpp | 37 ----------------------------------- fieldsolver/gridGlue.cpp | 11 +---------- spatial_cell.hpp | 31 ++++++++++++++--------------- 4 files changed, 16 insertions(+), 69 deletions(-) diff --git a/common.h b/common.h index 0193d790c..17555ce7c 100644 --- a/common.h +++ b/common.h @@ -146,9 +146,6 @@ namespace CellParams { PERBXVOL, /*!< perturbed magnetic field PERBX averaged over spatial cell.*/ PERBYVOL, /*!< perturbed magnetic field PERBY averaged over spatial cell.*/ PERBZVOL, /*!< perturbed magnetic field PERBZ averaged over spatial cell.*/ - EXVOL, /*!< Ex averaged over spatial cell.*/ - EYVOL, /*!< Ey averaged over spatial cell.*/ - EZVOL, /*!< Ez averaged over spatial cell.*/ EXGRADPE, /*!< Electron pressure gradient term x.*/ EYGRADPE, /*!< Electron pressure gradient term y.*/ EZGRADPE, /*!< Electron pressure gradient term z.*/ @@ -350,9 +347,6 @@ namespace fsgrids { PERBXVOL, /*!< perturbed magnetic field PERBX averaged over spatial cell.*/ PERBYVOL, /*!< perturbed magnetic field PERBY averaged over spatial cell.*/ PERBZVOL, /*!< perturbed magnetic field PERBZ averaged over spatial cell.*/ - EXVOL, /*!< Ex averaged over spatial cell.*/ - EYVOL, /*!< Ey averaged over spatial cell.*/ - EZVOL, /*!< Ez averaged over spatial cell.*/ dPERBXVOLdy, dPERBXVOLdz, dPERBYVOLdx, diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 5dc2a4ec4..a8d76f895 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -520,43 +520,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - if(*it == "VolE" || *it == "vg_VolE") { - // Volume-averaged E field - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("E_vol",CellParams::EXVOL,3)); - continue; - } - if(*it == "fg_VolE") { - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_volB",[]( - FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, - FsGrid< std::array, 2>& EGrid, - FsGrid< std::array, 2>& EHallGrid, - FsGrid< std::array, 2>& EGradPeGrid, - FsGrid< std::array, 2>& momentsGrid, - FsGrid< std::array, 2>& dPerBGrid, - FsGrid< std::array, 2>& dMomentsGrid, - FsGrid< std::array, 2>& BgBGrid, - FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); - - // Iterate through fsgrid cells and extract EVOL - for(int z=0; zat(fsgrids::volfields::PERBXVOL); sendBuffer[ii].sums[1 ] += volcell->at(fsgrids::volfields::PERBYVOL); sendBuffer[ii].sums[2 ] += volcell->at(fsgrids::volfields::PERBZVOL); - sendBuffer[ii].sums[3 ] += volcell->at(fsgrids::volfields::EXVOL); - sendBuffer[ii].sums[4 ] += volcell->at(fsgrids::volfields::EYVOL); - sendBuffer[ii].sums[5 ] += volcell->at(fsgrids::volfields::EZVOL); sendBuffer[ii].sums[6 ] += volcell->at(fsgrids::volfields::dPERBXVOLdy) / technicalGrid.DY; sendBuffer[ii].sums[7 ] += volcell->at(fsgrids::volfields::dPERBXVOLdz) / technicalGrid.DZ; sendBuffer[ii].sums[8 ] += volcell->at(fsgrids::volfields::dPERBYVOLdx) / technicalGrid.DX; @@ -326,10 +323,7 @@ void getFieldsFromFsGrid( if ( cellAggregate.second.cells > 0) { cellParams[CellParams::PERBXVOL] = cellAggregate.second.sums[0] / cellAggregate.second.cells; cellParams[CellParams::PERBYVOL] = cellAggregate.second.sums[1] / cellAggregate.second.cells; - cellParams[CellParams::PERBZVOL] = 
cellAggregate.second.sums[2] / cellAggregate.second.cells; - cellParams[CellParams::EXVOL] = cellAggregate.second.sums[3] / cellAggregate.second.cells; - cellParams[CellParams::EYVOL] = cellAggregate.second.sums[4] / cellAggregate.second.cells; - cellParams[CellParams::EZVOL] = cellAggregate.second.sums[5] / cellAggregate.second.cells; + cellParams[CellParams::PERBZVOL] = cellAggregate.second.sums[2] / cellAggregate.second.cells; mpiGrid[cellAggregate.first]->derivativesBVOL[bvolderivatives::dPERBXVOLdy] = cellAggregate.second.sums[6] / cellAggregate.second.cells; mpiGrid[cellAggregate.first]->derivativesBVOL[bvolderivatives::dPERBXVOLdz] = cellAggregate.second.sums[7] / cellAggregate.second.cells; mpiGrid[cellAggregate.first]->derivativesBVOL[bvolderivatives::dPERBYVOLdx] = cellAggregate.second.sums[8] / cellAggregate.second.cells; @@ -348,9 +342,6 @@ void getFieldsFromFsGrid( cellParams[CellParams::PERBXVOL] = 0; cellParams[CellParams::PERBYVOL] = 0; cellParams[CellParams::PERBZVOL] = 0; - cellParams[CellParams::EXVOL] = 0; - cellParams[CellParams::EYVOL] = 0; - cellParams[CellParams::EZVOL] = 0; mpiGrid[cellAggregate.first]->derivativesBVOL[bvolderivatives::dPERBXVOLdy] = 0; mpiGrid[cellAggregate.first]->derivativesBVOL[bvolderivatives::dPERBXVOLdz] = 0; mpiGrid[cellAggregate.first]->derivativesBVOL[bvolderivatives::dPERBYVOLdx] = 0; diff --git a/spatial_cell.hpp b/spatial_cell.hpp index 0d3091971..b97e9e8c8 100644 --- a/spatial_cell.hpp +++ b/spatial_cell.hpp @@ -102,22 +102,21 @@ namespace spatial_cell { const uint64_t CELL_EDT2 = (1ull<<11); const uint64_t CELL_PERB = (1ull<<12); const uint64_t CELL_PERBDT2 = (1ull<<13); - const uint64_t CELL_BGB = (1ull<<14); - const uint64_t CELL_RHOM_V = (1ull<<15); - const uint64_t CELL_RHOMDT2_VDT2 = (1ull<<16); - const uint64_t CELL_RHOQ = (1ull<<17); - const uint64_t CELL_RHOQDT2 = (1ull<<18); - const uint64_t CELL_BVOL = (1ull<<19); - const uint64_t CELL_BVOL_DERIVATIVES = (1ull<<20); - const uint64_t CELL_DIMENSIONS = (1ull<<21); - const uint64_t CELL_IOLOCALCELLID = (1ull<<22); - const uint64_t NEIGHBOR_VEL_BLOCK_DATA = (1ull<<23); - const uint64_t CELL_HALL_TERM = (1ull<<24); - const uint64_t CELL_P = (1ull<<25); - const uint64_t CELL_PDT2 = (1ull<<26); - const uint64_t POP_METADATA = (1ull<<27); - const uint64_t RANDOMGEN = (1ull<<28); - const uint64_t CELL_GRADPE_TERM = (1ull<<29); + const uint64_t CELL_RHOM_V = (1ull<<14); + const uint64_t CELL_RHOMDT2_VDT2 = (1ull<<15); + const uint64_t CELL_RHOQ = (1ull<<16); + const uint64_t CELL_RHOQDT2 = (1ull<<17); + const uint64_t CELL_BVOL = (1ull<<18); + const uint64_t CELL_BVOL_DERIVATIVES = (1ull<<19); + const uint64_t CELL_DIMENSIONS = (1ull<<20); + const uint64_t CELL_IOLOCALCELLID = (1ull<<21); + const uint64_t NEIGHBOR_VEL_BLOCK_DATA = (1ull<<22); + const uint64_t CELL_HALL_TERM = (1ull<<23); + const uint64_t CELL_P = (1ull<<24); + const uint64_t CELL_PDT2 = (1ull<<25); + const uint64_t POP_METADATA = (1ull<<26); + const uint64_t RANDOMGEN = (1ull<<27); + const uint64_t CELL_GRADPE_TERM = (1ull<<28); //all data const uint64_t ALL_DATA = CELL_PARAMETERS From 53dbf5c7b0528ce05e7ab2f88b84be557b33e2bf Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 29 May 2019 11:29:30 +0300 Subject: [PATCH 490/602] Updates and restart fixes in initializaGrids. 
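The key restart fix: the dt2 moments are no longer just duplicated from the t=0 moments but are fed into their own fsgrid copy from the restarted cell data. Condensed sketch of the branch added below (identifiers as in the hunk):

    feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid, false);
    if (!P::isRestart) {
       // Fresh start: moments and dt2 moments are identical at t=0,
       // which is a feature so far.
       feedMomentsIntoFsGrid(mpiGrid, cells, momentsDt2Grid, false);
    } else {
       // Restart: the dt2 moments differ and are read back separately.
       feedMomentsIntoFsGrid(mpiGrid, cells, momentsDt2Grid, true);
    }
    momentsGrid.updateGhostCells();
    momentsDt2Grid.updateGhostCells();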
--- grid.cpp | 25 +++++++++++++------------ grid.h | 2 ++ 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/grid.cpp b/grid.cpp index 4951a807f..880ad87c7 100644 --- a/grid.cpp +++ b/grid.cpp @@ -88,9 +88,11 @@ void initializeGrids( dccrg::Dccrg& mpiGrid, FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2> & perBDt2Grid, + FsGrid< std::array, 2>& dPerBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2> & momentsGrid, FsGrid< std::array, 2> & momentsDt2Grid, + FsGrid< std::array, 2> & EGrid, FsGrid< std::array, 2> & EGradPeGrid, FsGrid< std::array, 2> & volGrid, FsGrid< fsgrids::technical, 2>& technicalGrid, @@ -187,7 +189,7 @@ void initializeGrids( if (P::isRestart) { logFile << "Restart from "<< P::restartFileName << std::endl << writeVerbose; phiprof::start("Read restart"); - if (readGrid(mpiGrid,perBGrid,EGradPeGrid,momentsGrid,BgBGrid,volGrid,technicalGrid,P::restartFileName) == false) { + if (readGrid(mpiGrid,perBGrid,EGrid,EGradPeGrid,momentsGrid,BgBGrid,volGrid,technicalGrid,P::restartFileName) == false) { logFile << "(MAIN) ERROR: restarting failed" << endl; exit(1); } @@ -278,6 +280,7 @@ void initializeGrids( // update complete cell spatial data for full stencil ( SpatialCell::set_mpi_transfer_type(Transfer::ALL_SPATIAL_DATA); mpiGrid.update_copies_of_remote_neighbors(FULL_NEIGHBORHOOD_ID); + phiprof::stop("Fetch Neighbour data"); if (P::isRestart == false) { @@ -289,27 +292,25 @@ void initializeGrids( calculateInitialVelocityMoments(mpiGrid); phiprof::stop("Init moments"); } - + phiprof::start("setProjectBField"); project.setProjectBField(perBGrid, BgBGrid, technicalGrid); perBGrid.updateGhostCells(); BgBGrid.updateGhostCells(); + EGrid.updateGhostCells(); phiprof::stop("setProjectBField"); - phiprof::start("getFieldsFromFsGrid"); - // These should be done by initializeFieldPropagator() if the propagation is turned off. - volGrid.updateGhostCells(); - technicalGrid.updateGhostCells(); - getFieldsFromFsGrid(volGrid, BgBGrid, EGradPeGrid, technicalGrid, mpiGrid, cells); - phiprof::stop("getFieldsFromFsGrid"); - phiprof::start("Finish fsgrid setup"); - - // WARNING this means moments and dt2 moments are the same here. feedMomentsIntoFsGrid(mpiGrid, cells, momentsGrid,false); - feedMomentsIntoFsGrid(mpiGrid, cells, momentsDt2Grid,false); + if(!P::isRestart) { + // WARNING this means moments and dt2 moments are the same here at t=0, which is a feature so far. + feedMomentsIntoFsGrid(mpiGrid, cells, momentsDt2Grid,false); + } else { + feedMomentsIntoFsGrid(mpiGrid, cells, momentsDt2Grid,true); + } momentsGrid.updateGhostCells(); momentsDt2Grid.updateGhostCells(); + technicalGrid.updateGhostCells(); // This needs to be done at some point phiprof::stop("Finish fsgrid setup"); phiprof::stop("Set initial state"); diff --git a/grid.h b/grid.h index e457d8154..a0d782949 100644 --- a/grid.h +++ b/grid.h @@ -38,9 +38,11 @@ void initializeGrids( dccrg::Dccrg& mpiGrid, FsGrid< std::array, 2> & perBGrid, FsGrid< std::array, 2> & perBDt2Grid, + FsGrid< std::array, 2>& dPerBGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2> & momentsGrid, FsGrid< std::array, 2> & momentsDt2Grid, + FsGrid< std::array, 2> & EGrid, FsGrid< std::array, 2> & EGradPeGrid, FsGrid< std::array, 2> & volGrid, FsGrid< fsgrids::technical, 2>& technicalGrid, From 5697e9e339248feeece75223fd9fb537077ef3be Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Wed, 29 May 2019 11:29:41 +0300 Subject: [PATCH 491/602] In restarts, rename fg_EFIELD to fg_E. 
--- ioread.cpp | 2 +- iowrite.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ioread.cpp b/ioread.cpp index 0d065498c..2bb78f543 100644 --- a/ioread.cpp +++ b/ioread.cpp @@ -1097,7 +1097,7 @@ bool exec_readGrid(dccrg::Dccrg& mpiGrid, exitOnError(false, "(RESTART) FSGrid writing rank number not found in restart file", MPI_COMM_WORLD); } success = readFsGridVariable(file, "fg_PERB", fsgridInputRanks, perBGrid); - success = readFsGridVariable(file, "fg_EFIELD", fsgridInputRanks, EGrid); + success = readFsGridVariable(file, "fg_E", fsgridInputRanks, EGrid); success = file.close(); phiprof::stop("readGrid"); diff --git a/iowrite.cpp b/iowrite.cpp index d50b80b74..c488923f1 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -1302,7 +1302,7 @@ bool writeRestart(dccrg::Dccrg& mpiGrid, restartReducer.addOperator(new DRO::BoundaryLayer); // Fsgrid Reducers - restartReducer.addOperator(new DRO::DataReductionOperatorFsGrid("fg_EFIELD",[]( + restartReducer.addOperator(new DRO::DataReductionOperatorFsGrid("fg_E",[]( FsGrid< std::array, 2>& perBGrid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, From 83cbd65205f8dacfb9719456fd380bab336cab79 Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 29 May 2019 11:31:54 +0300 Subject: [PATCH 492/602] Removed initializeFieldPropagator from ldz_main.cpp. --- fieldsolver/ldz_main.cpp | 120 +-------------------------------------- 1 file changed, 2 insertions(+), 118 deletions(-) diff --git a/fieldsolver/ldz_main.cpp b/fieldsolver/ldz_main.cpp index 986801b4f..2894cfb64 100644 --- a/fieldsolver/ldz_main.cpp +++ b/fieldsolver/ldz_main.cpp @@ -53,124 +53,6 @@ #include "fs_limiters.h" #include "mpiconversion.h" - -bool initializeFieldPropagator( - FsGrid< std::array, 2> & perBGrid, - FsGrid< std::array, 2> & perBDt2Grid, - FsGrid< std::array, 2> & EGrid, - FsGrid< std::array, 2> & EDt2Grid, - FsGrid< std::array, 2> & EHallGrid, - FsGrid< std::array, 2> & EGradPeGrid, - FsGrid< std::array, 2> & momentsGrid, - FsGrid< std::array, 2> & momentsDt2Grid, - FsGrid< std::array, 2> & dPerBGrid, - FsGrid< std::array, 2> & dMomentsGrid, - FsGrid< std::array, 2> & BgBGrid, - FsGrid< std::array, 2> & volGrid, - FsGrid< fsgrids::technical, 2> & technicalGrid, - SysBoundary& sysBoundaries -) { - // Checking that spatial cells are cubic, otherwise field solver is incorrect (cf. derivatives in E, Hall term) - if((abs((technicalGrid.DX-technicalGrid.DY)/technicalGrid.DX) > 0.001) || - (abs((technicalGrid.DX-technicalGrid.DZ)/technicalGrid.DX) > 0.001) || - (abs((technicalGrid.DY-technicalGrid.DZ)/technicalGrid.DY) > 0.001)) { - std::cerr << "WARNING: Your spatial cells seem not to be cubic. However the field solver is assuming them to be. Use at your own risk and responsibility!" << std::endl; - } - - // Assume static background field, they are not communicated here - // but are assumed to be ok after each load balance as that - // communicates all spatial data - - // Assuming B is known, calculate derivatives and upwinded edge-E. Exchange derivatives - // and edge-E:s between neighbouring processes and calculate volume-averaged E,B fields. 
- bool communicateMomentsDerivatives = true; - - if(!P::isRestart) { - calculateDerivativesSimple(perBGrid, perBDt2Grid, momentsGrid, momentsDt2Grid, dPerBGrid, dMomentsGrid, technicalGrid, sysBoundaries, RK_ORDER1, true); - - if(P::ohmGradPeTerm > 0) { - calculateGradPeTermSimple(EGradPeGrid, momentsGrid, momentsDt2Grid, dMomentsGrid, technicalGrid, sysBoundaries, RK_ORDER1); - communicateMomentsDerivatives = false; - } - // derivatives, gradPe and volume B are needed also in cases where propagateFields is false. - if(P::propagateField) { - if(P::ohmHallTerm > 0) { - calculateHallTermSimple( - perBGrid, - perBDt2Grid, - EHallGrid, - momentsGrid, - momentsDt2Grid, - dPerBGrid, - dMomentsGrid, - BgBGrid, - technicalGrid, - sysBoundaries, - RK_ORDER1, - communicateMomentsDerivatives - ); - } - calculateUpwindedElectricFieldSimple( - perBGrid, - perBDt2Grid, - EGrid, - EDt2Grid, - EHallGrid, - EGradPeGrid, - momentsGrid, - momentsDt2Grid, - dPerBGrid, - dMomentsGrid, - BgBGrid, - technicalGrid, - sysBoundaries, - RK_ORDER1 - ); - } - calculateVolumeAveragedFields(perBGrid,EGrid,dPerBGrid,volGrid,technicalGrid); - calculateBVOLDerivativesSimple(volGrid, technicalGrid, sysBoundaries); - } else { - calculateDerivativesSimple(perBGrid, perBDt2Grid, momentsGrid, momentsDt2Grid, dPerBGrid, dMomentsGrid, technicalGrid, sysBoundaries, RK_ORDER2_STEP2, true); - if(P::ohmGradPeTerm > 0) { - calculateGradPeTermSimple(EGradPeGrid, momentsGrid, momentsDt2Grid, dMomentsGrid, technicalGrid, sysBoundaries, RK_ORDER2_STEP2); - communicateMomentsDerivatives = false; - } - if(P::ohmHallTerm > 0) { - calculateHallTermSimple( - perBGrid, - perBDt2Grid, - EHallGrid, - momentsGrid, - momentsDt2Grid, - dPerBGrid, - dMomentsGrid, - BgBGrid, - technicalGrid, - sysBoundaries, - RK_ORDER2_STEP2, - communicateMomentsDerivatives - ); - } - calculateUpwindedElectricFieldSimple( - perBGrid, - perBDt2Grid, - EGrid, - EDt2Grid, - EHallGrid, - EGradPeGrid, - momentsGrid, - momentsDt2Grid, - dPerBGrid, - dMomentsGrid, - BgBGrid, - technicalGrid, - sysBoundaries, - RK_ORDER2_STEP2 - ); - } - return true; - } - /*! Re-initialize field propagator after rebalance. E, BGB, RHO, RHO_V, cell_dimensions, sysboundaryflag need to be up to date for the extended neighborhood @@ -370,6 +252,7 @@ bool propagateFields( // In case of subcycling, we decided to go for a blunt Runge-Kutta subcycling even though e.g. moments are not going along. // Result of the Summer of Debugging 2016, the behaviour in wave dispersion was much improved with this. propagateMagneticFieldSimple(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, sysBoundaries, subcycleDt, RK_ORDER2_STEP1); + // We need to calculate derivatives of the moments at every substep, but they only // need to be communicated in the first one. calculateDerivativesSimple(perBGrid, perBDt2Grid, momentsGrid, momentsDt2Grid, dPerBGrid, dMomentsGrid, technicalGrid, sysBoundaries, RK_ORDER2_STEP1, (subcycleCount==0)); @@ -411,6 +294,7 @@ bool propagateFields( ); propagateMagneticFieldSimple(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, sysBoundaries, subcycleDt, RK_ORDER2_STEP2); + // We need to calculate derivatives of the moments at every substep, but they only // need to be communicated in the first one. 
calculateDerivativesSimple(perBGrid, perBDt2Grid, momentsGrid, momentsDt2Grid, dPerBGrid, dMomentsGrid, technicalGrid, sysBoundaries, RK_ORDER2_STEP2, (subcycleCount==0)); From b83b4c85b7e47f936d92459b7d7620cf4d664572 Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 29 May 2019 11:39:34 +0300 Subject: [PATCH 493/602] Fixed declarations in DROs. Clean-up. --- datareduction/datareducer.cpp | 23 +---------------------- datareduction/datareducer.h | 1 - datareduction/datareductionoperator.cpp | 3 +-- datareduction/datareductionoperator.h | 2 -- 4 files changed, 2 insertions(+), 27 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index a8d76f895..f3c35c02e 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -39,7 +39,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "fg_B" || *it == "B") { // Bulk magnetic field at Yee-Lattice locations outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_B",[]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -74,7 +73,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "fg_BackgroundB" || *it == "BackgroundB") { // Static (typically dipole) magnetic field part outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_background_B",[]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -106,7 +104,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "fg_PerturbedB" || *it == "PerturbedB") { // Fluctuating magnetic field part outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_perturbed_B",[]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -138,7 +135,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "fg_E" || *it== "E") { // Bulk electric field at Yee-lattice locations outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_E",[]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -174,7 +170,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "fg_Rhom") { // Overall mass density (summed over all populations) outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_rhom",[]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -208,7 +203,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "fg_Rhoq") { // Overall charge density (summed over all populations) outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_rhoq",[]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -251,7 +245,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "fg_V") { // Overall 
effective bulk density defining the center-of-mass frame from all populations outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_V",[]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -373,7 +366,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti // Maximum timestep constraint as calculated by the fieldsolver outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("MaxFieldsdt",[]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -409,7 +401,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti // Map of spatial decomposition of the FsGrid into MPI ranks outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("FsGridRank",[]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -436,7 +427,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti // Type of boundarycells as stored in FSGrid outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("FsGridBoundaryType",[]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -472,7 +462,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti // Type of boundarycells as stored in FSGrid outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("FsGridBoundaryLayer",[]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -525,7 +514,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti std::string reducer_name = "fg_HallE" + std::to_string(index); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid(reducer_name,[index]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -566,7 +554,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "fg_VolB") { // Static (typically dipole) magnetic field part outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_volB",[]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -615,7 +602,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti // Overall scalar pressure from all populations outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_Pressure",[]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -674,7 +660,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti if(*it == "fg_GridCoordinates") { outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_X",[]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< 
std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -701,7 +686,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_Y",[]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -728,7 +712,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_Z",[]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -755,7 +738,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_DX",[]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -773,7 +755,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_DY",[]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -791,7 +772,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti )); outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_DZ",[]( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -1045,7 +1025,6 @@ bool DataReducer::writeParameters(const unsigned int& operatorID, vlsv::Writer& */ bool DataReducer::writeFsGridData( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -1061,6 +1040,6 @@ bool DataReducer::writeFsGridData( if(!DROf) { return false; } else { - return DROf->writeFsGridData(perBGrid, perBDt2Grid, EGrid, EHallGrid, EGradPeGrid, momentsGrid, dPerBGrid, dMomentsGrid, BgBGrid, volGrid, technicalGrid, meshName, vlsvWriter); + return DROf->writeFsGridData(perBGrid, EGrid, EHallGrid, EGradPeGrid, momentsGrid, dPerBGrid, dMomentsGrid, BgBGrid, volGrid, technicalGrid, meshName, vlsvWriter); } } diff --git a/datareduction/datareducer.h b/datareduction/datareducer.h index 8cf5fabcf..ac37d9546 100644 --- a/datareduction/datareducer.h +++ b/datareduction/datareducer.h @@ -57,7 +57,6 @@ class DataReducer { bool writeParameters(const unsigned int& operatorID, vlsv::Writer& vlsvWriter); bool writeFsGridData( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, diff --git a/datareduction/datareductionoperator.cpp b/datareduction/datareductionoperator.cpp index fa89b919f..3cbb6e5d2 100644 --- a/datareduction/datareductionoperator.cpp +++ b/datareduction/datareductionoperator.cpp @@ -133,7 +133,6 @@ namespace DRO { bool DataReductionOperatorFsGrid::writeFsGridData( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< 
std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -149,7 +148,7 @@ namespace DRO { attribs["name"]=variableName; std::vector varBuffer = - lambda(perBGrid,perBDt2Grid,EGrid,EHallGrid,EGradPeGrid,momentsGrid,dPerBGrid,dMomentsGrid,BgBGrid,volGrid,technicalGrid); + lambda(perBGrid,EGrid,EHallGrid,EGradPeGrid,momentsGrid,dPerBGrid,dMomentsGrid,BgBGrid,volGrid,technicalGrid); std::array& gridSize = technicalGrid.getLocalSize(); int vectorSize = varBuffer.size() / (gridSize[0]*gridSize[1]*gridSize[2]); diff --git a/datareduction/datareductionoperator.h b/datareduction/datareductionoperator.h index cdd80aeca..67b54b184 100644 --- a/datareduction/datareductionoperator.h +++ b/datareduction/datareductionoperator.h @@ -85,7 +85,6 @@ namespace DRO { public: typedef std::function( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, @@ -108,7 +107,6 @@ namespace DRO { virtual bool reduceDiagnostic(const SpatialCell* cell,Real * result); virtual bool writeFsGridData( FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& perBDt2Grid, FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& EHallGrid, FsGrid< std::array, 2>& EGradPeGrid, From cddbf5c1ec0721ab094e7440f05cfb9509d05e1d Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 29 May 2019 11:52:06 +0300 Subject: [PATCH 494/602] Compilation fixing after merge. --- grid.cpp | 1 - grid.h | 1 - vlasiator.cpp | 2 -- 3 files changed, 4 deletions(-) diff --git a/grid.cpp b/grid.cpp index f6c80bc6d..ce55a6a4e 100644 --- a/grid.cpp +++ b/grid.cpp @@ -87,7 +87,6 @@ void initializeGrids( char **argc, dccrg::Dccrg& mpiGrid, FsGrid< std::array, 2> & perBGrid, - FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2> & momentsGrid, FsGrid< std::array, 2> & momentsDt2Grid, diff --git a/grid.h b/grid.h index 27f8bd467..cd931ab14 100644 --- a/grid.h +++ b/grid.h @@ -37,7 +37,6 @@ void initializeGrids( char **argc, dccrg::Dccrg& mpiGrid, FsGrid< std::array, 2> & perBGrid, - FsGrid< std::array, 2>& EGrid, FsGrid< std::array, 2>& BgBGrid, FsGrid< std::array, 2> & momentsGrid, FsGrid< std::array, 2> & momentsDt2Grid, diff --git a/vlasiator.cpp b/vlasiator.cpp index 3c139459e..9166c3909 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -444,8 +444,6 @@ int main(int argn,char* args[]) { args, mpiGrid, perBGrid, - perBDt2Grid, - dPerBGrid, BgBGrid, momentsGrid, momentsDt2Grid, From 133bb5fa91cdb0c066313ef174a8fed694c9bac0 Mon Sep 17 00:00:00 2001 From: ykempf Date: Mon, 3 Jun 2019 09:00:06 +0300 Subject: [PATCH 495/602] Moved setFaceNeighborRanks behind an if P::amrMaxSpatialRefLevel > 0. 
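The face-neighbour ranks are only used when spatial refinement is active, so runs on a uniform grid now skip the bookkeeping after each load balance. The guard, condensed from the hunk below:

    phiprof::stop("Init solvers");

    // Record ranks of face neighbors; only the refined-grid (AMR) translation
    // uses them, so skip the pass on uniform grids.
    if (P::amrMaxSpatialRefLevel > 0) {
       phiprof::start("set face neighbor ranks");
       setFaceNeighborRanks(mpiGrid);
       phiprof::stop("set face neighbor ranks");
    }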
--- grid.cpp | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/grid.cpp b/grid.cpp index d328fd31c..24bb83ce1 100644 --- a/grid.cpp +++ b/grid.cpp @@ -538,13 +538,17 @@ void balanceLoad(dccrg::Dccrg& mpiGrid, S exit(1); } } - + + phiprof::stop("Init solvers"); + // Record ranks of face neighbors - phiprof::start("set face neighbor ranks"); - setFaceNeighborRanks( mpiGrid ); - phiprof::stop("set face neighbor ranks"); + if(P::amrMaxSpatialRefLevel > 0) { + phiprof::start("set face neighbor ranks"); + setFaceNeighborRanks( mpiGrid ); + phiprof::stop("set face neighbor ranks"); + } + - phiprof::stop("Init solvers"); phiprof::stop("Balancing load"); } From 7164fddd8026cde25697e94f2832284d34a01db2 Mon Sep 17 00:00:00 2001 From: ykempf Date: Mon, 3 Jun 2019 09:00:41 +0300 Subject: [PATCH 496/602] Bugfix in Outflow. --- sysboundary/outflow.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sysboundary/outflow.cpp b/sysboundary/outflow.cpp index 628aeb925..4eadab5ea 100644 --- a/sysboundary/outflow.cpp +++ b/sysboundary/outflow.cpp @@ -324,6 +324,9 @@ namespace SBC { cell->parameters[CellParams::VX_DT2] = cell->parameters[CellParams::VX]; cell->parameters[CellParams::VY_DT2] = cell->parameters[CellParams::VY]; cell->parameters[CellParams::VZ_DT2] = cell->parameters[CellParams::VZ]; + cell->parameters[CellParams::P_11_DT2] = cell->parameters[CellParams::P_11]; + cell->parameters[CellParams::P_22_DT2] = cell->parameters[CellParams::P_22]; + cell->parameters[CellParams::P_33_DT2] = cell->parameters[CellParams::P_33]; } } return true; From 526ea767e13d4db4e5c22c7f24092baa1d6abb11 Mon Sep 17 00:00:00 2001 From: ykempf Date: Mon, 3 Jun 2019 09:01:44 +0300 Subject: [PATCH 497/602] Removed unused declaration. --- vlasovsolver/cpu_trans_map_amr.hpp | 6 ------ 1 file changed, 6 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index 6b73522a5..11d17ce00 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -194,12 +194,6 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg ids, uint dimension, std::vector path); -void get_seed_ids(const dccrg::Dccrg& mpiGrid, - const std::vector &localPropagatedCells, - const uint dimension, - std::vector &seedIds); - - bool trans_map_1d_amr(const dccrg::Dccrg& mpiGrid, const std::vector& localPropagatedCells, const std::vector& remoteTargetCells, From a90b90db59e1b269ab132b439e451197de0668ff Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Wed, 5 Jun 2019 11:27:06 +0300 Subject: [PATCH 498/602] Fix for a bug in the source cell computation. The ghost cells on the negative side of the pencil were computed in the wrong order (furthest to nearest), which caused a problem in cases where some of the ghost cells could not be found (i.e. near boundary cells). This fix changes the order of iteration so that the nearest ghost cell is added to the source list first.
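A minimal sketch of the corrected fill order, under the simplifying assumption of a flat array of source cells padded by a stencil's worth of ghost cells on the negative side. The names mirror computeSpatialSourceCellsForPencil and VLASOV_STENCIL_WIDTH from the diff below, but this helper is illustrative, not the actual implementation:

   #include <vector>

   struct SpatialCell; // stand-in declaration; the real type lives in spatial_cell.hpp

   // Sketch: walk the negative-side ghosts from nearest to furthest, so a
   // missing neighbour (e.g. outside a boundary) is replaced by the closest
   // valid cell found so far rather than by a more distant one.
   void fillNegativeGhosts(std::vector<SpatialCell*>& sourceCells,
                           const std::vector<SpatialCell*>& negativeNbrs, // ordered nearest first
                           const int stencilWidth) {
      SpatialCell* closestValid = sourceCells[stencilWidth]; // first cell of the pencil proper
      for (int i = 0; i < stencilWidth; ++i) {
         if (i < static_cast<int>(negativeNbrs.size()) && negativeNbrs[i] != nullptr) {
            closestValid = negativeNbrs[i];
         }
         // sourceCells[stencilWidth - 1] is the nearest ghost, index 0 the furthest
         sourceCells[stencilWidth - 1 - i] = closestValid;
      }
   }

Iterating furthest-to-nearest, as the old code did, made a missing cell inherit a more distant (or uninitialized) fallback instead of the nearest valid one.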
--- vlasovsolver/cpu_trans_map_amr.cpp | 48 +++++++++++++++++++++--------- 1 file changed, 34 insertions(+), 14 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 04dbad0b1..bbd625bf7 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -103,7 +103,6 @@ void computeSpatialSourceCellsForPencil(const dccrg::Dccrg distances; @@ -113,11 +112,13 @@ void computeSpatialSourceCellsForPencil(const dccrg::Dccrg neighbors; for (const auto nbrPair : *frontNbrPairs) { @@ -128,9 +129,9 @@ void computeSpatialSourceCellsForPencil(const dccrg::Dccrg neighbors; for (const auto nbrPair : *backNbrPairs) { @@ -507,7 +508,8 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg &vmesh, const uint lengthOfPencil) { + const vmesh::VelocityMesh &vmesh, const uint lengthOfPencil, + const bool debug) { // Get velocity data from vmesh that we need later to calculate the translation velocity_block_indices_t block_indices; @@ -531,7 +533,7 @@ void propagatePencil(Vec* dz, Vec* values, const uint dimension, } // Go from 0 to length here to propagate all the cells in the pencil - for (uint i = 0; i < lengthOfPencil; i++){ + for (uint i = 0; i < lengthOfPencil; i++){ // The source array is padded by VLASOV_STENCIL_WIDTH on both sides. uint i_source = i + VLASOV_STENCIL_WIDTH; @@ -541,6 +543,12 @@ void propagatePencil(Vec* dz, Vec* values, const uint dimension, const Realv cell_vz = (block_indices[dimension] * WID + k + 0.5) * dvz + vz_min; //cell centered velocity const Vec z_translation = cell_vz * dt / dz[i_source]; // how much it moved in time dt (reduced units) + if(debug) { + + cout << "i = " << i << ", k = " << k << ", cell_vz = " << cell_vz << endl; + + } + // Determine direction of translation // part of density goes here (cell index change along spatial direcion) Vecb positiveTranslationDirection = (z_translation > Vec(0.0)); @@ -944,7 +952,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const Realv dt, const uint popID) { - const bool printPencils = false; + const bool printPencils = true; const bool printTargets = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ @@ -1120,6 +1128,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& computeSpatialSourceCellsForPencil(mpiGrid, pencils, pencili, dimension, sourceCells.data()); + cout << "Rank " << myRank << ", Source cells for pencil " << pencili << ": "; + // dz is the cell size in the direction of the pencil std::vector> dz(sourceLength); for(uint i = 0; i < sourceCells.size(); ++i) { @@ -1133,9 +1143,17 @@ bool trans_map_1d_amr(const dccrg::Dccrg& case(2): dz[i] = sourceCells[i]->SpatialCell::parameters[CellParams::DZ]; break; - } + } + + if(sourceCells[i]) { + cout << sourceCells[i]->SpatialCell::parameters[CellParams::CELLID] << " "; + } else { + cout << "NULL "; + } } + cout << endl; + // Allocate source data: sourcedata> sourceVecData(sourceLength * WID3 / VECL); @@ -1144,9 +1162,11 @@ bool trans_map_1d_amr(const dccrg::Dccrg& copy_trans_block_data_amr(sourceCells.data(), blockGID, L, sourceVecData.data(), cellid_transpose, popID); + const bool debug = false; + // Dz and sourceVecData are both padded by VLASOV_STENCIL_WIDTH // Dz has 1 value/cell, sourceVecData has WID3 values/cell - propagatePencil(dz.data(), sourceVecData.data(), dimension, blockGID, dt, vmesh, L); + propagatePencil(dz.data(), sourceVecData.data(), dimension, blockGID, dt, vmesh, L, debug); // sourceVecData => targetBlockData[this pencil]) @@ 
-1579,7 +1599,7 @@ void update_remote_mapping_contribution_amr( } } - for (auto p : receiveBuffers) { + for (auto p : receiveBuffers) { aligned_free(p); } for (auto p : sendBuffers) { From 6be664652a912080c49ef9d7cd55577accc39595 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Wed, 5 Jun 2019 12:14:26 +0300 Subject: [PATCH 499/602] Making sure that the std::advance functions in computeSpatialSourceCellsForPencils don't advance past the end of the container. --- vlasovsolver/cpu_trans_map_amr.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index bbd625bf7..f6f4cc774 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -117,7 +117,7 @@ void computeSpatialSourceCellsForPencil(const dccrg::Dccrg neighbors; @@ -147,7 +147,7 @@ void computeSpatialSourceCellsForPencil(const dccrg::Dccrg= 0 ;i--){ if(sourceCells[i] == NULL) From ce3331e17fc36017365ec7e102f486186150cf68 Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 5 Jun 2019 12:16:57 +0300 Subject: [PATCH 500/602] Committing to be able to merge the next fix. This is ugly. --- MAKE/Makefile.sisu_gcc | 2 +- Makefile | 2 +- spatial_cell.cpp | 2 +- vlasovsolver/cpu_trans_map_amr.cpp | 22 ++++++---------------- vlasovsolver/vlasovmover.cpp | 6 +++--- 5 files changed, 12 insertions(+), 22 deletions(-) diff --git a/MAKE/Makefile.sisu_gcc b/MAKE/Makefile.sisu_gcc index bbfc2c8c5..e4cf2542d 100644 --- a/MAKE/Makefile.sisu_gcc +++ b/MAKE/Makefile.sisu_gcc @@ -47,7 +47,7 @@ FLAGS = CC_BRAND = gcc CC_BRAND_VERSION = 6.2.0 CXXFLAGS += -O3 -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2 -testpackage: CXXFLAGS = -O2 -fopenmp -funroll-loops -std=c++0x -fabi-version=0 -mavx +testpackage: CXXFLAGS = -O2 -g -fopenmp -funroll-loops -std=c++0x -fabi-version=0 -mavx MATHFLAGS = -ffast-math LDFLAGS = diff --git a/Makefile b/Makefile index 44a55cf35..68271608d 100644 --- a/Makefile +++ b/Makefile @@ -26,7 +26,7 @@ AR ?= ar FIELDSOLVER ?= londrillo_delzanna #Add -DFS_1ST_ORDER_SPACE or -DFS_1ST_ORDER_TIME to make the field solver first-order in space or time # COMPFLAGS += -DFS_1ST_ORDER_SPACE -# COMPFLAGS += -DFS_1ST_ORDER_TIME +COMPFLAGS += -DFS_1ST_ORDER_TIME diff --git a/spatial_cell.cpp b/spatial_cell.cpp index 758d4afdc..018035329 100644 --- a/spatial_cell.cpp +++ b/spatial_cell.cpp @@ -650,7 +650,7 @@ namespace spatial_cell { if ( P::amrMaxSpatialRefLevel == 0 || receiving || ranks.find(receiver_rank) != ranks.end()) { for ( int i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { - displacements.push_back((uint8_t*) this->neighbor_block_data[i] - (uint8_t*) this); + displacements.push_back((uint8_t*) this->neighbor_block_data[i] - (uint8_t*) this); block_lengths.push_back(sizeof(Realf) * VELOCITY_BLOCK_LENGTH * this->neighbor_number_of_blocks[i]); } diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index bbd625bf7..e1abbd85e 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -903,12 +903,13 @@ void printPencilsFunc(const setOfPencils& pencils, const uint dimension, const i std::cout << "I am rank " << myRank << ", I have " << pencils.N << " pencils along dimension " << dimension << ":\n"; MPI_Barrier(MPI_COMM_WORLD); if(myRank == MASTER_RANK) { - std::cout << "N, mpirank, (x, y): indices {path} " << std::endl; + std::cout << "t, N, mpirank, (x, y): indices {path} " << std::endl; std::cout << 
"-----------------------------------------------------------------" << std::endl; } MPI_Barrier(MPI_COMM_WORLD); for (uint i = 0; i < pencils.N; i++) { iend += pencils.lengthOfPencils[i]; + std::cout << P::t << ", "; std::cout << i << ", "; std::cout << myRank << ", "; std::cout << "(" << pencils.x[i] << ", " << pencils.y[i] << "): "; @@ -953,7 +954,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const uint popID) { const bool printPencils = true; - const bool printTargets = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ unsigned char cellid_transpose[WID3]; /*< defines the transpose for the solver internal (transposed) id: i + j*WID + k*WID2 to actual one*/ @@ -965,7 +965,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } int myRank; - if(printTargets || printPencils) MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + if(printPencils) MPI_Comm_rank(MPI_COMM_WORLD,&myRank); // Vector with all cell ids vector allCells(localPropagatedCells); @@ -1107,7 +1107,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& phiprof::start(t1); std::vector targetBlockData((pencils.sumOfLengths + 2 * pencils.N) * WID3); - + // Compute spatial neighbors for target cells. // For targets we need the local cells, plus a padding of 1 cell at both ends std::vector targetCells(pencils.sumOfLengths + pencils.N * 2 ); @@ -1127,9 +1127,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& std::vector sourceCells(sourceLength); computeSpatialSourceCellsForPencil(mpiGrid, pencils, pencili, dimension, sourceCells.data()); - - cout << "Rank " << myRank << ", Source cells for pencil " << pencili << ": "; - + // dz is the cell size in the direction of the pencil std::vector> dz(sourceLength); for(uint i = 0; i < sourceCells.size(); ++i) { @@ -1144,16 +1142,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& dz[i] = sourceCells[i]->SpatialCell::parameters[CellParams::DZ]; break; } - - if(sourceCells[i]) { - cout << sourceCells[i]->SpatialCell::parameters[CellParams::CELLID] << " "; - } else { - cout << "NULL "; - } } - - cout << endl; - + // Allocate source data: sourcedata> sourceVecData(sourceLength * WID3 / VECL); diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index c17cc3184..71dbf0054 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -81,7 +81,7 @@ void calculateSpatialTranslation( MPI_Comm_rank(MPI_COMM_WORLD,&myRank); // ------------- SLICE - map dist function in Z --------------- // - if(P::zcells_ini > 1){ + if(P::zcells_ini > 1 && true){ trans_timer=phiprof::initializeTimer("transfer-stencil-data-z","MPI"); phiprof::start(trans_timer); SpatialCell::set_mpi_transfer_type(Transfer::VEL_BLOCK_DATA); @@ -110,7 +110,7 @@ void calculateSpatialTranslation( } // ------------- SLICE - map dist function in X --------------- // - if(P::xcells_ini > 1){ + if(P::xcells_ini > 1 && true){ trans_timer=phiprof::initializeTimer("transfer-stencil-data-x","MPI"); phiprof::start(trans_timer); @@ -142,7 +142,7 @@ void calculateSpatialTranslation( } // ------------- SLICE - map dist function in Y --------------- // - if(P::ycells_ini > 1) { + if(P::ycells_ini > 1 && true) { trans_timer=phiprof::initializeTimer("transfer-stencil-data-y","MPI"); phiprof::start(trans_timer); From 7fb6c8a12cf0d4264e2cd620fe09ee557ce15916 Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Wed, 5 Jun 2019 12:37:43 +0300 Subject: [PATCH 501/602] Removed debugging IO and unused flags. 
--- vlasovsolver/cpu_trans_map_amr.cpp | 30 +++++------------------------- vlasovsolver/cpu_trans_map_amr.hpp | 2 +- 2 files changed, 6 insertions(+), 26 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index f6f4cc774..ff0cac887 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -508,8 +508,7 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg &vmesh, const uint lengthOfPencil, - const bool debug) { + const vmesh::VelocityMesh &vmesh, const uint lengthOfPencil) { // Get velocity data from vmesh that we need later to calculate the translation velocity_block_indices_t block_indices; @@ -543,12 +542,6 @@ void propagatePencil(Vec* dz, Vec* values, const uint dimension, const Realv cell_vz = (block_indices[dimension] * WID + k + 0.5) * dvz + vz_min; //cell centered velocity const Vec z_translation = cell_vz * dt / dz[i_source]; // how much it moved in time dt (reduced units) - if(debug) { - - cout << "i = " << i << ", k = " << k << ", cell_vz = " << cell_vz << endl; - - } - // Determine direction of translation // part of density goes here (cell index change along spatial direcion) Vecb positiveTranslationDirection = (z_translation > Vec(0.0)); @@ -875,7 +868,7 @@ bool checkPencils(const dccrg::Dccrg& mpi int myCount = std::count(pencils.ids.begin(), pencils.ids.end(), id); - if( myCount == 0 ) { + if( myCount == 0) { std::cerr << "ERROR: Cell ID " << id << " Appears in pencils " << myCount << " times!"<< std::endl; correct = false; @@ -952,8 +945,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const Realv dt, const uint popID) { - const bool printPencils = true; - const bool printTargets = false; + const bool printPencils = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ unsigned char cellid_transpose[WID3]; /*< defines the transpose for the solver internal (transposed) id: i + j*WID + k*WID2 to actual one*/ @@ -965,7 +957,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& } int myRank; - if(printTargets || printPencils) MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + if(printPencils) MPI_Comm_rank(MPI_COMM_WORLD,&myRank); // Vector with all cell ids vector allCells(localPropagatedCells); @@ -1128,8 +1120,6 @@ bool trans_map_1d_amr(const dccrg::Dccrg& computeSpatialSourceCellsForPencil(mpiGrid, pencils, pencili, dimension, sourceCells.data()); - cout << "Rank " << myRank << ", Source cells for pencil " << pencili << ": "; - // dz is the cell size in the direction of the pencil std::vector> dz(sourceLength); for(uint i = 0; i < sourceCells.size(); ++i) { @@ -1144,16 +1134,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& dz[i] = sourceCells[i]->SpatialCell::parameters[CellParams::DZ]; break; } - - if(sourceCells[i]) { - cout << sourceCells[i]->SpatialCell::parameters[CellParams::CELLID] << " "; - } else { - cout << "NULL "; - } } - cout << endl; - // Allocate source data: sourcedata> sourceVecData(sourceLength * WID3 / VECL); @@ -1162,11 +1144,9 @@ bool trans_map_1d_amr(const dccrg::Dccrg& copy_trans_block_data_amr(sourceCells.data(), blockGID, L, sourceVecData.data(), cellid_transpose, popID); - const bool debug = false; - // Dz and sourceVecData are both padded by VLASOV_STENCIL_WIDTH // Dz has 1 value/cell, sourceVecData has WID3 values/cell - propagatePencil(dz.data(), sourceVecData.data(), dimension, blockGID, dt, vmesh, L, debug); + propagatePencil(dz.data(), sourceVecData.data(), dimension, blockGID, dt, vmesh, L); // sourceVecData => 
targetBlockData[this pencil]) diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index 6b73522a5..7be70fa2f 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -178,7 +178,7 @@ CellID selectNeighbor(const dccrg::Dccrg void propagatePencil(Vec* dz, Vec* values, const uint dimension, const uint blockGID, const Realv dt, const vmesh::VelocityMesh &vmesh, - const uint lengthOfPencil, bool debugflag); + const uint lengthOfPencil); void copy_trans_block_data_amr( SpatialCell** source_neighbors, From d9110a44077dc8726a0be3057ee894c605614c15 Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 5 Jun 2019 13:03:55 +0300 Subject: [PATCH 502/602] Revert useless change. --- MAKE/Makefile.sisu_gcc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAKE/Makefile.sisu_gcc b/MAKE/Makefile.sisu_gcc index e4cf2542d..bbfc2c8c5 100644 --- a/MAKE/Makefile.sisu_gcc +++ b/MAKE/Makefile.sisu_gcc @@ -47,7 +47,7 @@ FLAGS = CC_BRAND = gcc CC_BRAND_VERSION = 6.2.0 CXXFLAGS += -O3 -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2 -testpackage: CXXFLAGS = -O2 -g -fopenmp -funroll-loops -std=c++0x -fabi-version=0 -mavx +testpackage: CXXFLAGS = -O2 -fopenmp -funroll-loops -std=c++0x -fabi-version=0 -mavx MATHFLAGS = -ffast-math LDFLAGS = From 8fb07f8b155fa3003bbd58361a039f92913edd23 Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 5 Jun 2019 13:04:22 +0300 Subject: [PATCH 503/602] Revert debugging change. --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 68271608d..44a55cf35 100644 --- a/Makefile +++ b/Makefile @@ -26,7 +26,7 @@ AR ?= ar FIELDSOLVER ?= londrillo_delzanna #Add -DFS_1ST_ORDER_SPACE or -DFS_1ST_ORDER_TIME to make the field solver first-order in space or time # COMPFLAGS += -DFS_1ST_ORDER_SPACE -COMPFLAGS += -DFS_1ST_ORDER_TIME +# COMPFLAGS += -DFS_1ST_ORDER_TIME From a440040a538571fe112a384029257e9d3fadf0a7 Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 5 Jun 2019 13:07:59 +0300 Subject: [PATCH 504/602] Removed debugging changes. 
--- vlasovsolver/cpu_trans_map_amr.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 28705071d..5b739ab8e 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -903,13 +903,12 @@ void printPencilsFunc(const setOfPencils& pencils, const uint dimension, const i std::cout << "I am rank " << myRank << ", I have " << pencils.N << " pencils along dimension " << dimension << ":\n"; MPI_Barrier(MPI_COMM_WORLD); if(myRank == MASTER_RANK) { - std::cout << "t, N, mpirank, (x, y): indices {path} " << std::endl; + std::cout << "N, mpirank, (x, y): indices {path} " << std::endl; std::cout << "-----------------------------------------------------------------" << std::endl; } MPI_Barrier(MPI_COMM_WORLD); for (uint i = 0; i < pencils.N; i++) { iend += pencils.lengthOfPencils[i]; - std::cout << P::t << ", "; std::cout << i << ", "; std::cout << myRank << ", "; std::cout << "(" << pencils.x[i] << ", " << pencils.y[i] << "): "; @@ -953,7 +952,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const Realv dt, const uint popID) { - const bool printPencils = true; + const bool printPencils = false; Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ unsigned char cellid_transpose[WID3]; /*< defines the transpose for the solver internal (transposed) id: i + j*WID + k*WID2 to actual one*/ From e06e7ca1513b40f3e68dc6d918cbcffd6e56d26e Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 5 Jun 2019 13:08:53 +0300 Subject: [PATCH 505/602] Removed debugging code. --- vlasovsolver/vlasovmover.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index 71dbf0054..b94958dd1 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -81,7 +81,7 @@ void calculateSpatialTranslation( MPI_Comm_rank(MPI_COMM_WORLD,&myRank); // ------------- SLICE - map dist function in Z --------------- // - if(P::zcells_ini > 1 && true){ + if(P::zcells_ini > 1){ trans_timer=phiprof::initializeTimer("transfer-stencil-data-z","MPI"); phiprof::start(trans_timer); SpatialCell::set_mpi_transfer_type(Transfer::VEL_BLOCK_DATA); @@ -110,7 +110,7 @@ void calculateSpatialTranslation( } // ------------- SLICE - map dist function in X --------------- // - if(P::xcells_ini > 1 && true){ + if(P::xcells_ini > 1){ trans_timer=phiprof::initializeTimer("transfer-stencil-data-x","MPI"); phiprof::start(trans_timer); @@ -142,7 +142,7 @@ void calculateSpatialTranslation( } // ------------- SLICE - map dist function in Y --------------- // - if(P::ycells_ini > 1 && true) { + if(P::ycells_ini > 1) { trans_timer=phiprof::initializeTimer("transfer-stencil-data-y","MPI"); phiprof::start(trans_timer); From 7a3f4307bf028d84423f01aecf8769845b1e5960 Mon Sep 17 00:00:00 2001 From: tkoskela Date: Wed, 5 Jun 2019 14:58:17 +0300 Subject: [PATCH 506/602] Added a makefile for the docker container. --- MAKE/Makefile.docker | 88 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 MAKE/Makefile.docker diff --git a/MAKE/Makefile.docker b/MAKE/Makefile.docker new file mode 100644 index 000000000..227109004 --- /dev/null +++ b/MAKE/Makefile.docker @@ -0,0 +1,88 @@ +# -*- mode: makefile -*- +CMP = mpic++ +LNK = mpic++ + +#======== Vectorization ========== +#Set vector backend type for vlasov solvers, sets precision and length. 
+#NOTE this has to have the same precision as the distribution function define (DISTRIBUTION_FP_PRECISION) +#Options: +# AVX: VEC4D_AGNER, VEC4F_AGNER, VEC8F_AGNER +# AVX512: VEC8D_AGNER, VEC16F_AGNER +# Fallback: VEC4D_FALLBACK, VEC4F_FALLBACK, VEC8F_FALLBACK + +ifeq ($(DISTRIBUTION_FP_PRECISION),SPF) +#Single-precision + VECTORCLASS = VEC8F_AGNER +else +#Double-precision + VECTORCLASS = VEC4D_AGNER +endif + +#======== PAPI ========== +#Add PAPI_MEM define to use papi to report memory consumption? +CXXFLAGS += -DPAPI_MEM + + +#======== Allocator ========= +#Use jemalloc instead of system malloc to reduce memory fragmentation? https://github.com/jemalloc/jemalloc +#Configure jemalloc with --with-jemalloc-prefix=je_ when installing it +CXXFLAGS += -DUSE_JEMALLOC -DJEMALLOC_NO_DEMANGLE + + +#======= Compiler and compilation flags ========= +# NOTES on compiler flags: +# CXXFLAGS is for compiler flags, they are always used +# MATHFLAGS are for special math etc. flags, these are only applied on solver functions +# LDFLAGS flags for linker + +#-DNO_WRITE_AT_ALL: Define to disable write at all to +# avoid memleak (much slower IO) +#-DMPICH_IGNORE_CXX_SEEK: Ignores some multiple definition +# errors that come up when using +# mpi.h in c++ on Cray + +CXXFLAGS += -DMPICH_IGNORE_CXX_SEEK + +FLAGS = + +#GNU flags: +CXXFLAGS += -O3 -fopenmp -funroll-loops -std=c++11 -W -Wall -Wno-unused -fabi-version=0 -mavx2 +testpackage: CXXFLAGS = -O2 -fopenmp -funroll-loops -std=c++0x -fabi-version=0 -mavx + +MATHFLAGS = -ffast-math +LDFLAGS = -g +LIB_MPI = -lgomp + +# BOOST_VERSION = current trilinos version +# ZOLTAN_VERSION = current trilinos verson + +#======== Libraries =========== + +LIBRARY_PREFIX = /home/lib + + +#compiled libraries +INC_BOOST = -I$/usr/include/boost +LIB_BOOST = -L$/usr/lib/x86_64-linux-gnu -lboost_program_options + +INC_ZOLTAN = -I$(LIBRARY_PREFIX)/zoltan/include +LIB_ZOLTAN = -L$(LIBRARY_PREFIX)/zoltan/lib -lzoltan + +INC_JEMALLOC = -I$(LIBRARY_PREFIX)/jemalloc/include +LIB_JEMALLOC = -L$(LIBRARY_PREFIX)/jemalloc/lib -ljemalloc + +INC_VLSV = -I$(LIBRARY_PREFIX)/vlsv +LIB_VLSV = -L$(LIBRARY_PREFIX)/vlsv -lvlsv + +INC_PROFILE = -I$(LIBRARY_PREFIX)/phiprof-2.0-beta/include +LIB_PROFILE = -L$(LIBRARY_PREFIX)/phiprof-2.0-beta/lib -lphiprof + +INC_PAPI = -I$(LIBRARY_PREFIX)/papi/include +LIB_PAPI = -L$(LIBRARY_PREFIX)/papi/lib -lpapi + +#header libraries + +INC_EIGEN = -I/usr/include/eigen3 +INC_DCCRG = -I$(LIBRARY_PREFIX)/dccrg/ +INC_VECTORCLASS = -I$(LIBRARY_PREFIX)/vectorclass +INC_FSGRID = -I$(LIBRARY_PREFIX)/fsgrid From daf489a3a94cb144fc6c61debf49f515a754f425 Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 5 Jun 2019 15:21:33 +0300 Subject: [PATCH 507/602] Compute moments for populations at restart, improves cosmetics. --- grid.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/grid.cpp b/grid.cpp index 24bb83ce1..1b33128b4 100644 --- a/grid.cpp +++ b/grid.cpp @@ -289,6 +289,12 @@ void initializeGrids( phiprof::start("Init moments"); calculateInitialVelocityMoments(mpiGrid); phiprof::stop("Init moments"); + } else { + phiprof::start("Init moments"); + for (size_t i=0; i Date: Wed, 5 Jun 2019 15:22:50 +0300 Subject: [PATCH 508/602] Remove redundant maxrdt computation, cleanup and reset maxrdt before recomputing it. 
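The essential pattern in the hunks below is reset-then-reduce: the redundant computation is dropped from calculateMoments_R (renamed from calculateMoments_R_maxdt), and computeNewTimeStep now resets CellParams::MAXRDT and each population's max_r_dt to numeric_limits::max() before folding in the per-block minima, so a small value left over from an earlier step can no longer cap the new time step. A minimal self-contained sketch of that pattern, using plain doubles instead of the Real and CellParams machinery:

   #include <algorithm>
   #include <limits>
   #include <vector>

   // Sketch of the reset-then-reduce pattern: always reset the running
   // minimum before recomputing it, otherwise a stale small value from a
   // previous step survives the reduction.
   double computeMaxRDt(const std::vector<double>& perBlockDtLimits) {
      double maxRDt = std::numeric_limits<double>::max(); // reset first
      for (const double dtLimit : perBlockDtLimits) {
         maxRDt = std::min(maxRDt, dtLimit);
      }
      return maxRDt;
   }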
--- vlasiator.cpp | 25 ++++++++++---------- vlasovsolver/cpu_moments.cpp | 44 +++++++----------------------------- vlasovsolver/cpu_moments.h | 2 +- vlasovsolver/vlasovmover.cpp | 23 ++----------------- 4 files changed, 24 insertions(+), 70 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 9166c3909..09469ecce 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -119,7 +119,10 @@ bool computeNewTimeStep(dccrg::Dccrg& mpi const Real dy = cell->parameters[CellParams::DY]; const Real dz = cell->parameters[CellParams::DZ]; + cell->parameters[CellParams::MAXRDT] = numeric_limits::max(); + for (uint popID=0; popIDset_max_r_dt(popID,numeric_limits::max()); vmesh::VelocityBlockContainer& blockContainer = cell->get_velocity_blocks(popID); const Real* blockParams = blockContainer.getParameters(); const Real EPS = numeric_limits::min()*1000; @@ -161,15 +164,15 @@ bool computeNewTimeStep(dccrg::Dccrg& mpi //compute max dt for fieldsolver const std::array gridDims(technicalGrid.getLocalSize()); for (int k=0; ksysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || - (cell->sysBoundaryLayer == 1 && cell->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY )) { - dtMaxLocal[2]=min(dtMaxLocal[2], cell->maxFsDt); - } - } - } + for (int j=0; jsysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || + (cell->sysBoundaryLayer == 1 && cell->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY )) { + dtMaxLocal[2]=min(dtMaxLocal[2], cell->maxFsDt); + } + } + } } @@ -557,9 +560,7 @@ int main(int argn,char* args[]) { P::dt=newDt; } phiprof::stop("compute-dt"); - } - - if (!P::isRestart) { + //go forward by dt/2 in V, initializes leapfrog split. In restarts the //the distribution function is already propagated forward in time by dt/2 phiprof::start("propagate-velocity-space-dt/2"); diff --git a/vlasovsolver/cpu_moments.cpp b/vlasovsolver/cpu_moments.cpp index e33d1a0cb..eb5a1ed8f 100644 --- a/vlasovsolver/cpu_moments.cpp +++ b/vlasovsolver/cpu_moments.cpp @@ -142,19 +142,18 @@ void calculateCellMoments(spatial_cell::SpatialCell* cell, } /** Calculate zeroth, first, and (possibly) second bulk velocity moments for the - * given spatial cell. Additionally, for each species, calculate the maximum - * spatial time step so that CFL(spatial)=1. The calculated moments include + * given spatial cell. The calculated moments include * contributions from all existing particle populations. The calculated moments * are stored to SpatialCell::parameters in _R variables. This function is AMR safe. * @param mpiGrid Parallel grid library. * @param cells Vector containing the spatial cells to be calculated. 
* @param computeSecond If true, second velocity moments are calculated.*/ -void calculateMoments_R_maxdt( +void calculateMoments_R( dccrg::Dccrg& mpiGrid, const std::vector& cells, const bool& computeSecond) { - phiprof::start("compute-moments-n-maxdt"); + phiprof::start("compute-moments-n"); creal HALF = 0.5; for (uint popID=0; popIDparameters[CellParams::DY]; const Real dz = cell->parameters[CellParams::DZ]; - // Reset spatial max DT - if (popID == 0) cell->parameters[CellParams::MAXRDT] = numeric_limits::max(); - cell->set_max_r_dt(popID,numeric_limits::max()); - vmesh::VelocityBlockContainer& blockContainer = cell->get_velocity_blocks(popID); if (blockContainer.size() == 0) continue; const Realf* data = blockContainer.getData(); @@ -209,32 +204,9 @@ void calculateMoments_R_maxdt( // Calculate species' contribution to first velocity moments for (vmesh::LocalID blockLID=0; blockLID::min()*1000; - for (unsigned int i=0; iparameters[CellParams::MAXRDT] = min(dt_max_cell,cell->parameters[CellParams::MAXRDT]); - cell->set_max_r_dt(popID,min(dt_max_cell,cell->get_max_r_dt(popID))); - } - - blockVelocityFirstMoments(data+blockLID*WID3, - blockParams+blockLID*BlockParams::N_VELOCITY_BLOCK_PARAMS, - array); + blockVelocityFirstMoments(data+blockLID*WID3, + blockParams+blockLID*BlockParams::N_VELOCITY_BLOCK_PARAMS, + array); } // for-loop over velocity blocks // Store species' contribution to bulk velocity moments @@ -262,7 +234,7 @@ void calculateMoments_R_maxdt( // Compute second moments only if requested. if (computeSecond == false) { - phiprof::stop("compute-moments-n-maxdt"); + phiprof::stop("compute-moments-n"); return; } @@ -303,7 +275,7 @@ void calculateMoments_R_maxdt( } // for-loop over spatial cells } // for-loop over particle species - phiprof::stop("compute-moments-n-maxdt"); + phiprof::stop("compute-moments-n"); } /** Calculate zeroth, first, and (possibly) second bulk velocity moments for the diff --git a/vlasovsolver/cpu_moments.h b/vlasovsolver/cpu_moments.h index 9c3342ff8..1d3523ecc 100644 --- a/vlasovsolver/cpu_moments.h +++ b/vlasovsolver/cpu_moments.h @@ -45,7 +45,7 @@ void blockVelocitySecondMoments(const Realf* avgs,const Real* blockParams, const REAL v[3], REAL* array); -void calculateMoments_R_maxdt(dccrg::Dccrg& mpiGrid, +void calculateMoments_R(dccrg::Dccrg& mpiGrid, const std::vector& cells, const bool& computeSecond); diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index b94958dd1..664c5ebd7 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ -204,15 +204,11 @@ void calculateSpatialTranslation( // If dt=0 we are either initializing or distribution functions are not translated. // In both cases go to the end of this function and calculate the moments. if (dt == 0.0) goto momentCalculation; - - // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; phiprof::start("compute_cell_lists"); remoteTargetCellsx = mpiGrid.get_remote_cells_on_process_boundary(VLASOV_SOLVER_TARGET_X_NEIGHBORHOOD_ID); remoteTargetCellsy = mpiGrid.get_remote_cells_on_process_boundary(VLASOV_SOLVER_TARGET_Y_NEIGHBORHOOD_ID); remoteTargetCellsz = mpiGrid.get_remote_cells_on_process_boundary(VLASOV_SOLVER_TARGET_Z_NEIGHBORHOOD_ID); - - // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; // Figure out which spatial cells are translated, // result independent of particle species. 
@@ -221,8 +217,6 @@ void calculateSpatialTranslation( local_propagated_cells.push_back(localCells[c]); } } - - // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; // Figure out target spatial cells, result // independent of particle species. @@ -232,8 +226,6 @@ void calculateSpatialTranslation( } } phiprof::stop("compute_cell_lists"); - - // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; // Translate all particle species for (uint popID=0; popIDparameters[CellParams::MAXRDT] < minDT) - minDT = mpiGrid[localCells[c]]->parameters[CellParams::MAXRDT]; - } - - // std::cout << "I am at line " << __LINE__ << " of " << __FILE__ << std::endl; - phiprof::stop("semilag-trans"); + phiprof::stop("semilag-trans"); } /* From 30ed9b096f0dfd13b32e4f65ccb4640f657feb13 Mon Sep 17 00:00:00 2001 From: tuomas Koskela Date: Wed, 5 Jun 2019 12:37:29 +0000 Subject: [PATCH 509/602] testpackage script for docker --- testpackage/small_test_docker.sh | 35 ++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100755 testpackage/small_test_docker.sh diff --git a/testpackage/small_test_docker.sh b/testpackage/small_test_docker.sh new file mode 100755 index 000000000..dfc5ec196 --- /dev/null +++ b/testpackage/small_test_docker.sh @@ -0,0 +1,35 @@ + +t=1 #threads per process + +#No idea how many cores we have available on travis. On my laptop I have 4. +cores_per_node=4 +#Change PBS parameters above + the ones here +total_units=$(echo $nodes $cores_per_node $ht | gawk '{print $1*$2*$3}') +units_per_node=$(echo $cores_per_node $ht | gawk '{print $1*$2}') +tasks=$(echo $total_units $t | gawk '{print $1/$2}') +tasks_per_node=$(echo $units_per_node $t | gawk '{print $1/$2}') +export OMP_NUM_THREADS=$t + +umask 007 +# Launch the OpenMP job to the allocated compute node +echo "Running $exec on $tasks mpi tasks, with $t threads per task on $nodes nodes ($ht threads per physical core)" +#command for running stuff +run_command="mpirun -n $tasks --allow-run-as-root" +small_run_command="mpirun -n 1 --allow-run-as-root" +run_command_tools="mpirun -n 1 --allow-run-as-root" + +#If 1, the reference vlsv files are generated +# if 0 then we check the v1 +create_verification_files=0 + +#folder for all reference data +reference_dir="/home/vlasiator/testpackage/" +#compare agains which revision. This can be a proper version string, or "current", which should be a symlink to the +#proper most recent one +reference_revision="current" + +# Define test +source small_test_definitions.sh +wait +# Run tests +source run_tests.sh From eaf64c7dbfcd3828ca140dddab048820bdb18ff6 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Wed, 5 Jun 2019 15:54:02 +0300 Subject: [PATCH 510/602] FsGrid restarts from separate number of ranks. 
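When the number of reading ranks differs from the number of ranks that wrote the restart file, each reader's fsgrid domain overlaps several writer domains, and the ioread.cpp hunks below read each overlapping writer domain and copy only the intersecting sub-box. The heart of it is a per-axis AABB intersection; a minimal sketch, with the start/size pairs standing in for the localStart/localSize and thatTasksStart/thatTasksSize arrays of the diff:

   #include <algorithm>
   #include <array>

   // Sketch: per-axis overlap of the reader's box with one writer's box.
   // A zero extent on any axis means the boxes are disjoint and there is
   // nothing to copy from that writer.
   std::array<int,3> overlapExtent(const std::array<int,3>& aStart, const std::array<int,3>& aSize,
                                   const std::array<int,3>& bStart, const std::array<int,3>& bSize) {
      std::array<int,3> extent;
      for (int d = 0; d < 3; ++d) {
         const int start = std::max(aStart[d], bStart[d]);
         const int end   = std::min(aStart[d] + aSize[d], bStart[d] + bSize[d]);
         extent[d] = std::max(end - start, 0); // clamp disjoint axes to zero
      }
      return extent;
   }

Later patches in this series replace the strict intersection early-out with exactly this clamped overlap computation.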
(Initial implementation, needs debugging still) --- ioread.cpp | 97 +++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 89 insertions(+), 8 deletions(-) diff --git a/ioread.cpp b/ioread.cpp index 8bc6d0d52..7de4e807b 100644 --- a/ioread.cpp +++ b/ioread.cpp @@ -802,17 +802,20 @@ template bool readFsGridVariable( int size, myRank; MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &myRank); + + std::array& localSize = targetGrid.getLocalSize(); + std::array& localStart = targetGrid.getLocalStart(); + std::array& globalSize = targetGrid.getGlobalSize(); + + // Determine our tasks storage size + size_t storageSize = localSize[0]*localSize[1]*localSize[2]; + if(size == numWritingRanks) { // Easy case: same number of tasks => slurp it in. - std::array& localSize = targetGrid.getLocalSize(); - std::array& globalSize = targetGrid.getGlobalSize(); - + // std::array decomposition; targetGrid.computeDomainDecomposition(globalSize, size, decomposition); - // Determine our tasks storage size - size_t storageSize = localSize[0]*localSize[1]*localSize[2]; - // Determine offset in file by summing up all the previous tasks' sizes. size_t localStartOffset = 0; for(int task = 0; task < myRank; task++) { @@ -843,8 +846,86 @@ template bool readFsGridVariable( } } else { - logFile << "(RESTART) ERROR: Attempting to restart from different number of tasks, this is not supported yet." << endl << write; - return false; + + // More difficult case: different number of tasks. + // In this case, our own fsgrid domain overlaps (potentially many) domains in the file, and needs to be read in stripes. + // + // +------------+----------------+ + // | | | + // | . . . . . . . . . . . . | + // | .<----->|<----------->. | + // | .<----->|<----------->. | + // | .<----->|<----------->. | + // +----+-------+-------------+--| + // | .<----->|<----------->. | + // | .<----->|<----------->. | + // | .<----->|<----------->. | + // | . . . . . . . . . . . . | + // | | | + // +------------+----------------+ + + // Determine the decomposition in the file and the one in RAM for our restart + std::array fileDecomposition; + targetGrid.computeDomainDecomposition(globalSize, numWritingRanks, fileDecomposition); + std::array ramDecomposition; + targetGrid.computeDomainDecomposition(globalSize, size, ramDecomposition); + + // Iterate through tasks and find their overlap with our domain. 
+ size_t fileOffset = 0; + for(int task = 0; task < myRank; task++) { + std::array thatTasksSize; + std::array thatTasksStart; + thatTasksSize[0] = targetGrid.calcLocalSize(globalSize[0], fileDecomposition[0], task/fileDecomposition[2]/fileDecomposition[1]); + thatTasksSize[1] = targetGrid.calcLocalSize(globalSize[1], fileDecomposition[1], (task/fileDecomposition[2])%fileDecomposition[1]); + thatTasksSize[2] = targetGrid.calcLocalSize(globalSize[2], fileDecomposition[2], task%fileDecomposition[2]); + + thatTasksStart[0] = targetGrid.calcLocalStart(globalSize[0], fileDecomposition[0], task/fileDecomposition[2]/fileDecomposition[1]); + thatTasksStart[1] = targetGrid.calcLocalStart(globalSize[1], fileDecomposition[1], (task/fileDecomposition[2])%fileDecomposition[1]); + thatTasksStart[2] = targetGrid.calcLocalStart(globalSize[2], fileDecomposition[2], task%fileDecomposition[2]); + + // AABB intersection test + if(thatTasksStart[0] + thatTasksSize[0] >= localStart[0] && + thatTasksStart[1] + thatTasksSize[1] >= localStart[1] && + thatTasksStart[2] + thatTasksSize[2] >= localStart[2] && + thatTasksStart[0] < localStart[0] + localSize[0] && + thatTasksStart[1] < localStart[1] + localSize[1] && + thatTasksStart[2] < localStart[2] + localSize[2]) { + + // Iterate through overlap area + std::array overlapStart,overlapEnd; + overlapStart[0] = max(localStart[0],thatTasksStart[0]); + overlapStart[1] = max(localStart[1],thatTasksStart[1]); + overlapStart[2] = max(localStart[2],thatTasksStart[2]); + + overlapEnd[0] = min(localStart[0]+localSize[0], thatTasksStart[0]+thatTasksSize[0]); + overlapEnd[1] = min(localStart[1]+localSize[1], thatTasksStart[1]+thatTasksSize[1]); + overlapEnd[2] = min(localStart[2]+localSize[2], thatTasksStart[2]+thatTasksSize[2]); + + // Read continuous stripes in x direction. + int stripeSize = overlapEnd[0]-overlapStart[0]; + for(int z=overlapStart[2]; z buffer(stripeSize*N); + + if(file.readArray("VARIABLE",attribs, fileOffset + index, stripeSize, (char*)buffer.data()) == false) { + logFile << "(RESTART) ERROR: Failed to read fsgrid variable " << variableName << endl << write; + return false; + } + + for(int x=overlapStart[0]; x Date: Wed, 5 Jun 2019 17:25:08 +0300 Subject: [PATCH 511/602] Fix rank number calculation in fsgrid restarts. --- ioread.cpp | 36 +++++++++++++++--------------------- 1 file changed, 15 insertions(+), 21 deletions(-) diff --git a/ioread.cpp b/ioread.cpp index 7de4e807b..3c92c7bf9 100644 --- a/ioread.cpp +++ b/ioread.cpp @@ -872,7 +872,7 @@ template bool readFsGridVariable( // Iterate through tasks and find their overlap with our domain. 
size_t fileOffset = 0; - for(int task = 0; task < myRank; task++) { + for(int task = 0; task < numWritingRanks; task++) { std::array thatTasksSize; std::array thatTasksStart; thatTasksSize[0] = targetGrid.calcLocalSize(globalSize[0], fileDecomposition[0], task/fileDecomposition[2]/fileDecomposition[1]); @@ -883,26 +883,19 @@ template bool readFsGridVariable( thatTasksStart[1] = targetGrid.calcLocalStart(globalSize[1], fileDecomposition[1], (task/fileDecomposition[2])%fileDecomposition[1]); thatTasksStart[2] = targetGrid.calcLocalStart(globalSize[2], fileDecomposition[2], task%fileDecomposition[2]); - // AABB intersection test - if(thatTasksStart[0] + thatTasksSize[0] >= localStart[0] && - thatTasksStart[1] + thatTasksSize[1] >= localStart[1] && - thatTasksStart[2] + thatTasksSize[2] >= localStart[2] && - thatTasksStart[0] < localStart[0] + localSize[0] && - thatTasksStart[1] < localStart[1] + localSize[1] && - thatTasksStart[2] < localStart[2] + localSize[2]) { - - // Iterate through overlap area - std::array overlapStart,overlapEnd; - overlapStart[0] = max(localStart[0],thatTasksStart[0]); - overlapStart[1] = max(localStart[1],thatTasksStart[1]); - overlapStart[2] = max(localStart[2],thatTasksStart[2]); - - overlapEnd[0] = min(localStart[0]+localSize[0], thatTasksStart[0]+thatTasksSize[0]); - overlapEnd[1] = min(localStart[1]+localSize[1], thatTasksStart[1]+thatTasksSize[1]); - overlapEnd[2] = min(localStart[2]+localSize[2], thatTasksStart[2]+thatTasksSize[2]); - - // Read continuous stripes in x direction. - int stripeSize = overlapEnd[0]-overlapStart[0]; + // Iterate through overlap area + std::array overlapStart,overlapEnd; + overlapStart[0] = max(localStart[0],thatTasksStart[0]); + overlapStart[1] = max(localStart[1],thatTasksStart[1]); + overlapStart[2] = max(localStart[2],thatTasksStart[2]); + + overlapEnd[0] = min(localStart[0]+localSize[0], thatTasksStart[0]+thatTasksSize[0]); + overlapEnd[1] = min(localStart[1]+localSize[1], thatTasksStart[1]+thatTasksSize[1]); + overlapEnd[2] = min(localStart[2]+localSize[2], thatTasksStart[2]+thatTasksSize[2]); + + // Read continuous stripes in x direction. + int stripeSize = overlapEnd[0]-overlapStart[0]; + if(stripeSize > 0) { for(int z=overlapStart[2]; z bool readFsGridVariable( // Read into buffer std::vector buffer(stripeSize*N); + // TODO: Should these be multireads instead? And/or can this be parallelized? if(file.readArray("VARIABLE",attribs, fileOffset + index, stripeSize, (char*)buffer.data()) == false) { logFile << "(RESTART) ERROR: Failed to read fsgrid variable " << variableName << endl << write; return false; From bf22e566435ee5f5071ac07f768c5ede1f450d2c Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Thu, 6 Jun 2019 14:56:39 +0300 Subject: [PATCH 512/602] Make all ranks participate in fsgrid collective reads. Fixes restart hangs. --- ioread.cpp | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/ioread.cpp b/ioread.cpp index 3c92c7bf9..f0af6b9fd 100644 --- a/ioread.cpp +++ b/ioread.cpp @@ -896,32 +896,39 @@ template bool readFsGridVariable( // Read continuous stripes in x direction. int stripeSize = overlapEnd[0]-overlapStart[0]; if(stripeSize > 0) { + // Read into buffer + std::vector buffer(thatTasksSize[0]*thatTasksSize[1]*thatTasksSize[2]*N); + + // TODO: Should these be multireads instead? And/or can this be parallelized? 
+ if(file.readArray("VARIABLE",attribs, fileOffset, thatTasksSize[0]*thatTasksSize[1]*thatTasksSize[2], (char*)buffer.data()) == false) { + logFile << "(RESTART) ERROR: Failed to read fsgrid variable " << variableName << endl << write; + return false; + } for(int z=overlapStart[2]; z buffer(stripeSize*N); - - // TODO: Should these be multireads instead? And/or can this be parallelized? - if(file.readArray("VARIABLE",attribs, fileOffset + index, stripeSize, (char*)buffer.data()) == false) { - logFile << "(RESTART) ERROR: Failed to read fsgrid variable " << variableName << endl << write; - return false; - } + fprintf(stderr, "<%i> Reading a stripe from task %i\n", myRank, task); for(int x=overlapStart[0]; x done reading fsgrid %s\n", myRank, variableName.c_str()); + targetGrid.updateGhostCells(); return true; } From 171f292329783a4e17389eb44f705ba8a369b976 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Thu, 6 Jun 2019 15:36:30 +0300 Subject: [PATCH 513/602] Fix buffer array index multiplier. This now appears to restart successfully. --- ioread.cpp | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/ioread.cpp b/ioread.cpp index f0af6b9fd..c63e3c5bf 100644 --- a/ioread.cpp +++ b/ioread.cpp @@ -848,7 +848,9 @@ template bool readFsGridVariable( } else { // More difficult case: different number of tasks. - // In this case, our own fsgrid domain overlaps (potentially many) domains in the file, and needs to be read in stripes. + // In this case, our own fsgrid domain overlaps (potentially many) domains in the file. + // We read the whole source rank into a temporary buffer, and transfer the overlapping + // part. // // +------------+----------------+ // | | | @@ -867,8 +869,6 @@ template bool readFsGridVariable( // Determine the decomposition in the file and the one in RAM for our restart std::array fileDecomposition; targetGrid.computeDomainDecomposition(globalSize, numWritingRanks, fileDecomposition); - std::array ramDecomposition; - targetGrid.computeDomainDecomposition(globalSize, size, ramDecomposition); // Iterate through tasks and find their overlap with our domain. size_t fileOffset = 0; @@ -884,7 +884,7 @@ template bool readFsGridVariable( thatTasksStart[2] = targetGrid.calcLocalStart(globalSize[2], fileDecomposition[2], task%fileDecomposition[2]); // Iterate through overlap area - std::array overlapStart,overlapEnd; + std::array overlapStart,overlapEnd,overlapSize; overlapStart[0] = max(localStart[0],thatTasksStart[0]); overlapStart[1] = max(localStart[1],thatTasksStart[1]); overlapStart[2] = max(localStart[2],thatTasksStart[2]); @@ -893,9 +893,12 @@ template bool readFsGridVariable( overlapEnd[1] = min(localStart[1]+localSize[1], thatTasksStart[1]+thatTasksSize[1]); overlapEnd[2] = min(localStart[2]+localSize[2], thatTasksStart[2]+thatTasksSize[2]); - // Read continuous stripes in x direction. - int stripeSize = overlapEnd[0]-overlapStart[0]; - if(stripeSize > 0) { + overlapSize[0] = max(overlapEnd[0]-overlapStart[0],0); + overlapSize[1] = max(overlapEnd[1]-overlapStart[1],0); + overlapSize[2] = max(overlapEnd[2]-overlapStart[2],0); + + // Read every source rank that we have an overlap with. 
+ if(overlapSize[0]*overlapSize[1]*overlapSize[2] > 0) { // Read into buffer std::vector buffer(thatTasksSize[0]*thatTasksSize[1]*thatTasksSize[2]*N); @@ -904,16 +907,16 @@ template bool readFsGridVariable( logFile << "(RESTART) ERROR: Failed to read fsgrid variable " << variableName << endl << write; return false; } + + // Copy continuous stripes in x direction. for(int z=overlapStart[2]; z Reading a stripe from task %i\n", myRank, task); - for(int x=overlapStart[0]; x bool readFsGridVariable( } } - fprintf(stderr, "<%i> done reading fsgrid %s\n", myRank, variableName.c_str()); - targetGrid.updateGhostCells(); return true; } From 33590f7ee5eaaf62db344b833bf594f2dd5ef0a9 Mon Sep 17 00:00:00 2001 From: ykempf Date: Fri, 7 Jun 2019 09:39:25 +0300 Subject: [PATCH 514/602] O0 in trans_map_amr --- vlasovsolver/cpu_trans_map_amr.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 8c1f10cdb..c84e3fd0c 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -68,6 +68,8 @@ int getNeighborhood(const uint dimension, const uint stencil) { } + + /* Get pointers to spatial cells that are considered source cells for a pencil. * Source cells are cells that the pencil reads data from to compute polynomial * fits that are used for propagation in the vlasov solver. All cells included @@ -496,6 +498,8 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg Date: Tue, 11 Jun 2019 13:45:51 +0300 Subject: [PATCH 515/602] AMR test of 20190611 from Yann. --- .../Flowthrough_amr_test_20190611_YPK.cfg | 124 ++++++++++++++++++ .../Flowthrough/run_amr_test_20190611_YPK.sh | 13 ++ 2 files changed, 137 insertions(+) create mode 100644 projects/Flowthrough/Flowthrough_amr_test_20190611_YPK.cfg create mode 100755 projects/Flowthrough/run_amr_test_20190611_YPK.sh diff --git a/projects/Flowthrough/Flowthrough_amr_test_20190611_YPK.cfg b/projects/Flowthrough/Flowthrough_amr_test_20190611_YPK.cfg new file mode 100644 index 000000000..89fb3e8d2 --- /dev/null +++ b/projects/Flowthrough/Flowthrough_amr_test_20190611_YPK.cfg @@ -0,0 +1,124 @@ +ParticlePopulations = proton + +project = Flowthrough +propagate_field = 1 +propagate_vlasov_acceleration = 1 +propagate_vlasov_translation = 1 +dynamic_timestep = 1 + +[proton_properties] +mass = 1 +mass_units = PROTON +charge = 1 + +[AMR] +max_spatial_level = 1 +box_half_width_x = 1 +box_half_width_z = 1 +box_half_width_y = 1 + +[gridbuilder] +x_length = 14 +y_length = 3 +z_length = 3 +x_min = -7e6 +x_max = 7e6 +y_min = -1.5e6 +y_max = 1.5e6 +z_min = -1.5e6 +z_max = 1.5e6 +t_max = 182.0 +dt = 2.0 + +[proton_vspace] +vx_min = -2e6 +vx_max = +2e6 +vy_min = -2e6 +vy_max = +2e6 +vz_min = -2e6 +vz_max = +2e6 +vx_length = 15 +vy_length = 15 +vz_length = 15 + +[io] +write_initial_state = 1 + +system_write_t_interval = 0.01 +system_write_file_name = bulk +system_write_distribution_stride = 0 +system_write_distribution_xline_stride = 0 +system_write_distribution_yline_stride = 0 +system_write_distribution_zline_stride = 0 + +[variables] +output = populations_Rho +output = vg_Rhom +output = fg_Rhom +output = B +output = VolB +output = fg_PerturbedB +output = fg_BackgroundB +output = E +output = GradPeE +output = Pressure +output = populations_V +output = populations_Rho +output = populations_moments_Backstream +output = populations_moments_NonBackstream +output = vg_BoundaryType +output = fg_BoundaryType +output = vg_BoundaryLayer +output = fg_BoundaryLayer +output = 
vg_GridCoordinates +output = fg_GridCoordinates +output = vg_rank +output = fg_rank +output = populations_Blocks +output = fSaved +output = populations_MaxVdt +output = populations_MaxRdt +output = MaxFieldsdt + +[boundaries] +periodic_x = no +periodic_y = yes +periodic_z = yes +boundary = Outflow +boundary = Maxwellian + +[outflow] +precedence = 3 + +[proton_outflow] +face = x+ +#face = y- +#face = y+ +#face = z- +#face = z+ + +[maxwellian] +precedence = 4 +face = x- + +[proton_maxwellian] +dynamic = 0 +file_x- = sw1.dat + +[proton_sparse] +minValue = 1.0e-15 + +[Flowthrough] +Bx = 1.0e-9 +By = 1.0e-9 +Bz = 1.0e-9 + +[proton_Flowthrough] +T = 1.0e5 +rho = 1.0e6 + +nSpaceSamples = 2 +nVelocitySamples = 2 + +[loadBalance] +algorithm = RCB diff --git a/projects/Flowthrough/run_amr_test_20190611_YPK.sh b/projects/Flowthrough/run_amr_test_20190611_YPK.sh new file mode 100755 index 000000000..31065ed4e --- /dev/null +++ b/projects/Flowthrough/run_amr_test_20190611_YPK.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +procs=4 + +rm -f *vlsv *txt + +aprun -n $procs ../vlasiator \ + --run_config ./Flowthrough_amr.cfg \ + --io.restart_walltime_interval 10000 \ + --gridbuilder.timestep_max 5 \ + --loadBalance.rebalanceInterval 100 \ + --loadBalance.algorithm RANDOM + From 10cada3c62dc9ffa8a9fdf4b2d7cf5efca8e8fbf Mon Sep 17 00:00:00 2001 From: ykempf Date: Tue, 11 Jun 2019 13:48:08 +0300 Subject: [PATCH 516/602] Updated Yann's test. --- projects/Flowthrough/Flowthrough_amr_test_20190611_YPK.cfg | 2 +- projects/Flowthrough/sw1_amr_test_20190611_YPK.dat | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 projects/Flowthrough/sw1_amr_test_20190611_YPK.dat diff --git a/projects/Flowthrough/Flowthrough_amr_test_20190611_YPK.cfg b/projects/Flowthrough/Flowthrough_amr_test_20190611_YPK.cfg index 89fb3e8d2..48ecede6a 100644 --- a/projects/Flowthrough/Flowthrough_amr_test_20190611_YPK.cfg +++ b/projects/Flowthrough/Flowthrough_amr_test_20190611_YPK.cfg @@ -103,7 +103,7 @@ face = x- [proton_maxwellian] dynamic = 0 -file_x- = sw1.dat +file_x- = sw1_amr_test_20190611_YPK.dat [proton_sparse] minValue = 1.0e-15 diff --git a/projects/Flowthrough/sw1_amr_test_20190611_YPK.dat b/projects/Flowthrough/sw1_amr_test_20190611_YPK.dat new file mode 100644 index 000000000..0a266607f --- /dev/null +++ b/projects/Flowthrough/sw1_amr_test_20190611_YPK.dat @@ -0,0 +1 @@ +0.0 2.0e6 1.0e6 1.0e5 0.0 0.0 1.0e-9 1.0e-9 1.0e-9 From 2b7d9c1f583e64f1b2e18f380bbb2b2299f27e92 Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 12 Jun 2019 16:35:46 +0300 Subject: [PATCH 517/602] Single-cell pencils forced. --- vlasovsolver/cpu_trans_map_amr.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index c84e3fd0c..5ce8e7460 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -643,7 +643,9 @@ void getSeedIds(const dccrg::Dccrg& mpiGr auto myIndices = mpiGrid.mapping.get_indices(celli); - bool addToSeedIds = false; +#warning This forces single-cell pencils! + // FIXME TODO Tuomas look at this! BUG + bool addToSeedIds = true; // Returns all neighbors as (id, direction-dimension) pair pointers. for ( const auto nbrPair : *(mpiGrid.get_neighbors_of(celli, neighborhood)) ) { From 39b78a545fe03e853ed844f65c026d57e3d627e4 Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 12 Jun 2019 16:38:14 +0300 Subject: [PATCH 518/602] White-space changes and some more cosmetics. 
--- vlasovsolver/cpu_trans_map.cpp | 12 ++-- vlasovsolver/cpu_trans_map_amr.cpp | 89 ++++++++++++++++++------------ vlasovsolver/cpu_trans_map_amr.hpp | 6 ++ 3 files changed, 65 insertions(+), 42 deletions(-) diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index 7500ec6e1..5925e7684 100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -300,7 +300,6 @@ void copy_trans_block_data( } } } else { - uint cellid=0; for (uint k=0; k& mpi std::vector allCellsPointer(allCells.size()); std::vector sourceNeighbors(localPropagatedCells.size() * nSourceNeighborsPerCell); std::vector targetNeighbors(3 * localPropagatedCells.size() ); - #pragma omp parallel for - for(uint celli = 0; celli < allCells.size(); celli++){ + for(uint celli = 0; celli < allCells.size(); celli++){ allCellsPointer[celli] = mpiGrid[allCells[celli]]; } #pragma omp parallel for - for(uint celli = 0; celli < localPropagatedCells.size(); celli++){ + for(uint celli = 0; celli < localPropagatedCells.size(); celli++){ // compute spatial neighbors, separately for targets and source. In // source cells we have a wider stencil and take into account // boundaries. For targets we only have actual cells as we do not @@ -393,7 +391,7 @@ bool trans_map_1d(const dccrg::Dccrg& mpi switch (dimension) { case 0: dz = P::dx_ini; - z_min = P::xmin; + z_min = P::xmin; // set values in array that is used to convert block indices // to global ID using a dot product. cell_indices_to_id[0]=WID2; @@ -444,7 +442,7 @@ bool trans_map_1d(const dccrg::Dccrg& mpi #pragma omp parallel - { + { std::vector targetBlockData(3 * localPropagatedCells.size() * WID3); std::vector targetsValid(localPropagatedCells.size()); std::vector allCellsBlockLocalID(allCells.size()); @@ -515,7 +513,7 @@ bool trans_map_1d(const dccrg::Dccrg& mpi z_2 = 1.0; } - for (uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { + for (uint planeVector = 0; planeVector < VEC_PER_PLANE; planeVector++) { //compute reconstruction #ifdef TRANS_SEMILAG_PLM Vec a[3]; diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 5ce8e7460..bc2a30336 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -342,7 +342,7 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg const auto* backNeighbors = mpiGrid.get_neighbors_of(ids.back() ,neighborhoodId); for (const auto nbrPair: *frontNeighbors) { - maxNbrRefLvl = max(maxNbrRefLvl,mpiGrid.mapping.get_refinement_level(nbrPair.first)); + maxNbrRefLvl = max(maxNbrRefLvl,mpiGrid.get_refinement_level(nbrPair.first)); } for (const auto nbrPair: *backNeighbors) { - maxNbrRefLvl = max(maxNbrRefLvl,mpiGrid.get_refinement_level(nbrPair.first)); + maxNbrRefLvl = max(maxNbrRefLvl,mpiGrid.get_refinement_level(nbrPair.first)); } if (maxNbrRefLvl > maxPencilRefLvl) { @@ -904,14 +904,16 @@ void printPencilsFunc(const setOfPencils& pencils, const uint dimension, const i std::cout << "I am rank " << myRank << ", I have " << pencils.N << " pencils along dimension " << dimension << ":\n"; MPI_Barrier(MPI_COMM_WORLD); if(myRank == MASTER_RANK) { - std::cout << "N, mpirank, (x, y): indices {path} " << std::endl; + std::cout << "t, N, mpirank, dimension (x, y): indices {path} " << std::endl; std::cout << "-----------------------------------------------------------------" << std::endl; } MPI_Barrier(MPI_COMM_WORLD); for (uint i = 0; i < pencils.N; i++) { iend += pencils.lengthOfPencils[i]; + std::cout << P::t << ", "; std::cout << i << 
", "; std::cout << myRank << ", "; + std::cout << dimension << ", "; std::cout << "(" << pencils.x[i] << ", " << pencils.y[i] << "): "; for (auto j = pencils.ids.begin() + ibeg; j != pencils.ids.begin() + iend; ++j) { std::cout << *j << " "; @@ -954,10 +956,8 @@ bool trans_map_1d_amr(const dccrg::Dccrg& const uint popID) { const bool printPencils = false; - Realv dvz,vz_min; uint cell_indices_to_id[3]; /*< used when computing id of target cell in block*/ unsigned char cellid_transpose[WID3]; /*< defines the transpose for the solver internal (transposed) id: i + j*WID + k*WID2 to actual one*/ - const uint blocks_per_dim = 1; // return if there's no cells to propagate if(localPropagatedCells.size() == 0) { cout << "Returning because of no cells" << endl; @@ -1057,22 +1057,20 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Only one set is created for now but we retain support for multiple sets pencilSets.push_back(pencils); - const uint8_t VMESH_REFLEVEL = 0; - // Get a pointer to the velocity mesh of the first spatial cell - const vmesh::VelocityMesh& vmesh = allCellsPointer[0]->get_velocity_mesh(popID); + const vmesh::VelocityMesh& vmesh = allCellsPointer[0]->get_velocity_mesh(popID); // Get a unique sorted list of blockids that are in any of the // propagated cells. First use set for this, then add to vector (may not // be the most nice way to do this and in any case we could do it along // dimension for data locality reasons => copy acc map column code, TODO: FIXME // TODO: Do this separately for each pencil? - std::unordered_set unionOfBlocksSet; + std::unordered_set unionOfBlocksSet; for(auto cell : allCellsPointer) { - vmesh::VelocityMesh& vmesh = cell->get_velocity_mesh(popID); - for (vmesh::LocalID block_i=0; block_i< vmesh.size(); ++block_i) { - unionOfBlocksSet.insert(vmesh.getGlobalID(block_i)); + vmesh::VelocityMesh& cvmesh = cell->get_velocity_mesh(popID); + for (vmesh::LocalID block_i=0; block_i< cvmesh.size(); ++block_i) { + unionOfBlocksSet.insert(cvmesh.getGlobalID(block_i)); } } @@ -1088,9 +1086,9 @@ bool trans_map_1d_amr(const dccrg::Dccrg& #pragma omp parallel { - // Loop over velocity space blocks. Thread this loop (over vspace blocks) with OpenMP. + // Loop over velocity space blocks. Thread this loop (over vspace blocks) with OpenMP. 
#pragma omp for schedule(guided) - for(uint blocki = 0; blocki < unionOfBlocks.size(); blocki++) { + for(uint blocki = 0; blocki < unionOfBlocks.size(); blocki++) { // Get global id of the velocity block vmesh::GlobalID blockGID = unionOfBlocks[blocki]; @@ -1112,8 +1110,20 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // For targets we need the local cells, plus a padding of 1 cell at both ends std::vector targetCells(pencils.sumOfLengths + pencils.N * 2 ); - computeSpatialTargetCellsForPencils(mpiGrid, pencils, dimension, targetCells.data()); - + computeSpatialTargetCellsForPencils(mpiGrid, pencils, dimension, targetCells.data()); + +// cout << P::t << " Rank " << myRank << ", dimension " << dimension << ", target cells: "; + +// for(uint i = 0; i < targetCells.size(); ++i) { +// if(targetCells[i]) { +// cout << targetCells[i]->SpatialCell::parameters[CellParams::CELLID] << " "; +// } else { +// cout << "NULL "; +// } +// } + +// cout << endl; + // Loop over pencils uint totalTargetLength = 0; for(uint pencili = 0; pencili < pencils.N; ++pencili){ @@ -1128,22 +1138,33 @@ bool trans_map_1d_amr(const dccrg::Dccrg& computeSpatialSourceCellsForPencil(mpiGrid, pencils, pencili, dimension, sourceCells.data()); +// cout << P::t << " Rank " << myRank << ", dimension " << dimension << ", source cells for pencil " << pencili << ": "; + // dz is the cell size in the direction of the pencil std::vector> dz(sourceLength); for(uint i = 0; i < sourceCells.size(); ++i) { switch (dimension) { case(0): - dz[i] = sourceCells[i]->SpatialCell::parameters[CellParams::DX]; - break; + dz[i] = sourceCells[i]->parameters[CellParams::DX]; + break; case(1): - dz[i] = sourceCells[i]->SpatialCell::parameters[CellParams::DY]; - break; + dz[i] = sourceCells[i]->parameters[CellParams::DY]; + break; case(2): - dz[i] = sourceCells[i]->SpatialCell::parameters[CellParams::DZ]; + dz[i] = sourceCells[i]->parameters[CellParams::DZ]; break; } + +// if(sourceCells[i]) { +// cout << sourceCells[i]->SpatialCell::parameters[CellParams::CELLID] << " "; +// } else { +// cout << "NULL "; +// } + } +// cout << endl; + // Allocate source data: sourcedata> sourceVecData(sourceLength * WID3 / VECL); @@ -1176,7 +1197,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& targetBlockData[(totalTargetLength + icell) * WID3 + cellid_transpose[iv + planeVector * VECL + k * WID2]] = vector[iv]; - } + } } } } @@ -1354,7 +1375,7 @@ void update_remote_mapping_contribution_amr( // We need the default for 1 to 1 communications if(ccell) { for (uint i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { - ccell->neighbor_block_data[i] = ccell->get_data(popID); + ccell->neighbor_block_data.at(i) = ccell->get_data(popID); ccell->neighbor_number_of_blocks[i] = 0; } } @@ -1366,7 +1387,7 @@ void update_remote_mapping_contribution_amr( if(ccell) { // Initialize number of blocks to 0 and neighbor block data pointer to the local block data pointer for (uint i = 0; i < MAX_NEIGHBORS_PER_DIM; ++i) { - ccell->neighbor_block_data[i] = ccell->get_data(popID); + ccell->neighbor_block_data.at(i) = ccell->get_data(popID); ccell->neighbor_number_of_blocks[i] = 0; } } @@ -1406,8 +1427,6 @@ void update_remote_mapping_contribution_amr( // ccell adds a neighbor_block_data block for each neighbor in the positive direction to its local data for (const auto nbr : p_nbrs) { - - bool initBlocksForEmptySiblings = false; //Send data in nbr target array that we just mapped to, if // 1) it is a valid target, @@ -1425,9 +1444,9 @@ void update_remote_mapping_contribution_amr( 
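
The hunks above swap neighbor_block_data[i] for neighbor_block_data.at(i); the practical difference is that the bounds-checked at() (std::array and std::vector alike) throws std::out_of_range instead of silently reading or writing past the end. A small standalone sketch of that behaviour, with an illustrative four-slot array:

    #include <array>
    #include <iostream>
    #include <stdexcept>

    int main() {
       std::array<int, 4> data{1, 2, 3, 4};              // e.g. per-neighbour slots
       try {
          std::cout << data.at(4) << '\n';               // one past the end: throws
       } catch (const std::out_of_range& e) {
          std::cout << "caught: " << e.what() << '\n';   // [] here would be undefined behaviour
       }
       return 0;
    }
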
if(mpiGrid.get_refinement_level(c) >= mpiGrid.get_refinement_level(nbr)) { sendIndex = mySiblingIndex; - } else if (mpiGrid.get_refinement_level(nbr) > mpiGrid.get_refinement_level(c)) { + } else { sendIndex = get_sibling_index(mpiGrid,nbr); - } + } SpatialCell *pcell = mpiGrid[nbr]; @@ -1453,7 +1472,7 @@ void update_remote_mapping_contribution_amr( (Realf*) aligned_malloc(ccell->neighbor_number_of_blocks.at(sendIndex) * WID3 * sizeof(Realf), 64); sendBuffers.push_back(ccell->neighbor_block_data.at(sendIndex)); for (uint j = 0; j < ccell->neighbor_number_of_blocks.at(sendIndex) * WID3; ++j) { - ccell->neighbor_block_data[sendIndex][j] = 0.0; + ccell->neighbor_block_data.at(sendIndex)[j] = 0.0; } // closes for(uint j = 0; j < ccell->neighbor_number_of_blocks.at(sendIndex) * WID3; ++j) @@ -1471,7 +1490,7 @@ void update_remote_mapping_contribution_amr( if (!all_of(n_nbrs.begin(), n_nbrs.end(), [&mpiGrid](CellID i){return mpiGrid.is_local(i);})) { // ccell adds a neighbor_block_data block for each neighbor in the positive direction to its local data - for (const auto nbr : n_nbrs) { + for (const auto nbr : n_nbrs) { if (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && ccell->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { @@ -1499,7 +1518,7 @@ void update_remote_mapping_contribution_amr( recvIndex = get_sibling_index(mpiGrid,nbr); - ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); + ncell->neighbor_number_of_blocks.at(recvIndex) = ccell->get_number_of_velocity_blocks(popID); ncell->neighbor_block_data.at(recvIndex) = (Realf*) aligned_malloc(ncell->neighbor_number_of_blocks.at(recvIndex) * WID3 * sizeof(Realf), 64); receiveBuffers.push_back(ncell->neighbor_block_data.at(recvIndex)); @@ -1534,7 +1553,7 @@ void update_remote_mapping_contribution_amr( receive_cells.push_back(c); receive_origin_cells.push_back(nbr); - receive_origin_index.push_back(recvIndex); + receive_origin_index.push_back(recvIndex); } // closes (nbr != INVALID_CELLID && !mpiGrid.is_local(nbr) && ...) @@ -1578,7 +1597,7 @@ void update_remote_mapping_contribution_amr( // one cell is the neighbor on bot + and - side to the same process for (auto c : send_cells) { SpatialCell* spatial_cell = mpiGrid[c]; - Realf * blockData = spatial_cell->get_data(popID); + Realf * blockData = spatial_cell->get_data(popID); //#pragma omp for nowait for(unsigned int vCell = 0; vCell < VELOCITY_BLOCK_LENGTH * spatial_cell->get_number_of_velocity_blocks(popID); ++vCell) { // copy received target data to temporary array where target data is stored. 
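
The send/receive bookkeeping above follows one allocation pattern throughout: grab a 64-byte-aligned block-data buffer, zero it before the mapping accumulates into it, remember the pointer, and free every remembered buffer once the exchange is done. A compilable sketch of that lifecycle, with C++17 std::aligned_alloc/std::free standing in for the project's aligned_malloc/aligned_free helpers, and Realf assumed to be a float type:

    #include <cstdlib>
    #include <cstring>
    #include <vector>

    using Realf = float;                                  // assumption: Realf is float here

    int main() {
       std::vector<Realf*> sendBuffers;
       const std::size_t nBlocks = 16, WID3 = 64;         // illustrative sizes only
       const std::size_t bytes = nBlocks * WID3 * sizeof(Realf);
       Realf* buf = static_cast<Realf*>(std::aligned_alloc(64, bytes));
       if (!buf) return 1;
       std::memset(buf, 0, bytes);                        // all-bits-zero is 0.0f for IEEE floats
       sendBuffers.push_back(buf);
       // ... the MPI exchange would fill and consume the buffers here ...
       for (Realf* p : sendBuffers) {
          std::free(p);                                   // mirrors the aligned_free loop above
       }
       return 0;
    }
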
@@ -1590,7 +1609,7 @@ void update_remote_mapping_contribution_amr( for (auto p : receiveBuffers) { aligned_free(p); } - for (auto p : sendBuffers) { + for (auto p : sendBuffers) { aligned_free(p); } diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index e23482fe0..d7738a663 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -28,6 +28,7 @@ #include "../common.h" #include "../spatial_cell.hpp" + struct setOfPencils { uint N; // Number of pencils in the set @@ -172,14 +173,18 @@ struct setOfPencils { } }; + CellID selectNeighbor(const dccrg::Dccrg &grid, CellID id, int dimension, uint path); + void propagatePencil(Vec* dz, Vec* values, const uint dimension, const uint blockGID, const Realv dt, const vmesh::VelocityMesh &vmesh, const uint lengthOfPencil); + + void copy_trans_block_data_amr( SpatialCell** source_neighbors, const vmesh::GlobalID blockGID, @@ -201,6 +206,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& mpiGrid, const uint dimension, From 9ad062731cfec1889c9076dff39916eac310e29c Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 12 Jun 2019 16:44:20 +0300 Subject: [PATCH 519/602] Removed O0 GCC flag in translation. --- vlasovsolver/cpu_trans_map_amr.cpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index bc2a30336..cba9ff49b 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -498,8 +498,6 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg Date: Mon, 17 Jun 2019 11:06:36 +0300 Subject: [PATCH 520/602] placed perturbed field initialization within restart check --- projects/Magnetosphere/Magnetosphere.cpp | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index 615138e8b..97eb4b08a 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -330,11 +330,13 @@ namespace projects { // corrective terms in the perturbed field. This maintains the BGB as curl-free. 
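
The change here keeps the total field unchanged while keeping the tilt-dependent part out of the background: BgB holds the plain dipole, and perB gets minus that same dipole plus the full vector dipole, so B_total = B_dipole + (B_vectordipole - B_dipole) = B_vectordipole while BgB stays curl-free. A toy numeric check of that cancellation, with made-up three-component stand-ins for the two dipole fields:

    #include <array>
    #include <cassert>
    #include <cmath>

    int main() {
       std::array<double, 3> bg{}, per{};
       const std::array<double, 3> dipole{1.0, 2.0, 3.0};        // plain dipole contribution
       const std::array<double, 3> vectorDipole{1.1, 1.9, 3.2};  // tilted vector dipole
       for (int i = 0; i < 3; ++i) {
          bg[i]  += dipole[i];        // setBackgroundField(bgFieldDipole, BgBGrid)
          per[i] -= dipole[i];        // setPerturbedField with the -8e15 moment
          per[i] += vectorDipole[i];  // setPerturbedField(bgVectorDipole, ...)
          assert(std::abs(bg[i] + per[i] - vectorDipole[i]) < 1e-12);
       }
       return 0;
    }
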
bgFieldDipole.initialize(8e15 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, 0.0 );//set dipole moment
setBackgroundField(bgFieldDipole, BgBGrid);
- // Difference into perBgrid
- bgFieldDipole.initialize(-8e15 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, 0.0 );
- setPerturbedField(bgFieldDipole, perBGrid);
- bgVectorDipole.initialize(8e15 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, this->dipoleTiltPhi*3.14159/180., this->dipoleTiltTheta*3.14159/180., this->dipoleXFull, this->dipoleXZero, this->dipoleInflowB[0], this->dipoleInflowB[1], this->dipoleInflowB[2]);
- setPerturbedField(bgVectorDipole, perBGrid, true);
+ // Difference into perBgrid, only if not restarting
+ if (P::isRestart == false) {
+ bgFieldDipole.initialize(-8e15 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, 0.0 );
+ setPerturbedField(bgFieldDipole, perBGrid);
+ bgVectorDipole.initialize(8e15 *this->dipoleScalingFactor, 0.0, 0.0, 0.0, this->dipoleTiltPhi*3.14159/180., this->dipoleTiltTheta*3.14159/180., this->dipoleXFull, this->dipoleXZero, this->dipoleInflowB[0], this->dipoleInflowB[1], this->dipoleInflowB[2]);
+ setPerturbedField(bgVectorDipole, perBGrid, true);
+ }
break;
default:
setBackgroundFieldToZero(BgBGrid);
From c1ae6b4e0e1ae4bcb3ab2b0021660fcb620d7c9c Mon Sep 17 00:00:00 2001
From: Urs Ganse
Date: Tue, 18 Jun 2019 16:56:42 +0300
Subject: [PATCH 521/602] Fix outflow and ionosphere boundaries to get symmetric B evaluation.

Before, B vectors were always copied into the sysboundary cells, which made the Yee-lattice locations of these components asymmetric with respect to the boundary. Now the solver checks whether the neighbour on the given component's side is a non-sysboundary cell and, if it is, solves the field there normally. This should increase inner boundary accuracy; testing is in progress.
---
 sysboundary/ionosphere.cpp | 29 +++++++++++++++
 sysboundary/outflow.cpp | 74 ++++++++++++++++----------------
 2 files changed, 59 insertions(+), 44 deletions(-)

diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp
index 1b478b14c..0cdd6140d 100644
--- a/sysboundary/ionosphere.cpp
+++ b/sysboundary/ionosphere.cpp
@@ -33,6 +33,7 @@
 #include "../vlasovmover.h"
 #include "../fieldsolver/fs_common.h"
 #include "../fieldsolver/fs_limiters.h"
+#include "../fieldsolver/ldz_magnetic_field.hpp"
 #include "../common.h"
 #include "../object_wrapper.h"
@@ -554,6 +555,34 @@ namespace SBC {
 bGrid = &perBDt2Grid;
 }
+ // Easy case: in case we are neighboured by a non-sysboundary cell, we still solve the
+ // fields normally here.
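
The added code below resolves, for a face-centred field component, which cell shares that face: judging from the offsets in the hunk, component c of cell (i,j,k) sits on the cell's low face in direction c, so the relevant neighbour is one step down along that axis only. A small sketch of that index arithmetic (faceNeighbour is a hypothetical helper name, not from the patch):

    #include <array>
    #include <cassert>

    // For field component `component` stored on the low face of cell (i,j,k),
    // return the index of the cell on the other side of that face.
    std::array<int, 3> faceNeighbour(int i, int j, int k, int component) {
       return { i - (component == 0 ? 1 : 0),
                j - (component == 1 ? 1 : 0),
                k - (component == 2 ? 1 : 0) };
    }

    int main() {
       const std::array<int, 3> n = faceNeighbour(4, 5, 6, 1);  // a y-component
       assert(n[0] == 4 && n[1] == 4 && n[2] == 6);
       return 0;
    }
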
+ cuint sysBoundaryLayer = technicalGrid.get(i,j,k)->sysBoundaryLayer; + if(sysBoundaryLayer == 1) { + cint neigh_i=i + ((component==0)?-1:0); + cint neigh_j=j + ((component==1)?-1:0); + cint neigh_k=k + ((component==2)?-1:0); + cuint neighborSysBoundaryFlag = technicalGrid.get(neigh_i, neigh_j, neigh_k)->sysBoundaryFlag; + + if (neighborSysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + switch(component) { + case 0: + propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, true, false, false); + break; + case 1: + propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, false, true, false); + break; + case 2: + propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, false, false, true); + break; + default: + break; + } + return bGrid->get(i,j,k)->at(fsgrids::bfield::PERBX + component); + } + } + + // Otherwise: // Sum perturbed B component over all nearest NOT_SYSBOUNDARY neighbours std::array averageB = {{ 0.0 }}; for (uint it = 0; it < closestCells.size(); it++) { diff --git a/sysboundary/outflow.cpp b/sysboundary/outflow.cpp index 4eadab5ea..bf8f09587 100644 --- a/sysboundary/outflow.cpp +++ b/sysboundary/outflow.cpp @@ -359,54 +359,40 @@ namespace SBC { determineFace(&isThisCellOnAFace[0], x, y, z, dx, dy, dz, true); cuint sysBoundaryLayer = technicalGrid.get(i,j,k)->sysBoundaryLayer; + if(sysBoundaryLayer == 1) { + cint neigh_i=i + ((component==0)?-1:0); + cint neigh_j=j + ((component==1)?-1:0); + cint neigh_k=k + ((component==2)?-1:0); + cuint neighborSysBoundaryFlag = technicalGrid.get(neigh_i, neigh_j, neigh_k)->sysBoundaryFlag; - if (sysBoundaryLayer == 1 - && isThisCellOnAFace[0] // we are on the face - && this->facesToProcess[0] // we are supposed to do this face - && !this->facesToSkipFields[0] // we are not supposed to skip fields on this face - && component == 0 // we do the component normal to this face - && !(isThisCellOnAFace[2] || isThisCellOnAFace[3] || isThisCellOnAFace[4] || isThisCellOnAFace[5]) // we are not in a corner - ) { - propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, true, false, false); - if(RKCase == RK_ORDER1 || RKCase == RK_ORDER2_STEP2) { - fieldValue = perBGrid.get(i,j,k)->at(fsgrids::bfield::PERBX + component); - } else { - fieldValue = perBDt2Grid.get(i,j,k)->at(fsgrids::bfield::PERBX + component); - } - } else if (sysBoundaryLayer == 1 - && isThisCellOnAFace[2] // we are on the face - && this->facesToProcess[2] // we are supposed to do this face - && !this->facesToSkipFields[2] // we are not supposed to skip fields on this face - && component == 1 // we do the component normal to this face - && !(isThisCellOnAFace[0] || isThisCellOnAFace[1] || isThisCellOnAFace[4] || isThisCellOnAFace[5]) // we are not in a corner - ) { - propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, false, true, false); - if(RKCase == RK_ORDER1 || RKCase == RK_ORDER2_STEP2) { - fieldValue = perBGrid.get(i,j,k)->at(fsgrids::bfield::PERBX + component); - } else { - fieldValue = perBDt2Grid.get(i,j,k)->at(fsgrids::bfield::PERBX + component); - } - } else if (sysBoundaryLayer == 1 - && isThisCellOnAFace[4] // we are on the face - && this->facesToProcess[4] // we are supposed to do this face - && !this->facesToSkipFields[4] // we are not supposed to skip fields on this face - && component == 2 // we do the component normal to this face - && !(isThisCellOnAFace[0] || isThisCellOnAFace[1] || isThisCellOnAFace[2] || 
isThisCellOnAFace[3]) // we are not in a corner - ) { - propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, false, false, true); - if(RKCase == RK_ORDER1 || RKCase == RK_ORDER2_STEP2) { - fieldValue = perBGrid.get(i,j,k)->at(fsgrids::bfield::PERBX + component); - } else { - fieldValue = perBDt2Grid.get(i,j,k)->at(fsgrids::bfield::PERBX + component); + if (neighborSysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + switch(component) { + case 0: + propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, true, false, false); + break; + case 1: + propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, false, true, false); + break; + case 2: + propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, false, false, true); + break; + default: + break; + } + if(RKCase == RK_ORDER1 || RKCase == RK_ORDER2_STEP2) { + fieldValue = perBGrid.get(i,j,k)->at(fsgrids::bfield::PERBX + component); + } else { + fieldValue = perBDt2Grid.get(i,j,k)->at(fsgrids::bfield::PERBX + component); + } + return fieldValue; } + } + + if(RKCase == RK_ORDER1 || RKCase == RK_ORDER2_STEP2) { + fieldValue = fieldBoundaryCopyFromExistingFaceNbrMagneticField(perBGrid, technicalGrid, i, j, k, component); } else { - if(RKCase == RK_ORDER1 || RKCase == RK_ORDER2_STEP2) { - fieldValue = fieldBoundaryCopyFromExistingFaceNbrMagneticField(perBGrid, technicalGrid, i, j, k, component); - } else { - fieldValue = fieldBoundaryCopyFromExistingFaceNbrMagneticField(perBDt2Grid, technicalGrid, i, j, k, component); - } + fieldValue = fieldBoundaryCopyFromExistingFaceNbrMagneticField(perBDt2Grid, technicalGrid, i, j, k, component); } - return fieldValue; } From 3c57151957579fdfd434d23a135f5aa9fafb0425 Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 19 Jun 2019 14:19:46 +0300 Subject: [PATCH 522/602] Level 3 refinement for Magnetosphere, innermost sphere. --- projects/Magnetosphere/Magnetosphere.cpp | 111 ++++++++++++++++------- projects/Magnetosphere/Magnetosphere.h | 1 + 2 files changed, 78 insertions(+), 34 deletions(-) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index 97eb4b08a..a126d9c7c 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -54,6 +54,7 @@ namespace projects { RP::add("Magnetosphere.dipoleType","0: Normal 3D dipole, 1: line-dipole for 2D polar simulations, 2: line-dipole with mirror, 3: 3D dipole with mirror", 0); RP::add("Magnetosphere.dipoleMirrorLocationX","x-coordinate of dipole Mirror", -1.0); + RP::add("Magnetosphere.refine_L3radius","Radius of L3-refined sphere", 6.371e7); // 10 RE RP::add("Magnetosphere.refine_L2radius","Radius of L2-refined sphere", 9.5565e7); // 15 RE RP::add("Magnetosphere.refine_L2tailthick","Thickness of L2-refined tail region", 3.1855e7); // 5 RE RP::add("Magnetosphere.refine_L1radius","Radius of L1-refined sphere", 1.59275e8); // 25 RE @@ -141,6 +142,10 @@ namespace projects { } + if(!Readparameters::get("Magnetosphere.refine_L3radius", this->refine_L3radius)) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; + exit(1); + } if(!Readparameters::get("Magnetosphere.refine_L2radius", this->refine_L2radius)) { if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" 
<< endl; exit(1); @@ -559,50 +564,50 @@ namespace projects { const int bw = 2 * VLASOV_STENCIL_WIDTH; const int bw2 = bw + VLASOV_STENCIL_WIDTH; - // Calculate regions for refinement + // Calculate regions for refinement if (P::amrMaxSpatialRefLevel > 0) { - // L1 refinement. Does not touch a 2-cell thick (at L0) boundary layer. - for (uint i = bw; i < P::xcells_ini-bw; ++i) { - for (uint j = bw; j < P::ycells_ini-bw; ++j) { - for (uint k = bw; k < P::zcells_ini-bw; ++k) { - - std::array xyz; - xyz[0] = P::xmin + (i+0.5)*P::dx_ini; - xyz[1] = P::ymin + (j+0.5)*P::dy_ini; - xyz[2] = P::zmin + (k+0.5)*P::dz_ini; - - Real radius2 = (xyz[0]*xyz[0]+xyz[1]*xyz[1]+xyz[2]*xyz[2]); - // Check if cell is within L1 sphere, or within L1 tail slice - if ((radius2 < refine_L1radius*refine_L1radius) || - ((xyz[0] < 0) && (std::abs(xyz[1]) < refine_L1radius) && - (std::abs(xyz[2]) xyz; + xyz[0] = P::xmin + (i+0.5)*P::dx_ini; + xyz[1] = P::ymin + (j+0.5)*P::dy_ini; + xyz[2] = P::zmin + (k+0.5)*P::dz_ini; + + Real radius2 = (xyz[0]*xyz[0]+xyz[1]*xyz[1]+xyz[2]*xyz[2]); + // Check if cell is within L1 sphere, or within L1 tail slice + if ((radius2 < refine_L1radius*refine_L1radius) || + ((xyz[0] < 0) && (std::abs(xyz[1]) < refine_L1radius) && + (std::abs(xyz[2]) 0) { - std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. " << std::endl; - } + if(refinedCells.size() > 0) { + std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. " << std::endl; + } #endif - mpiGrid.balance_load(); + mpiGrid.balance_load(); } if (P::amrMaxSpatialRefLevel > 1) { - // L2 refinement. Does not touch a 5-cell thick (at L1) boundary layer. - // This means a boundary width of 2 L0 cells and one L1 cell in between - // as a buffer + // L2 refinement. Does not touch a 5-cell thick (at L1) boundary layer. + // This means a boundary width of 2 L0 cells and one L1 cell in between + // as a buffer for (uint i = 2*bw2; i < 2*(P::xcells_ini-bw2); ++i) { for (uint j = 2*bw2; j < 2*(P::ycells_ini-bw2); ++j) { for (uint k = 2*bw2; k < 2*(P::zcells_ini-bw2); ++k) { - + std::array xyz; xyz[0] = P::xmin + (i+0.5)*0.5*P::dx_ini; xyz[1] = P::ymin + (j+0.5)*0.5*P::dy_ini; @@ -615,13 +620,13 @@ namespace projects { (std::abs(xyz[2]) 0) { @@ -632,6 +637,44 @@ namespace projects { mpiGrid.balance_load(); } + if (P::amrMaxSpatialRefLevel > 2) { + + if (refine_L3radius < refine_L2radius && refine_L3radius > ionosphereRadius) { + // L3 refinement. + for (uint i = 2*bw2; i < 2*(P::xcells_ini-bw2); ++i) { + for (uint j = 2*bw2; j < 2*(P::ycells_ini-bw2); ++j) { + for (uint k = 2*bw2; k < 2*(P::zcells_ini-bw2); ++k) { + + std::array xyz; + xyz[0] = P::xmin + (i+0.5)*0.5*P::dx_ini; + xyz[1] = P::ymin + (j+0.5)*0.5*P::dy_ini; + xyz[2] = P::zmin + (k+0.5)*0.5*P::dz_ini; + + Real radius2 = (xyz[0]*xyz[0]+xyz[1]*xyz[1]+xyz[2]*xyz[2]); + // Check if cell is within L1 sphere, or within L1 tail slice + if (radius2 < refine_L3radius*refine_L3radius) + { + CellID myCell = mpiGrid.get_existing_cell(xyz); + // Check if the cell is tagged as do not compute + mpiGrid.refine_completely(myCell); + } + } + } + } + refinedCells = mpiGrid.stop_refining(true); + if(myRank == MASTER_RANK) std::cout << "Finished third level of refinement" << endl; + #ifndef NDEBUG + if(refinedCells.size() > 0) { + std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. 
" << std::endl; + } + #endif + + mpiGrid.balance_load(); + } else { + std::cout << "Skipping third level of refinement because the radius is larger than the 2nd level radius or smaller than the ionosphere radius." << std::endl; + } + } + return true; } diff --git a/projects/Magnetosphere/Magnetosphere.h b/projects/Magnetosphere/Magnetosphere.h index 715df6b7b..c7b394187 100644 --- a/projects/Magnetosphere/Magnetosphere.h +++ b/projects/Magnetosphere/Magnetosphere.h @@ -85,6 +85,7 @@ namespace projects { Real dipoleMirrorLocationX; uint dipoleType; + Real refine_L3radius; Real refine_L2radius; Real refine_L2tailthick; Real refine_L1radius; From ec129721c316694852fff4afd406f408810909bc Mon Sep 17 00:00:00 2001 From: ykempf Date: Wed, 19 Jun 2019 14:49:40 +0300 Subject: [PATCH 523/602] Bugfix of L3 Magnetosphere refinement. --- projects/Magnetosphere/Magnetosphere.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index a126d9c7c..91f0ed667 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -641,14 +641,14 @@ namespace projects { if (refine_L3radius < refine_L2radius && refine_L3radius > ionosphereRadius) { // L3 refinement. - for (uint i = 2*bw2; i < 2*(P::xcells_ini-bw2); ++i) { - for (uint j = 2*bw2; j < 2*(P::ycells_ini-bw2); ++j) { - for (uint k = 2*bw2; k < 2*(P::zcells_ini-bw2); ++k) { + for (uint i = 2*bw2; i < 4*P::xcells_ini-2*bw2; ++i) { + for (uint j = 2*bw2; j < 4*P::ycells_ini-2*bw2; ++j) { + for (uint k = 2*bw2; k < 4*P::zcells_ini-2*bw2; ++k) { std::array xyz; - xyz[0] = P::xmin + (i+0.5)*0.5*P::dx_ini; - xyz[1] = P::ymin + (j+0.5)*0.5*P::dy_ini; - xyz[2] = P::zmin + (k+0.5)*0.5*P::dz_ini; + xyz[0] = P::xmin + (i+0.5)*0.25*P::dx_ini; + xyz[1] = P::ymin + (j+0.5)*0.25*P::dy_ini; + xyz[2] = P::zmin + (k+0.5)*0.25*P::dz_ini; Real radius2 = (xyz[0]*xyz[0]+xyz[1]*xyz[1]+xyz[2]*xyz[2]); // Check if cell is within L1 sphere, or within L1 tail slice From c1d6e87becac529284d25ec262e6502c62b33793 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Wed, 19 Jun 2019 16:07:24 +0300 Subject: [PATCH 524/602] add error message for invalid components in ionosphere or outflow boundary --- sysboundary/ionosphere.cpp | 1 + sysboundary/outflow.cpp | 1 + 2 files changed, 2 insertions(+) diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index 0cdd6140d..5ca7c7661 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -576,6 +576,7 @@ namespace SBC { propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, false, false, true); break; default: + cerr << "ERROR: ionosphere boundary tried to propagate nonsensical magnetic field component " << component << endl; break; } return bGrid->get(i,j,k)->at(fsgrids::bfield::PERBX + component); diff --git a/sysboundary/outflow.cpp b/sysboundary/outflow.cpp index bf8f09587..f50a8e197 100644 --- a/sysboundary/outflow.cpp +++ b/sysboundary/outflow.cpp @@ -377,6 +377,7 @@ namespace SBC { propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, false, false, true); break; default: + cerr << "ERROR: outflow boundary tried to propagate nonsensical magnetic field component " << component << endl; break; } if(RKCase == RK_ORDER1 || RKCase == RK_ORDER2_STEP2) { From e668e7fdd54a595944c713409a52ad69c4f66777 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Wed, 19 Jun 2019 16:09:54 +0300 Subject: [PATCH 525/602] 
Update Magnetosphere.cpp This should better ensure that we don't try to do L3 outside L2, or L2 outside L1 regions. --- projects/Magnetosphere/Magnetosphere.cpp | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index 91f0ed667..235c15e4c 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -562,7 +562,8 @@ namespace projects { if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; const int bw = 2 * VLASOV_STENCIL_WIDTH; - const int bw2 = bw + VLASOV_STENCIL_WIDTH; + const int bw2 = 2 * bw + VLASOV_STENCIL_WIDTH; + const int bw3 = 2 * bw2 + VLASOV_STENCIL_WIDTH; // Calculate regions for refinement if (P::amrMaxSpatialRefLevel > 0) { @@ -604,9 +605,9 @@ namespace projects { // L2 refinement. Does not touch a 5-cell thick (at L1) boundary layer. // This means a boundary width of 2 L0 cells and one L1 cell in between // as a buffer - for (uint i = 2*bw2; i < 2*(P::xcells_ini-bw2); ++i) { - for (uint j = 2*bw2; j < 2*(P::ycells_ini-bw2); ++j) { - for (uint k = 2*bw2; k < 2*(P::zcells_ini-bw2); ++k) { + for (uint i = bw2; i < 2*P::xcells_ini-bw2; ++i) { + for (uint j = bw2; j < 2*P::ycells_ini-bw2; ++j) { + for (uint k = bw2; k < 2*P::zcells_ini-bw2; ++k) { std::array xyz; xyz[0] = P::xmin + (i+0.5)*0.5*P::dx_ini; @@ -641,9 +642,9 @@ namespace projects { if (refine_L3radius < refine_L2radius && refine_L3radius > ionosphereRadius) { // L3 refinement. - for (uint i = 2*bw2; i < 4*P::xcells_ini-2*bw2; ++i) { - for (uint j = 2*bw2; j < 4*P::ycells_ini-2*bw2; ++j) { - for (uint k = 2*bw2; k < 4*P::zcells_ini-2*bw2; ++k) { + for (uint i = bw3; i < 4*P::xcells_ini-bw3; ++i) { + for (uint j = bw3; j < 4*P::ycells_ini-bw3; ++j) { + for (uint k = bw3; k < 4*P::zcells_ini-bw3; ++k) { std::array xyz; xyz[0] = P::xmin + (i+0.5)*0.25*P::dx_ini; From 15045bae1d4e6be7f7ef81931b69f9470ef6772b Mon Sep 17 00:00:00 2001 From: Tuomas Koskela Date: Thu, 20 Jun 2019 12:28:18 +0300 Subject: [PATCH 526/602] Added an upper limit check for the number of times cells appear in pencils. --- vlasovsolver/cpu_trans_map_amr.cpp | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index cba9ff49b..00a2a706f 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -882,6 +882,27 @@ bool checkPencils(const dccrg::Dccrg& mpi } + for (int ipencil = 0; ipencil < pencils.N; ++ipencil) { + + auto ids = pencils.getIds(ipencil); + + for (auto id : ids) { + + int myCount = std::count(pencils.ids.begin(), pencils.ids.end(), id); + int nPencilsThroughThisCell = pow(pow(2,pencils.path[ipencil].size()),2); + + if (myCount > nPencilsThroughThisCell) { + + std::cerr << "ERROR: Cell ID " << id << " Appears in pencils " << myCount << " times!"<< std::endl; + std::cerr << " It should not appear more than " << nPencilsThroughThisCell << " times." 
<< std::endl; + correct = false; + + } + + } + + } + return correct; } From 2d015c68adeeda6c2b9dfe8e7d9b6c3d63b2d740 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Wed, 26 Jun 2019 11:08:01 +0300 Subject: [PATCH 527/602] calculate X/Y/Z strides based on AMR level --- iowrite.cpp | 55 ++++++++++++++++++++++++++++++++--------------------- 1 file changed, 33 insertions(+), 22 deletions(-) diff --git a/iowrite.cpp b/iowrite.cpp index dd4f095a3..43244dd10 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -934,28 +934,39 @@ bool writeVelocitySpace(dccrg::Dccrg& mpi } // Cell lines selection // Determine cellID's 3D indices - lineX = (cells[i]-1) % P::xcells_ini; - lineY = ((cells[i]-1) / P::xcells_ini) % P::ycells_ini; - lineZ = ((cells[i]-1) /(P::xcells_ini * P::ycells_ini)) % P::zcells_ini; - // Check that indices are in correct intersection at least in one plane - if ((P::systemWriteDistributionWriteXlineStride[index] > 0 && - P::systemWriteDistributionWriteYlineStride[index] > 0 && - lineX % P::systemWriteDistributionWriteXlineStride[index] == 0 && - lineY % P::systemWriteDistributionWriteYlineStride[index] == 0) - && - (P::systemWriteDistributionWriteYlineStride[index] > 0 && - P::systemWriteDistributionWriteZlineStride[index] > 0 && - lineY % P::systemWriteDistributionWriteYlineStride[index] == 0 && - lineZ % P::systemWriteDistributionWriteZlineStride[index] == 0) - && - (P::systemWriteDistributionWriteZlineStride[index] > 0 && - P::systemWriteDistributionWriteXlineStride[index] > 0 && - lineZ % P::systemWriteDistributionWriteZlineStride[index] == 0 && - lineX % P::systemWriteDistributionWriteXlineStride[index] == 0) - ) { - velSpaceCells.push_back(cells[i]); - mpiGrid[cells[i]]->parameters[CellParams::ISCELLSAVINGF] = 1.0; - } + + // Loop over AMR levels + for (uint AMR = 0; AMR <= P::amrMaxSpatialRefLevel; AMR++) { + uint AMRP = AMR+1; + uint startindex = (AMR*P::xcells_ini)*(AMR*P::ycells_ini)*(AMR*P::zcells_ini)+1; + uint endindex = (AMRP*P::xcells_ini)*(AMRP*P::ycells_ini)*(AMRP*P::zcells_ini)+1; + + // If cell belongs to this AMR level, find indices + if ((cells[i]>=startindex)&&(cells[i] 0 && + P::systemWriteDistributionWriteYlineStride[index] > 0 && + lineX % P::systemWriteDistributionWriteXlineStride[index] == 0 && + lineY % P::systemWriteDistributionWriteYlineStride[index] == 0) + && + (P::systemWriteDistributionWriteYlineStride[index] > 0 && + P::systemWriteDistributionWriteZlineStride[index] > 0 && + lineY % P::systemWriteDistributionWriteYlineStride[index] == 0 && + lineZ % P::systemWriteDistributionWriteZlineStride[index] == 0) + && + (P::systemWriteDistributionWriteZlineStride[index] > 0 && + P::systemWriteDistributionWriteXlineStride[index] > 0 && + lineZ % P::systemWriteDistributionWriteZlineStride[index] == 0 && + lineX % P::systemWriteDistributionWriteXlineStride[index] == 0) + ) { + velSpaceCells.push_back(cells[i]); + mpiGrid[cells[i]]->parameters[CellParams::ISCELLSAVINGF] = 1.0; + } + } + } } uint64_t numVelSpaceCells; From 8861eeaf2ccb2f58d4094007bc578cdabb20630f Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Wed, 26 Jun 2019 15:52:35 +0300 Subject: [PATCH 528/602] Correct AMR level sub-indexing --- iowrite.cpp | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/iowrite.cpp b/iowrite.cpp index 43244dd10..2f043c09a 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -936,16 +936,19 @@ bool writeVelocitySpace(dccrg::Dccrg& mpi // Determine cellID's 3D indices // Loop over AMR levels + uint startindex=1; + uint endindex=1; for (uint 
AMR = 0; AMR <= P::amrMaxSpatialRefLevel; AMR++) { - uint AMRP = AMR+1; - uint startindex = (AMR*P::xcells_ini)*(AMR*P::ycells_ini)*(AMR*P::zcells_ini)+1; - uint endindex = (AMRP*P::xcells_ini)*(AMRP*P::ycells_ini)*(AMRP*P::zcells_ini)+1; + uint AMRm = std::floor(std::pow(2,AMR)); + uint cellsthislevel = (AMRm*P::xcells_ini)*(AMRm*P::ycells_ini)*(AMRm*P::zcells_ini); + startindex = endindex; + endindex = endindex + cellsthislevel; // If cell belongs to this AMR level, find indices if ((cells[i]>=startindex)&&(cells[i] 0 && P::systemWriteDistributionWriteYlineStride[index] > 0 && From 90ccd686b8a4f912c7e5788bfb4f67455d28c3f9 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Fri, 28 Jun 2019 13:30:46 +0300 Subject: [PATCH 529/602] Replaced ionosphere L2 cell B-field nearest copy and normal map with a simple -1 neighbor copy --- sysboundary/ionosphere.cpp | 59 ++++++++++++++++++++++++-------------- 1 file changed, 37 insertions(+), 22 deletions(-) diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index 5ca7c7661..227eb9a90 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -584,30 +584,45 @@ namespace SBC { } // Otherwise: - // Sum perturbed B component over all nearest NOT_SYSBOUNDARY neighbours - std::array averageB = {{ 0.0 }}; - for (uint it = 0; it < closestCells.size(); it++) { - #ifdef DEBUG_IONOSPHERE - if (technicalGrid.get(closestCells[it][0],closestCells[it][1],closestCells[it][2])->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { - stringstream ss; - ss << "ERROR, ionosphere cell (" << i << "," << j << "," << k << ") uses value from sysboundary nbr (" << closestCells[it][0] << "," << closestCells[it][1] << "," << closestCells[it][2] << " in " << __FILE__ << ":" << __LINE__ << endl; - cerr << ss.str(); - exit(1); - } - #endif - averageB[0] += bGrid->get(closestCells[it][0], closestCells[it][1], closestCells[it][2])->at(fsgrids::bfield::PERBX); - averageB[1] += bGrid->get(closestCells[it][0], closestCells[it][1], closestCells[it][2])->at(fsgrids::bfield::PERBY); - averageB[2] += bGrid->get(closestCells[it][0], closestCells[it][1], closestCells[it][2])->at(fsgrids::bfield::PERBZ); - } - - // Average and project to normal direction - std::array normalDirection = fieldSolverGetNormalDirection(technicalGrid, i, j, k); - for(uint i=0; i<3; i++) { - averageB[i] *= normalDirection[i] / closestCells.size(); +// // Sum perturbed B component over all nearest NOT_SYSBOUNDARY neighbours +// std::array averageB = {{ 0.0 }}; +// for (uint it = 0; it < closestCells.size(); it++) { +// #ifdef DEBUG_IONOSPHERE +// if (technicalGrid.get(closestCells[it][0],closestCells[it][1],closestCells[it][2])->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { +// stringstream ss; +// ss << "ERROR, ionosphere cell (" << i << "," << j << "," << k << ") uses value from sysboundary nbr (" << closestCells[it][0] << "," << closestCells[it][1] << "," << closestCells[it][2] << " in " << __FILE__ << ":" << __LINE__ << endl; +// cerr << ss.str(); +// exit(1); +// } +// #endif +// averageB[0] += bGrid->get(closestCells[it][0], closestCells[it][1], closestCells[it][2])->at(fsgrids::bfield::PERBX); +// averageB[1] += bGrid->get(closestCells[it][0], closestCells[it][1], closestCells[it][2])->at(fsgrids::bfield::PERBY); +// averageB[2] += bGrid->get(closestCells[it][0], closestCells[it][1], closestCells[it][2])->at(fsgrids::bfield::PERBZ); +// } + +// // Average and project to normal direction +// std::array normalDirection = fieldSolverGetNormalDirection(technicalGrid, 
i, j, k); +// for(uint i=0; i<3; i++) { +// averageB[i] *= normalDirection[i] / closestCells.size(); +// } +// // Return (B.n)*normalVector[component] +// return (averageB[0]+averageB[1]+averageB[2])*normalDirection[component]; + + // Otherwise + // Copy each face B-field from the cell on the other side of it + switch(component) { + case 0: + return bGrid->get(i-1,j,k)->at(fsgrids::bfield::PERBX + component); + case 1: + return bGrid->get(i,j-1,k)->at(fsgrids::bfield::PERBX + component); + case 2: + return bGrid->get(i,j,k-1)->at(fsgrids::bfield::PERBX + component); + default: + cerr << "ERROR: ionosphere boundary tried to copy nonsensical magnetic field component " << component << endl; + break; } + - // Return (B.n)*normalVector[component] - return (averageB[0]+averageB[1]+averageB[2])*normalDirection[component]; } void Ionosphere::fieldSolverBoundaryCondElectricField( From ccaf0e055726440c15a87711ec30e5244ca09955 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Fri, 28 Jun 2019 15:25:45 +0300 Subject: [PATCH 530/602] level 1 boundary cells calculate E-field if it's neighboring a regular cell --- fieldsolver/ldz_electric_field.cpp | 129 +++++++++++++++++++---------- sysboundary/ionosphere.cpp | 51 ++++++------ 2 files changed, 109 insertions(+), 71 deletions(-) diff --git a/fieldsolver/ldz_electric_field.cpp b/fieldsolver/ldz_electric_field.cpp index 88ead4faf..5fec2dbb3 100644 --- a/fieldsolver/ldz_electric_field.cpp +++ b/fieldsolver/ldz_electric_field.cpp @@ -1533,56 +1533,93 @@ void calculateElectricField( cuint cellSysBoundaryLayer = technicalGrid.get(i,j,k)->sysBoundaryLayer; - if ((cellSysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) && (cellSysBoundaryLayer != 1)) { + if ((cellSysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) && (cellSysBoundaryLayer > 1)) { + // Sysboundary level 2+ cells sysBoundaries.getSysBoundary(cellSysBoundaryFlag)->fieldSolverBoundaryCondElectricField(EGrid, i, j, k, 0); sysBoundaries.getSysBoundary(cellSysBoundaryFlag)->fieldSolverBoundaryCondElectricField(EGrid, i, j, k, 1); sysBoundaries.getSysBoundary(cellSysBoundaryFlag)->fieldSolverBoundaryCondElectricField(EGrid, i, j, k, 2); } else { - calculateEdgeElectricFieldX( - perBGrid, - EGrid, - EHallGrid, - EGradPeGrid, - momentsGrid, - dPerBGrid, - dMomentsGrid, - BgBGrid, - technicalGrid, - i, - j, - k, - RKCase - ); - calculateEdgeElectricFieldY( - perBGrid, - EGrid, - EHallGrid, - EGradPeGrid, - momentsGrid, - dPerBGrid, - dMomentsGrid, - BgBGrid, - technicalGrid, - i, - j, - k, - RKCase - ); - calculateEdgeElectricFieldZ( - perBGrid, - EGrid, - EHallGrid, - EGradPeGrid, - momentsGrid, - dPerBGrid, - dMomentsGrid, - BgBGrid, - technicalGrid, - i, - j, - k, - RKCase - ); + // Regular cells + // OR level 1 cells whose Ex-component is adjacent to a regular cell + if((cellSysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) || + ((cellSysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) && + (technicalGrid.get(i ,j-1,k )->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || + technicalGrid.get(i ,j ,k-1)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || + technicalGrid.get(i ,j-1,k-1)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY)) + ) { + calculateEdgeElectricFieldX( + perBGrid, + EGrid, + EHallGrid, + EGradPeGrid, + momentsGrid, + dPerBGrid, + dMomentsGrid, + BgBGrid, + technicalGrid, + i, + j, + k, + RKCase + ); + } else { + // level 1 cells whose Ex-component is not adjacent to a regular cell + 
sysBoundaries.getSysBoundary(cellSysBoundaryFlag)->fieldSolverBoundaryCondElectricField(EGrid, i, j, k, 0);
+ }
+ // Regular cells
+ // OR level 1 cells whose Ey-component is adjacent to a regular cell
+ if((cellSysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) ||
+ ((cellSysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) &&
+ (technicalGrid.get(i-1,j ,k )->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY ||
+ technicalGrid.get(i ,j ,k-1)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY ||
+ technicalGrid.get(i-1,j ,k-1)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY))
+ ) {
+ calculateEdgeElectricFieldY(
+ perBGrid,
+ EGrid,
+ EHallGrid,
+ EGradPeGrid,
+ momentsGrid,
+ dPerBGrid,
+ dMomentsGrid,
+ BgBGrid,
+ technicalGrid,
+ i,
+ j,
+ k,
+ RKCase
+ );
+ } else {
+ // level 1 cells whose Ey-component is not adjacent to a regular cell
+ sysBoundaries.getSysBoundary(cellSysBoundaryFlag)->fieldSolverBoundaryCondElectricField(EGrid, i, j, k, 1);
+ }
+ // Regular cells
+ // OR level 1 cells whose Ez-component is adjacent to a regular cell
+ if((cellSysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) ||
+ ((cellSysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) &&
+ (technicalGrid.get(i-1,j ,k )->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY ||
+ technicalGrid.get(i ,j-1,k )->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY ||
+ technicalGrid.get(i-1,j-1,k )->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY))
+ ) {
+ calculateEdgeElectricFieldZ(
+ perBGrid,
+ EGrid,
+ EHallGrid,
+ EGradPeGrid,
+ momentsGrid,
+ dPerBGrid,
+ dMomentsGrid,
+ BgBGrid,
+ technicalGrid,
+ i,
+ j,
+ k,
+ RKCase
+ );
+ } else {
+ // level 1 cells whose Ez-component is not adjacent to a regular cell
+ sysBoundaries.getSysBoundary(cellSysBoundaryFlag)->fieldSolverBoundaryCondElectricField(EGrid, i, j, k, 2);
+ }
 }
 }
diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp
index 227eb9a90..9e39a6690 100644
--- a/sysboundary/ionosphere.cpp
+++ b/sysboundary/ionosphere.cpp
@@ -584,31 +584,32 @@ namespace SBC {
 }
 // Otherwise:
-// // Sum perturbed B component over all nearest NOT_SYSBOUNDARY neighbours
-// std::array averageB = {{ 0.0 }};
-// for (uint it = 0; it < closestCells.size(); it++) {
-// #ifdef DEBUG_IONOSPHERE
-// if (technicalGrid.get(closestCells[it][0],closestCells[it][1],closestCells[it][2])->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) {
-// stringstream ss;
-// ss << "ERROR, ionosphere cell (" << i << "," << j << "," << k << ") uses value from sysboundary nbr (" << closestCells[it][0] << "," << closestCells[it][1] << "," << closestCells[it][2] << " in " << __FILE__ << ":" << __LINE__ << endl;
-// cerr << ss.str();
-// exit(1);
-// }
-// #endif
-// averageB[0] += bGrid->get(closestCells[it][0], closestCells[it][1], closestCells[it][2])->at(fsgrids::bfield::PERBX);
-// averageB[1] += bGrid->get(closestCells[it][0], closestCells[it][1], closestCells[it][2])->at(fsgrids::bfield::PERBY);
-// averageB[2] += bGrid->get(closestCells[it][0], closestCells[it][1], closestCells[it][2])->at(fsgrids::bfield::PERBZ);
-// }
-// // Average and project to normal direction
-// std::array normalDirection = fieldSolverGetNormalDirection(technicalGrid, i, j, k);
-// for(uint i=0; i<3; i++) {
-// averageB[i] *= normalDirection[i] / closestCells.size();
-// }
-// // Return (B.n)*normalVector[component]
-// return (averageB[0]+averageB[1]+averageB[2])*normalDirection[component];
-
- // Otherwise
+ // Sum perturbed B component over all nearest NOT_SYSBOUNDARY neighbours
+ /****
+
std::array averageB = {{ 0.0 }}; + for (uint it = 0; it < closestCells.size(); it++) { + #ifdef DEBUG_IONOSPHERE + if (technicalGrid.get(closestCells[it][0],closestCells[it][1],closestCells[it][2])->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { + stringstream ss; + ss << "ERROR, ionosphere cell (" << i << "," << j << "," << k << ") uses value from sysboundary nbr (" << closestCells[it][0] << "," << closestCells[it][1] << "," << closestCells[it][2] << " in " << __FILE__ << ":" << __LINE__ << endl; + cerr << ss.str(); + exit(1); + } + #endif + averageB[0] += bGrid->get(closestCells[it][0], closestCells[it][1], closestCells[it][2])->at(fsgrids::bfield::PERBX); + averageB[1] += bGrid->get(closestCells[it][0], closestCells[it][1], closestCells[it][2])->at(fsgrids::bfield::PERBY); + averageB[2] += bGrid->get(closestCells[it][0], closestCells[it][1], closestCells[it][2])->at(fsgrids::bfield::PERBZ); + } + + // Average and project to normal direction + std::array normalDirection = fieldSolverGetNormalDirection(technicalGrid, i, j, k); + for(uint i=0; i<3; i++) { + averageB[i] *= normalDirection[i] / closestCells.size(); + } + // Return (B.n)*normalVector[component] + return (averageB[0]+averageB[1]+averageB[2])*normalDirection[component]; + ***/ + // Copy each face B-field from the cell on the other side of it switch(component) { case 0: From 4db632eef6037b7711a97bc29f579c7c8fe44b19 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Fri, 28 Jun 2019 16:09:47 +0300 Subject: [PATCH 531/602] Testing L3 refinement region setup --- iowrite.cpp | 58 +++--- projects/Magnetosphere/Magnetosphere.cpp | 221 +++++++++++------------ vlasovsolver/cpu_trans_map_amr.cpp | 21 +++ 3 files changed, 167 insertions(+), 133 deletions(-) diff --git a/iowrite.cpp b/iowrite.cpp index dd4f095a3..2f043c09a 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -934,28 +934,42 @@ bool writeVelocitySpace(dccrg::Dccrg& mpi } // Cell lines selection // Determine cellID's 3D indices - lineX = (cells[i]-1) % P::xcells_ini; - lineY = ((cells[i]-1) / P::xcells_ini) % P::ycells_ini; - lineZ = ((cells[i]-1) /(P::xcells_ini * P::ycells_ini)) % P::zcells_ini; - // Check that indices are in correct intersection at least in one plane - if ((P::systemWriteDistributionWriteXlineStride[index] > 0 && - P::systemWriteDistributionWriteYlineStride[index] > 0 && - lineX % P::systemWriteDistributionWriteXlineStride[index] == 0 && - lineY % P::systemWriteDistributionWriteYlineStride[index] == 0) - && - (P::systemWriteDistributionWriteYlineStride[index] > 0 && - P::systemWriteDistributionWriteZlineStride[index] > 0 && - lineY % P::systemWriteDistributionWriteYlineStride[index] == 0 && - lineZ % P::systemWriteDistributionWriteZlineStride[index] == 0) - && - (P::systemWriteDistributionWriteZlineStride[index] > 0 && - P::systemWriteDistributionWriteXlineStride[index] > 0 && - lineZ % P::systemWriteDistributionWriteZlineStride[index] == 0 && - lineX % P::systemWriteDistributionWriteXlineStride[index] == 0) - ) { - velSpaceCells.push_back(cells[i]); - mpiGrid[cells[i]]->parameters[CellParams::ISCELLSAVINGF] = 1.0; - } + + // Loop over AMR levels + uint startindex=1; + uint endindex=1; + for (uint AMR = 0; AMR <= P::amrMaxSpatialRefLevel; AMR++) { + uint AMRm = std::floor(std::pow(2,AMR)); + uint cellsthislevel = (AMRm*P::xcells_ini)*(AMRm*P::ycells_ini)*(AMRm*P::zcells_ini); + startindex = endindex; + endindex = endindex + cellsthislevel; + + // If cell belongs to this AMR level, find indices + if ((cells[i]>=startindex)&&(cells[i] 0 && + 
P::systemWriteDistributionWriteYlineStride[index] > 0 && + lineX % P::systemWriteDistributionWriteXlineStride[index] == 0 && + lineY % P::systemWriteDistributionWriteYlineStride[index] == 0) + && + (P::systemWriteDistributionWriteYlineStride[index] > 0 && + P::systemWriteDistributionWriteZlineStride[index] > 0 && + lineY % P::systemWriteDistributionWriteYlineStride[index] == 0 && + lineZ % P::systemWriteDistributionWriteZlineStride[index] == 0) + && + (P::systemWriteDistributionWriteZlineStride[index] > 0 && + P::systemWriteDistributionWriteXlineStride[index] > 0 && + lineZ % P::systemWriteDistributionWriteZlineStride[index] == 0 && + lineX % P::systemWriteDistributionWriteXlineStride[index] == 0) + ) { + velSpaceCells.push_back(cells[i]); + mpiGrid[cells[i]]->parameters[CellParams::ISCELLSAVINGF] = 1.0; + } + } + } } uint64_t numVelSpaceCells; diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index 235c15e4c..ce6a5c4d5 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -561,122 +561,121 @@ namespace projects { // cout << "I am at line " << __LINE__ << " of " << __FILE__ << endl; if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; - const int bw = 2 * VLASOV_STENCIL_WIDTH; - const int bw2 = 2 * bw + VLASOV_STENCIL_WIDTH; - const int bw3 = 2 * bw2 + VLASOV_STENCIL_WIDTH; - - // Calculate regions for refinement - if (P::amrMaxSpatialRefLevel > 0) { - - // L1 refinement. Does not touch a 2-cell thick (at L0) boundary layer. - for (uint i = bw; i < P::xcells_ini-bw; ++i) { - for (uint j = bw; j < P::ycells_ini-bw; ++j) { - for (uint k = bw; k < P::zcells_ini-bw; ++k) { - - std::array xyz; - xyz[0] = P::xmin + (i+0.5)*P::dx_ini; - xyz[1] = P::ymin + (j+0.5)*P::dy_ini; - xyz[2] = P::zmin + (k+0.5)*P::dz_ini; - - Real radius2 = (xyz[0]*xyz[0]+xyz[1]*xyz[1]+xyz[2]*xyz[2]); - // Check if cell is within L1 sphere, or within L1 tail slice - if ((radius2 < refine_L1radius*refine_L1radius) || + // Leave boundary cells and a bit of safety margin + const int bw = VLASOV_STENCIL_WIDTH + 2; + const int bw2 = 2*bw + 2; + const int bw3 = 2*bw2 + 2; + + // Calculate regions for refinement + if (P::amrMaxSpatialRefLevel > 0) { + + // L1 refinement. + for (uint i = bw; i < P::xcells_ini-bw; ++i) { + for (uint j = bw; j < P::ycells_ini-bw; ++j) { + for (uint k = bw; k < P::zcells_ini-bw; ++k) { + + std::array xyz; + xyz[0] = P::xmin + (i+0.5)*P::dx_ini; + xyz[1] = P::ymin + (j+0.5)*P::dy_ini; + xyz[2] = P::zmin + (k+0.5)*P::dz_ini; + + Real radius2 = (xyz[0]*xyz[0]+xyz[1]*xyz[1]+xyz[2]*xyz[2]); + // Check if cell is within L1 sphere, or within L1 tail slice + if ((radius2 < refine_L1radius*refine_L1radius) || ((xyz[0] < 0) && (std::abs(xyz[1]) < refine_L1radius) && - (std::abs(xyz[2]) 0) { - std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. " << std::endl; - } + if(refinedCells.size() > 0) { + std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. " << std::endl; + } #endif - mpiGrid.balance_load(); - } - - if (P::amrMaxSpatialRefLevel > 1) { - - // L2 refinement. Does not touch a 5-cell thick (at L1) boundary layer. 
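
The iowrite.cpp hunk above walks contiguous cell-ID ranges level by level: refinement level L multiplies each dimension by 2^L, so it contributes (2^L)^3 times the base cell count, and its IDs start right after the previous level's. A standalone sketch of that bookkeeping (the grid dimensions are illustrative, not from the patch):

    #include <cstdint>
    #include <iostream>

    int main() {
       const std::uint64_t nx = 16, ny = 16, nz = 16;    // base (level-0) grid size
       std::uint64_t start = 1, end = 1;                 // cell IDs start from 1
       for (unsigned level = 0; level <= 2; ++level) {
          const std::uint64_t f = std::uint64_t{1} << level;        // 2^level
          const std::uint64_t cellsThisLevel = (f*nx) * (f*ny) * (f*nz);
          start = end;
          end   = start + cellsThisLevel;                // half-open range [start, end)
          std::cout << "level " << level << ": IDs [" << start << ", " << end << ")\n";
       }
       return 0;
    }
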
- // This means a boundary width of 2 L0 cells and one L1 cell in between - // as a buffer - for (uint i = bw2; i < 2*P::xcells_ini-bw2; ++i) { - for (uint j = bw2; j < 2*P::ycells_ini-bw2; ++j) { - for (uint k = bw2; k < 2*P::zcells_ini-bw2; ++k) { - - std::array xyz; - xyz[0] = P::xmin + (i+0.5)*0.5*P::dx_ini; - xyz[1] = P::ymin + (j+0.5)*0.5*P::dy_ini; - xyz[2] = P::zmin + (k+0.5)*0.5*P::dz_ini; - - Real radius2 = (xyz[0]*xyz[0]+xyz[1]*xyz[1]+xyz[2]*xyz[2]); - // Check if cell is within L1 sphere, or within L1 tail slice - if ((radius2 < refine_L2radius*refine_L2radius) || - ((xyz[0] < 0) && (std::abs(xyz[1]) < refine_L2radius) && - (std::abs(xyz[2]) 1) { + + // L2 refinement. + for (uint i = bw2; i < 2*P::xcells_ini-bw2; ++i) { + for (uint j = bw2; j < 2*P::ycells_ini-bw2; ++j) { + for (uint k = bw2; k < 2*P::zcells_ini-bw2; ++k) { + + std::array xyz; + xyz[0] = P::xmin + (i+0.5)*0.5*P::dx_ini; + xyz[1] = P::ymin + (j+0.5)*0.5*P::dy_ini; + xyz[2] = P::zmin + (k+0.5)*0.5*P::dz_ini; + + Real radius2 = (xyz[0]*xyz[0]+xyz[1]*xyz[1]+xyz[2]*xyz[2]); + // Check if cell is within L1 sphere, or within L1 tail slice + if ((radius2 < refine_L2radius*refine_L2radius) || + ((xyz[0] < 0) && (std::abs(xyz[1]) < refine_L2radius) && + (std::abs(xyz[2]) 0) { - std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. " << std::endl; - } + if(refinedCells.size() > 0) { + std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. " << std::endl; + } #endif - - mpiGrid.balance_load(); - } - - if (P::amrMaxSpatialRefLevel > 2) { - - if (refine_L3radius < refine_L2radius && refine_L3radius > ionosphereRadius) { - // L3 refinement. - for (uint i = bw3; i < 4*P::xcells_ini-bw3; ++i) { - for (uint j = bw3; j < 4*P::ycells_ini-bw3; ++j) { - for (uint k = bw3; k < 4*P::zcells_ini-bw3; ++k) { - - std::array xyz; - xyz[0] = P::xmin + (i+0.5)*0.25*P::dx_ini; - xyz[1] = P::ymin + (j+0.5)*0.25*P::dy_ini; - xyz[2] = P::zmin + (k+0.5)*0.25*P::dz_ini; - - Real radius2 = (xyz[0]*xyz[0]+xyz[1]*xyz[1]+xyz[2]*xyz[2]); - // Check if cell is within L1 sphere, or within L1 tail slice - if (radius2 < refine_L3radius*refine_L3radius) - { - CellID myCell = mpiGrid.get_existing_cell(xyz); - // Check if the cell is tagged as do not compute - mpiGrid.refine_completely(myCell); - } - } - } - } - refinedCells = mpiGrid.stop_refining(true); - if(myRank == MASTER_RANK) std::cout << "Finished third level of refinement" << endl; - #ifndef NDEBUG - if(refinedCells.size() > 0) { - std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. " << std::endl; - } - #endif - - mpiGrid.balance_load(); - } else { - std::cout << "Skipping third level of refinement because the radius is larger than the 2nd level radius or smaller than the ionosphere radius." << std::endl; - } - } - - return true; + + mpiGrid.balance_load(); + } + + if (P::amrMaxSpatialRefLevel > 2) { + + if (refine_L3radius < refine_L2radius && refine_L3radius > ionosphereRadius) { + // L3 refinement. 
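
These loops convert per-level indices back to physical positions. The general rule, which the L3 block below instantiates with level-2 cells (factor 4 in the loop bounds, 0.25 in the cell size), is that level L has cells_ini * 2^L cells per dimension, each of width dx_ini / 2^L. A one-function sketch (cellCentreX is a hypothetical helper name):

    #include <cassert>
    #include <cmath>

    // Centre of 0-based cell i along x at refinement level `level`.
    double cellCentreX(double xmin, double dx_ini, unsigned i, unsigned level) {
       const double dx = dx_ini / std::pow(2.0, level);
       return xmin + (i + 0.5) * dx;
    }

    int main() {
       // Level-2 cells, as in the loop below: matches xmin + (i+0.5)*0.25*dx_ini.
       assert(cellCentreX(0.0, 1.0, 7, 2) == (7 + 0.5) * 0.25);
       return 0;
    }
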
+ for (uint i = bw3; i < 4*P::xcells_ini-bw3; ++i) { + for (uint j = bw3; j < 4*P::ycells_ini-bw3; ++j) { + for (uint k = bw3; k < 4*P::zcells_ini-bw3; ++k) { + + std::array xyz; + xyz[0] = P::xmin + (i+0.5)*0.25*P::dx_ini; + xyz[1] = P::ymin + (j+0.5)*0.25*P::dy_ini; + xyz[2] = P::zmin + (k+0.5)*0.25*P::dz_ini; + + Real radius2 = (xyz[0]*xyz[0]+xyz[1]*xyz[1]+xyz[2]*xyz[2]); + // Check if cell is within L1 sphere, or within L1 tail slice + if (radius2 < refine_L3radius*refine_L3radius) + { + CellID myCell = mpiGrid.get_existing_cell(xyz); + // Check if the cell is tagged as do not compute + mpiGrid.refine_completely(myCell); + } + } + } + } + refinedCells = mpiGrid.stop_refining(true); + if(myRank == MASTER_RANK) std::cout << "Finished third level of refinement" << endl; +#ifndef NDEBUG + if(refinedCells.size() > 0) { + std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. " << std::endl; + } +#endif + + mpiGrid.balance_load(); + } else { + std::cout << "Skipping third level of refinement because the radius is larger than the 2nd level radius or smaller than the ionosphere radius." << std::endl; + } + } + + return true; } } // namespace projects diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index cba9ff49b..00a2a706f 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -882,6 +882,27 @@ bool checkPencils(const dccrg::Dccrg& mpi } + for (int ipencil = 0; ipencil < pencils.N; ++ipencil) { + + auto ids = pencils.getIds(ipencil); + + for (auto id : ids) { + + int myCount = std::count(pencils.ids.begin(), pencils.ids.end(), id); + int nPencilsThroughThisCell = pow(pow(2,pencils.path[ipencil].size()),2); + + if (myCount > nPencilsThroughThisCell) { + + std::cerr << "ERROR: Cell ID " << id << " Appears in pencils " << myCount << " times!"<< std::endl; + std::cerr << " It should not appear more than " << nPencilsThroughThisCell << " times." 
<< std::endl; + correct = false; + + } + + } + + } + return correct; } From 17ebc92c5014a5605272514394f7a78a91d5f1e6 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Tue, 2 Jul 2019 18:32:08 +0300 Subject: [PATCH 532/602] refixed looping limits --- projects/Magnetosphere/Magnetosphere.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index ce6a5c4d5..787417666 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -562,9 +562,9 @@ namespace projects { if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; // Leave boundary cells and a bit of safety margin - const int bw = VLASOV_STENCIL_WIDTH + 2; - const int bw2 = 2*bw + 2; - const int bw3 = 2*bw2 + 2; + const int bw = 2* VLASOV_STENCIL_WIDTH; + const int bw2 = 2*(bw + VLASOV_STENCIL_WIDTH); + const int bw3 = 2*(bw2 + VLASOV_STENCIL_WIDTH); // Calculate regions for refinement if (P::amrMaxSpatialRefLevel > 0) { From 94ebecccc02b6f5ed8bd3efb5854edb2dd0a7aac Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Tue, 2 Jul 2019 18:51:28 +0300 Subject: [PATCH 533/602] implemented L3 nose and tail boxes --- projects/Magnetosphere/Magnetosphere.cpp | 70 ++++++++++++++++++++++-- projects/Magnetosphere/Magnetosphere.h | 10 +++- 2 files changed, 73 insertions(+), 7 deletions(-) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index 787417666..921a11446 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -54,7 +54,15 @@ namespace projects { RP::add("Magnetosphere.dipoleType","0: Normal 3D dipole, 1: line-dipole for 2D polar simulations, 2: line-dipole with mirror, 3: 3D dipole with mirror", 0); RP::add("Magnetosphere.dipoleMirrorLocationX","x-coordinate of dipole Mirror", -1.0); - RP::add("Magnetosphere.refine_L3radius","Radius of L3-refined sphere", 6.371e7); // 10 RE + //RP::add("Magnetosphere.refine_L3radius","Radius of L3-refined sphere", 6.371e7); // 10 RE + RP::add("Magnetosphere.refine_L3_nosewidth","Width of nose L3-refined box, in y and x", 5.0e7); // 10 RE + RP::add("Magnetosphere.refine_L3_nosexmin","Low x-value of nose L3-refined box", 5.0e7); // + RP::add("Magnetosphere.refine_L3_nosexmax","High x-value of nose L3-refined box", 10.0e7); // + RP::add("Magnetosphere.refine_L3_tailheight","Height in +-z of tail L3-refined box", 1.0e7); // + RP::add("Magnetosphere.refine_L3_tailwidth","Width in +-y of tail L3-refined box", 5.0e7); // 10 RE + RP::add("Magnetosphere.refine_L3_tailxmin","Low x-value of tail L3-refined box", -20.0e7); // 10 RE + RP::add("Magnetosphere.refine_L3_tailxmax","High x-value of tail L3-refined box", -5.0e7); // 10 RE + RP::add("Magnetosphere.refine_L2radius","Radius of L2-refined sphere", 9.5565e7); // 15 RE RP::add("Magnetosphere.refine_L2tailthick","Thickness of L2-refined tail region", 3.1855e7); // 5 RE RP::add("Magnetosphere.refine_L1radius","Radius of L1-refined sphere", 1.59275e8); // 25 RE @@ -142,10 +150,39 @@ namespace projects { } - if(!Readparameters::get("Magnetosphere.refine_L3radius", this->refine_L3radius)) { +// if(!Readparameters::get("Magnetosphere.refine_L3radius", this->refine_L3radius)) { +// if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" 
<< endl; +// exit(1); +// } + if(!Readparameters::get("Magnetosphere.refine_L3nosewidth", this->refine_L3nosewidth)) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; + exit(1); + } + if(!Readparameters::get("Magnetosphere.refine_L3nosexmin", this->refine_L3nosexmin)) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; + exit(1); + } + if(!Readparameters::get("Magnetosphere.refine_L3nosexmax", this->refine_L3nosexmax)) { if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; exit(1); } + if(!Readparameters::get("Magnetosphere.refine_L3tailwidth", this->refine_L3tailwidth)) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; + exit(1); + } + if(!Readparameters::get("Magnetosphere.refine_L3tailheight", this->refine_L3tailheight)) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; + exit(1); + } + if(!Readparameters::get("Magnetosphere.refine_L3tailxmin", this->refine_L3tailxmin)) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; + exit(1); + } + if(!Readparameters::get("Magnetosphere.refine_L3tailxmax", this->refine_L3tailxmax)) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; + exit(1); + } + if(!Readparameters::get("Magnetosphere.refine_L2radius", this->refine_L2radius)) { if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; exit(1); @@ -650,15 +687,36 @@ namespace projects { xyz[1] = P::ymin + (j+0.5)*0.25*P::dy_ini; xyz[2] = P::zmin + (k+0.5)*0.25*P::dz_ini; - Real radius2 = (xyz[0]*xyz[0]+xyz[1]*xyz[1]+xyz[2]*xyz[2]); - // Check if cell is within L1 sphere, or within L1 tail slice - if (radius2 < refine_L3radius*refine_L3radius) +// Real radius2 = (xyz[0]*xyz[0]+xyz[1]*xyz[1]+xyz[2]*xyz[2]); +// // Check if cell is within L1 sphere, or within L1 tail slice +// if (radius2 < refine_L3radius*refine_L3radius) +// { +// CellID myCell = mpiGrid.get_existing_cell(xyz); +// // Check if the cell is tagged as do not compute +// mpiGrid.refine_completely(myCell); +// } + + // Check if cell is within the nose box + if ((xyz[0]>refine_L3_nosexmin) && (xyz[0]refine_L3_tailxmin) && (xyz[0] Date: Tue, 2 Jul 2019 19:27:13 +0300 Subject: [PATCH 534/602] parameter typos and ond L3radius if --- projects/Magnetosphere/Magnetosphere.cpp | 30 ++++++++++++------------ projects/Magnetosphere/Magnetosphere.h | 14 +++++------ 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index 921a11446..2f23a688e 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -55,13 +55,13 @@ namespace projects { RP::add("Magnetosphere.dipoleMirrorLocationX","x-coordinate of dipole Mirror", -1.0); //RP::add("Magnetosphere.refine_L3radius","Radius of L3-refined sphere", 6.371e7); // 10 RE - RP::add("Magnetosphere.refine_L3_nosewidth","Width of nose L3-refined box, in y and x", 5.0e7); // 10 RE - RP::add("Magnetosphere.refine_L3_nosexmin","Low x-value of nose L3-refined box", 5.0e7); // - RP::add("Magnetosphere.refine_L3_nosexmax","High x-value of nose 
L3-refined box", 10.0e7); // - RP::add("Magnetosphere.refine_L3_tailheight","Height in +-z of tail L3-refined box", 1.0e7); // - RP::add("Magnetosphere.refine_L3_tailwidth","Width in +-y of tail L3-refined box", 5.0e7); // 10 RE - RP::add("Magnetosphere.refine_L3_tailxmin","Low x-value of tail L3-refined box", -20.0e7); // 10 RE - RP::add("Magnetosphere.refine_L3_tailxmax","High x-value of tail L3-refined box", -5.0e7); // 10 RE + RP::add("Magnetosphere.refine_L3nosewidth","Width of nose L3-refined box, in y and x", 5.0e7); // 10 RE + RP::add("Magnetosphere.refine_L3nosexmin","Low x-value of nose L3-refined box", 5.0e7); // + RP::add("Magnetosphere.refine_L3nosexmax","High x-value of nose L3-refined box", 10.0e7); // + RP::add("Magnetosphere.refine_L3tailheight","Height in +-z of tail L3-refined box", 1.0e7); // + RP::add("Magnetosphere.refine_L3tailwidth","Width in +-y of tail L3-refined box", 5.0e7); // 10 RE + RP::add("Magnetosphere.refine_L3tailxmin","Low x-value of tail L3-refined box", -20.0e7); // 10 RE + RP::add("Magnetosphere.refine_L3tailxmax","High x-value of tail L3-refined box", -5.0e7); // 10 RE RP::add("Magnetosphere.refine_L2radius","Radius of L2-refined sphere", 9.5565e7); // 15 RE RP::add("Magnetosphere.refine_L2tailthick","Thickness of L2-refined tail region", 3.1855e7); // 5 RE @@ -676,7 +676,7 @@ namespace projects { if (P::amrMaxSpatialRefLevel > 2) { - if (refine_L3radius < refine_L2radius && refine_L3radius > ionosphereRadius) { + // if (refine_L3radius < refine_L2radius && refine_L3radius > ionosphereRadius) { // L3 refinement. for (uint i = bw3; i < 4*P::xcells_ini-bw3; ++i) { for (uint j = bw3; j < 4*P::ycells_ini-bw3; ++j) { @@ -697,8 +697,8 @@ namespace projects { // } // Check if cell is within the nose box - if ((xyz[0]>refine_L3_nosexmin) && (xyz[0]refine_L3nosexmin) && (xyz[0]refine_L3_tailxmin) && (xyz[0]refine_L3tailxmin) && (xyz[0] Date: Tue, 2 Jul 2019 19:34:02 +0300 Subject: [PATCH 535/602] removed volumetric e-fields --- datareduction/datareducer.cpp | 38 ----------------------------------- parameters.cpp | 2 -- 2 files changed, 40 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 214158a87..3c4a20f74 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -574,44 +574,6 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - if(lowercase == "vole" || lowercase == "vg_vole" || lowercase == "evol" || lowercase == "vg_e_vol" || lowercase == "e_vol") { - // Volume-averaged E field - outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_e_vol",CellParams::EXVOL,3)); - outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{vol,vg}$","1.0"); - continue; - } - if(lowercase == "fg_vole" || lowercase == "fg_e_vol" || lowercase == "fg_evol") { - outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_e_vol",[]( - FsGrid< std::array, 2>& perBGrid, - FsGrid< std::array, 2>& EGrid, - FsGrid< std::array, 2>& EHallGrid, - FsGrid< std::array, 2>& EGradPeGrid, - FsGrid< std::array, 2>& momentsGrid, - FsGrid< std::array, 2>& dPerBGrid, - FsGrid< std::array, 2>& dMomentsGrid, - FsGrid< std::array, 2>& BgBGrid, - FsGrid< std::array, 2>& volGrid, - FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { - - std::array& gridSize = technicalGrid.getLocalSize(); - std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); - - // Iterate through fsgrid cells and extract EVOL - 
for(int z=0; zaddMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{vol,fg}$","1.0"); - continue; - } if(lowercase == "halle" || lowercase == "fg_halle" || lowercase == "fg_e_hall") { for(int index=0; index Date: Wed, 3 Jul 2019 14:56:35 +0300 Subject: [PATCH 536/602] changed nose box to spherical cap --- projects/Magnetosphere/Magnetosphere.cfg | 8 +++++++ projects/Magnetosphere/Magnetosphere.cpp | 27 ++++++------------------ projects/Magnetosphere/Magnetosphere.h | 4 +--- 3 files changed, 16 insertions(+), 23 deletions(-) diff --git a/projects/Magnetosphere/Magnetosphere.cfg b/projects/Magnetosphere/Magnetosphere.cfg index dbaa760f4..19abe396b 100644 --- a/projects/Magnetosphere/Magnetosphere.cfg +++ b/projects/Magnetosphere/Magnetosphere.cfg @@ -96,6 +96,14 @@ constBgBY = 0.0 constBgBZ = -1.0e-9 noDipoleInSW = 0.0 + +refine_L3radius = 6.371e7 # 10 RE +refine_L3nosexmin = 5.0e7 +refine_L3tailheight = 1.0e7 +refine_L3tailwidth = 5.0e7 +refine_L3tailxmin = -40.0e7 +refine_L3tailxmax = -5.0e7 + refine_L2radius = 9.5565e7 # 15 RE refine_L2tailthick = 3.1855e7 # 5 RE refine_L1radius = 1.59275e8 # 25 RE diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index 2f23a688e..9a6d2e058 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -54,10 +54,8 @@ namespace projects { RP::add("Magnetosphere.dipoleType","0: Normal 3D dipole, 1: line-dipole for 2D polar simulations, 2: line-dipole with mirror, 3: 3D dipole with mirror", 0); RP::add("Magnetosphere.dipoleMirrorLocationX","x-coordinate of dipole Mirror", -1.0); - //RP::add("Magnetosphere.refine_L3radius","Radius of L3-refined sphere", 6.371e7); // 10 RE - RP::add("Magnetosphere.refine_L3nosewidth","Width of nose L3-refined box, in y and x", 5.0e7); // 10 RE + RP::add("Magnetosphere.refine_L3radius","Radius of L3-refined sphere", 6.371e7); // 10 RE RP::add("Magnetosphere.refine_L3nosexmin","Low x-value of nose L3-refined box", 5.0e7); // - RP::add("Magnetosphere.refine_L3nosexmax","High x-value of nose L3-refined box", 10.0e7); // RP::add("Magnetosphere.refine_L3tailheight","Height in +-z of tail L3-refined box", 1.0e7); // RP::add("Magnetosphere.refine_L3tailwidth","Width in +-y of tail L3-refined box", 5.0e7); // 10 RE RP::add("Magnetosphere.refine_L3tailxmin","Low x-value of tail L3-refined box", -20.0e7); // 10 RE @@ -150,11 +148,7 @@ namespace projects { } -// if(!Readparameters::get("Magnetosphere.refine_L3radius", this->refine_L3radius)) { -// if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; -// exit(1); -// } - if(!Readparameters::get("Magnetosphere.refine_L3nosewidth", this->refine_L3nosewidth)) { + if(!Readparameters::get("Magnetosphere.refine_L3radius", this->refine_L3radius)) { if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; exit(1); } @@ -162,10 +156,6 @@ namespace projects { if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; exit(1); } - if(!Readparameters::get("Magnetosphere.refine_L3nosexmax", this->refine_L3nosexmax)) { - if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" 
<< endl; - exit(1); - } if(!Readparameters::get("Magnetosphere.refine_L3tailwidth", this->refine_L3tailwidth)) { if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; exit(1); @@ -677,7 +667,7 @@ namespace projects { if (P::amrMaxSpatialRefLevel > 2) { // if (refine_L3radius < refine_L2radius && refine_L3radius > ionosphereRadius) { - // L3 refinement. + // L3 refinement. for (uint i = bw3; i < 4*P::xcells_ini-bw3; ++i) { for (uint j = bw3; j < 4*P::ycells_ini-bw3; ++j) { for (uint k = bw3; k < 4*P::zcells_ini-bw3; ++k) { @@ -687,7 +677,7 @@ namespace projects { xyz[1] = P::ymin + (j+0.5)*0.25*P::dy_ini; xyz[2] = P::zmin + (k+0.5)*0.25*P::dz_ini; -// Real radius2 = (xyz[0]*xyz[0]+xyz[1]*xyz[1]+xyz[2]*xyz[2]); + Real radius2 = (xyz[0]*xyz[0]+xyz[1]*xyz[1]+xyz[2]*xyz[2]); // // Check if cell is within L1 sphere, or within L1 tail slice // if (radius2 < refine_L3radius*refine_L3radius) // { @@ -696,14 +686,12 @@ namespace projects { // mpiGrid.refine_completely(myCell); // } - // Check if cell is within the nose box - if ((xyz[0]>refine_L3nosexmin) && (xyz[0]refine_L3nosexmin) && (radius2 Date: Tue, 9 Jul 2019 14:41:59 +0300 Subject: [PATCH 537/602] removed accidental complaint messages, switched to just one final load balance --- projects/Magnetosphere/Magnetosphere.cpp | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index 9a6d2e058..c80a46529 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -54,7 +54,7 @@ namespace projects { RP::add("Magnetosphere.dipoleType","0: Normal 3D dipole, 1: line-dipole for 2D polar simulations, 2: line-dipole with mirror, 3: 3D dipole with mirror", 0); RP::add("Magnetosphere.dipoleMirrorLocationX","x-coordinate of dipole Mirror", -1.0); - RP::add("Magnetosphere.refine_L3radius","Radius of L3-refined sphere", 6.371e7); // 10 RE + RP::add("Magnetosphere.refine_L3radius","Radius of L3-refined sphere or cap", 6.371e7); // 10 RE RP::add("Magnetosphere.refine_L3nosexmin","Low x-value of nose L3-refined box", 5.0e7); // RP::add("Magnetosphere.refine_L3tailheight","Height in +-z of tail L3-refined box", 1.0e7); // RP::add("Magnetosphere.refine_L3tailwidth","Width in +-y of tail L3-refined box", 5.0e7); // 10 RE @@ -625,7 +625,7 @@ namespace projects { std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. " << std::endl; } #endif - mpiGrid.balance_load(); + //mpiGrid.balance_load(); } if (P::amrMaxSpatialRefLevel > 1) { @@ -661,12 +661,10 @@ namespace projects { } #endif - mpiGrid.balance_load(); + //mpiGrid.balance_load(); } if (P::amrMaxSpatialRefLevel > 2) { - - // if (refine_L3radius < refine_L2radius && refine_L3radius > ionosphereRadius) { // L3 refinement. for (uint i = bw3; i < 4*P::xcells_ini-bw3; ++i) { for (uint j = bw3; j < 4*P::ycells_ini-bw3; ++j) { @@ -714,12 +712,14 @@ namespace projects { } #endif - mpiGrid.balance_load(); - } else { - std::cout << "Skipping third level of refinement because the radius is larger than the 2nd level radius or smaller than the ionosphere radius." 
<< std::endl; - } - //} - + //mpiGrid.balance_load(); + } + + // Do load balance only once at end + if (P::amrMaxSpatialRefLevel > 0) { + mpiGrid.balance_load(); + } + return true; } From af928e2948d6dce018dd68df8cd9641da3ce73fe Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Thu, 18 Jul 2019 14:45:52 +0300 Subject: [PATCH 538/602] added L4 refined nose cap --- projects/Magnetosphere/Magnetosphere.cpp | 49 ++++++++++++++++++++++++ projects/Magnetosphere/Magnetosphere.h | 3 ++ 2 files changed, 52 insertions(+) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index c80a46529..7b076992a 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -54,6 +54,9 @@ namespace projects { RP::add("Magnetosphere.dipoleType","0: Normal 3D dipole, 1: line-dipole for 2D polar simulations, 2: line-dipole with mirror, 3: 3D dipole with mirror", 0); RP::add("Magnetosphere.dipoleMirrorLocationX","x-coordinate of dipole Mirror", -1.0); + RP::add("Magnetosphere.refine_L4radius","Radius of L3-refined sphere or cap", 6.0e7); + RP::add("Magnetosphere.refine_L4nosexmin","Low x-value of nose L3-refined box", 5.5.0e7); + RP::add("Magnetosphere.refine_L3radius","Radius of L3-refined sphere or cap", 6.371e7); // 10 RE RP::add("Magnetosphere.refine_L3nosexmin","Low x-value of nose L3-refined box", 5.0e7); // RP::add("Magnetosphere.refine_L3tailheight","Height in +-z of tail L3-refined box", 1.0e7); // @@ -148,6 +151,15 @@ namespace projects { } + if(!Readparameters::get("Magnetosphere.refine_L4radius", this->refine_L4radius)) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; + exit(1); + } + if(!Readparameters::get("Magnetosphere.refine_L4nosexmin", this->refine_L4nosexmin)) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; + exit(1); + } + if(!Readparameters::get("Magnetosphere.refine_L3radius", this->refine_L3radius)) { if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << endl; exit(1); @@ -592,6 +604,7 @@ namespace projects { const int bw = 2* VLASOV_STENCIL_WIDTH; const int bw2 = 2*(bw + VLASOV_STENCIL_WIDTH); const int bw3 = 2*(bw2 + VLASOV_STENCIL_WIDTH); + const int bw4 = 2*(bw3 + VLASOV_STENCIL_WIDTH); // Calculate regions for refinement if (P::amrMaxSpatialRefLevel > 0) { @@ -715,6 +728,42 @@ namespace projects { //mpiGrid.balance_load(); } + if (P::amrMaxSpatialRefLevel > 3) { + // L4 refinement. + for (uint i = bw4; i < 8*P::xcells_ini-bw4; ++i) { + for (uint j = bw4; j < 8*P::ycells_ini-bw4; ++j) { + for (uint k = bw4; k < 8*P::zcells_ini-bw4; ++k) { + + std::array xyz; + xyz[0] = P::xmin + (i+0.5)*0.125*P::dx_ini; + xyz[1] = P::ymin + (j+0.5)*0.125*P::dy_ini; + xyz[2] = P::zmin + (k+0.5)*0.125*P::dz_ini; + + Real radius2 = (xyz[0]*xyz[0]+xyz[1]*xyz[1]+xyz[2]*xyz[2]); + + // Check if cell is within the nose cap + if ((xyz[0]>refine_L4nosexmin) && (radius2 0) { + std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. 
" << std::endl; + } +#endif + + //mpiGrid.balance_load(); + } + + // Do load balance only once at end if (P::amrMaxSpatialRefLevel > 0) { mpiGrid.balance_load(); diff --git a/projects/Magnetosphere/Magnetosphere.h b/projects/Magnetosphere/Magnetosphere.h index 352196a80..d6978cc9e 100644 --- a/projects/Magnetosphere/Magnetosphere.h +++ b/projects/Magnetosphere/Magnetosphere.h @@ -85,6 +85,9 @@ namespace projects { Real dipoleMirrorLocationX; uint dipoleType; + Real refine_L4radius; + Real refine_L4nosexmin; + Real refine_L3radius; Real refine_L3nosexmin; Real refine_L3tailheight; From 3db8a8478e76032362ae76b2b7e5ca53ee4808b2 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Thu, 18 Jul 2019 14:56:01 +0300 Subject: [PATCH 539/602] fix typo --- projects/Magnetosphere/Magnetosphere.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index 7b076992a..e5f751ac0 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -55,7 +55,7 @@ namespace projects { RP::add("Magnetosphere.dipoleMirrorLocationX","x-coordinate of dipole Mirror", -1.0); RP::add("Magnetosphere.refine_L4radius","Radius of L3-refined sphere or cap", 6.0e7); - RP::add("Magnetosphere.refine_L4nosexmin","Low x-value of nose L3-refined box", 5.5.0e7); + RP::add("Magnetosphere.refine_L4nosexmin","Low x-value of nose L3-refined box", 5.5e7); RP::add("Magnetosphere.refine_L3radius","Radius of L3-refined sphere or cap", 6.371e7); // 10 RE RP::add("Magnetosphere.refine_L3nosexmin","Low x-value of nose L3-refined box", 5.0e7); // From 5ab34d3b8fcbe183f1dc703cdb87a7d8986e467f Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Mon, 22 Jul 2019 14:33:31 +0300 Subject: [PATCH 540/602] added refinement to IPShock project --- projects/IPShock/IPShock.cfg | 4 + projects/IPShock/IPShock.cpp | 138 +++++++++++++++++++++++++++++++++++ projects/IPShock/IPShock.h | 5 ++ 3 files changed, 147 insertions(+) diff --git a/projects/IPShock/IPShock.cfg b/projects/IPShock/IPShock.cfg index 4d88dfb4a..06b6db385 100644 --- a/projects/IPShock/IPShock.cfg +++ b/projects/IPShock/IPShock.cfg @@ -136,6 +136,10 @@ BX0d = 3.93058e-09 BY0d = 0.00000 BZ0d = 1.1553350e-08 Width = 1.0e6 + +AMR_L1width = 3.0e6 +AMR_L2width = 1.5e6 + [proton_IPShock] VX0u = -250000. VY0u = 0.00000 diff --git a/projects/IPShock/IPShock.cpp b/projects/IPShock/IPShock.cpp index 26c54042b..09e610a5c 100644 --- a/projects/IPShock/IPShock.cpp +++ b/projects/IPShock/IPShock.cpp @@ -63,6 +63,11 @@ namespace projects { RP::add("IPShock.BZ0d", "Downstream mag. field value (T)", 3.0e-9); RP::add("IPShock.Width", "Shock Width (m)", 50000); + RP::add("IPShock.AMR_L1width", "L1 AMR region width (m)", 0); + RP::add("IPShock.AMR_L2width", "L2 AMR region width (m)", 0); + RP::add("IPShock.AMR_L3width", "L3 AMR region width (m)", 0); + RP::add("IPShock.AMR_L4width", "L4 AMR region width (m)", 0); + // Per-population parameters for(uint i=0; i< getObjectWrapper().particleSpecies.size(); i++) { const std::string& pop = getObjectWrapper().particleSpecies[i].name; @@ -120,6 +125,23 @@ namespace projects { exit(1); } + if(!RP::get("IPShock.AMR_L1width", this->AMR_L1width)) { + if(myRank == MASTER_RANK) std::cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" 
<< std::endl; + exit(1); + } + if(!RP::get("IPShock.AMR_L2width", this->AMR_L2width)) { + if(myRank == MASTER_RANK) std::cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << std::endl; + exit(1); + } + if(!RP::get("IPShock.AMR_L3width", this->AMR_L3width)) { + if(myRank == MASTER_RANK) std::cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << std::endl; + exit(1); + } + if(!RP::get("IPShock.AMR_L4width", this->AMR_L4width)) { + if(myRank == MASTER_RANK) std::cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added!" << std::endl; + exit(1); + } + // Per-population parameters for(uint i=0; i< getObjectWrapper().particleSpecies.size(); i++) { const std::string& pop = getObjectWrapper().particleSpecies[i].name; @@ -496,4 +518,120 @@ namespace projects { } } + + bool IPShock::refineSpatialCells( dccrg::Dccrg& mpiGrid ) const { + + int myRank; + MPI_Comm_rank(MPI_COMM_WORLD,&myRank); + + std::vector refinedCells; + + if(myRank == MASTER_RANK) std::cout << "Maximum refinement level is " << mpiGrid.mapping.get_maximum_refinement_level() << std::endl; + + // Leave boundary cells and a bit of safety margin +// const int bw = 2* VLASOV_STENCIL_WIDTH; +// const int bw2 = 2*(bw + VLASOV_STENCIL_WIDTH); +// const int bw3 = 2*(bw2 + VLASOV_STENCIL_WIDTH); + + // Calculate regions for refinement + if (P::amrMaxSpatialRefLevel > 0) { + // L1 refinement. + for (uint i = 0; i < P::xcells_ini; ++i) { + for (uint j = 0; j < P::ycells_ini; ++j) { + for (uint k = 0; k < P::zcells_ini; ++k) { + + std::array xyz; + xyz[0] = P::xmin + (i+0.5)*P::dx_ini; + xyz[1] = P::ymin + (j+0.5)*P::dy_ini; + xyz[2] = P::zmin + (k+0.5)*P::dz_ini; + + if (abs(xyz[0]) < AMR_L1width) + { + CellID myCell = mpiGrid.get_existing_cell(xyz); + mpiGrid.refine_completely(myCell); + } + } + } + } + refinedCells = mpiGrid.stop_refining(true); + if(myRank == MASTER_RANK) std::cout << "Finished first level of refinement" << endl; + mpiGrid.balance_load(); + } + + if (P::amrMaxSpatialRefLevel > 1) { + // L2 refinement. + for (uint i = 0; i < 2*P::xcells_ini; ++i) { + for (uint j = 0; j < 2*P::ycells_ini; ++j) { + for (uint k = 0; k < 2*P::zcells_ini; ++k) { + + std::array xyz; + xyz[0] = P::xmin + (i+0.5)*0.5*P::dx_ini; + xyz[1] = P::ymin + (j+0.5)*0.5*P::dy_ini; + xyz[2] = P::zmin + (k+0.5)*0.5*P::dz_ini; + + if (abs(xyz[0]) < AMR_L2width) + { + CellID myCell = mpiGrid.get_existing_cell(xyz); + mpiGrid.refine_completely(myCell); + } + } + } + } + refinedCells = mpiGrid.stop_refining(true); + if(myRank == MASTER_RANK) std::cout << "Finished second level of refinement" << endl; + mpiGrid.balance_load(); + } + + if (P::amrMaxSpatialRefLevel > 2) { + // L3 refinement. + for (uint i = 0; i < 4*P::xcells_ini; ++i) { + for (uint j = 0; j < 4*P::ycells_ini; ++j) { + for (uint k = 0; k < 4*P::zcells_ini; ++k) { + + std::array xyz; + xyz[0] = P::xmin + (i+0.5)*0.25*P::dx_ini; + xyz[1] = P::ymin + (j+0.5)*0.25*P::dy_ini; + xyz[2] = P::zmin + (k+0.5)*0.25*P::dz_ini; + + if (abs(xyz[0]) < AMR_L3width) + { + CellID myCell = mpiGrid.get_existing_cell(xyz); + mpiGrid.refine_completely(myCell); + } + } + } + } + refinedCells = mpiGrid.stop_refining(true); + if(myRank == MASTER_RANK) std::cout << "Finished third level of refinement" << endl; + mpiGrid.balance_load(); + } + + if (P::amrMaxSpatialRefLevel > 3) { + // L4 refinement. 
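+      // (Background note, not part of the original patch: the level-n pass
+      //  scans the level-(n-1) mesh, so each dimension runs over
+      //  2^(n-1)*cells_ini indices with cell centres spaced dx_ini/2^(n-1);
+      //  hence the factors 8 and 0.125 below for the fourth level.)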
+ for (uint i = 0; i < 8*P::xcells_ini; ++i) { + for (uint j = 0; j < 8*P::ycells_ini; ++j) { + for (uint k = 0; k < 8*P::zcells_ini; ++k) { + + std::array xyz; + xyz[0] = P::xmin + (i+0.5)*0.125*P::dx_ini; + xyz[1] = P::ymin + (j+0.5)*0.125*P::dy_ini; + xyz[2] = P::zmin + (k+0.5)*0.125*P::dz_ini; + + if (abs(xyz[0]) < AMR_L4width) + { + CellID myCell = mpiGrid.get_existing_cell(xyz); + mpiGrid.refine_completely(myCell); + } + } + } + } + refinedCells = mpiGrid.stop_refining(true); + if(myRank == MASTER_RANK) std::cout << "Finished fourth level of refinement" << endl; + mpiGrid.balance_load(); + } + + + return true; + } + }//namespace projects diff --git a/projects/IPShock/IPShock.h b/projects/IPShock/IPShock.h index e4fd60566..aae3e3b4b 100644 --- a/projects/IPShock/IPShock.h +++ b/projects/IPShock/IPShock.h @@ -87,6 +87,7 @@ namespace projects { virtual std::vector> getV0(creal x, creal y, creal z, const uint popID) const; //virtual void calcCellParameters(Real* cellParams,creal& t); virtual void calcCellParameters(spatial_cell::SpatialCell* cell, creal& t); + bool refineSpatialCells( dccrg::Dccrg& mpiGrid ) const; // Interpolate between up- and downstream quantities // based on position Real interpolate(Real u, Real d, Real x) const; @@ -107,6 +108,10 @@ namespace projects { int Bzdsign; Real Shockwidth; + Real AMR_L1width; + Real AMR_L2width; + Real AMR_L3width; + Real AMR_L4width; std::vector speciesParams; From a6f1daf057697635d267f455a7c39a3ec846ae7c Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Tue, 20 Aug 2019 11:16:11 +0300 Subject: [PATCH 541/602] Fix for Taito makefile to compile with openmpi 3 --- MAKE/Makefile.taito_gcc | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/MAKE/Makefile.taito_gcc b/MAKE/Makefile.taito_gcc index b5021553f..b8c4dd268 100644 --- a/MAKE/Makefile.taito_gcc +++ b/MAKE/Makefile.taito_gcc @@ -1,5 +1,5 @@ -CMP = mpiCC -LNK = mpiCC +CMP = mpic++ +LNK = mpic++ #======== Vectorization ========== #Set vector backend type for vlasov solvers, sets precision and length. 
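# Background for the wrapper rename above, not part of the original patch:
# newer Open MPI releases no longer install the legacy mpiCC alias (it
# collides with mpicc on case-insensitive filesystems), so mpic++ or
# mpicxx are the portable names for the C++ compiler wrapper. A minimal
# GNU make fallback sketch, with the probe below illustrative only:
#
#   CMP ?= $(or $(shell command -v mpic++ 2>/dev/null),mpicxx)
#   LNK ?= $(CMP)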
@@ -50,7 +50,7 @@ CXXFLAGS += -O3 -fopenmp -funroll-loops -std=c++0x -W -Wall -Wno-unused -fabi-ve testpackage: CXXFLAGS = -O2 -fopenmp -funroll-loops -std=c++0x -fabi-version=0 -mavx MATHFLAGS = -ffast-math -LDFLAGS = -lrt +LDFLAGS = -lrt -lgfortran LIB_MPI = -lgomp # BOOST_VERSION = current trilinos version @@ -58,7 +58,7 @@ LIB_MPI = -lgomp # #======== Libraries =========== -MPT_VERSION = 1.10.2 +MPT_VERSION = 3.1.3 JEMALLOC_VERSION = 4.0.4 LIBRARY_PREFIX = /proj/vlasov/libraries @@ -72,12 +72,14 @@ LIB_ZOLTAN = -L$(LIBRARY_PREFIX)/taito/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_B INC_JEMALLOC = -I$(LIBRARY_PREFIX)/taito/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/include LIB_JEMALLOC = -L$(LIBRARY_PREFIX)/taito/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib -ljemalloc +LDFLAGS += -Wl,-rpath=$(LIBRARY_PREFIX)/taito/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib INC_VLSV = -I$(LIBRARY_PREFIX)/taito/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/vlsv LIB_VLSV = -L$(LIBRARY_PREFIX)/taito/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/vlsv -lvlsv LIB_PROFILE = -L$(LIBRARY_PREFIX)/taito/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/2.0/lib -lphiprof INC_PROFILE = -I$(LIBRARY_PREFIX)/taito/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/2.0/include +LDFLAGS += -Wl,-rpath=$(LIBRARY_PREFIX)/taito/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/2.0/lib #LIB_PAPI = -L$(LIBRARY_PREFIX)/taito/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/papi/5.5.0/lib -lpapi #INC_PAPI = -I$(LIBRARY_PREFIX)/taito/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/papi/5.5.0/include From 785d387fed2913817b94f0176bfe3ffdb250f6e8 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Tue, 20 Aug 2019 11:17:41 +0300 Subject: [PATCH 542/602] Particle-pusher: Replace old MPI-2 C++ interface by C interface. --- particles/histogram.h | 7 ++++--- particles/particle_post_pusher.cpp | 5 +++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/particles/histogram.h b/particles/histogram.h index ffa056b31..542c49f42 100644 --- a/particles/histogram.h +++ b/particles/histogram.h @@ -49,7 +49,8 @@ class Histogram1D return; } - MPI::COMM_WORLD.Allreduce(bins,targetbins,num_bins,MPI::DOUBLE,MPI_SUM); + MPI_Allreduce(bins,targetbins,num_bins,MPI_DOUBLE,MPI_SUM, MPI_COMM_WORLD); + /* Throw away the old bins. */ delete[] bins; @@ -150,7 +151,7 @@ class Histogram2D return; } - MPI::COMM_WORLD.Allreduce(bins, targetbins, num_bins[0] * num_bins[1], MPI::DOUBLE, MPI_SUM); + MPI_Allreduce(bins,targetbins, num_bins[0] * num_bins[1], MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); /* Throw away the old bins. */ delete[] bins; @@ -398,7 +399,7 @@ class Histogram3D return; } - MPI::COMM_WORLD.Allreduce(bins, targetbins, num_bins[0] * num_bins[1] * num_bins[2], MPI::FLOAT, MPI_SUM); + MPI_Allreduce(bins, targetbins, num_bins[0] * num_bins[1] * num_bins[2], MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD); /* Throw away the old bins. 
*/ delete[] bins; diff --git a/particles/particle_post_pusher.cpp b/particles/particle_post_pusher.cpp index f3258ac52..7ed3e1b6b 100644 --- a/particles/particle_post_pusher.cpp +++ b/particles/particle_post_pusher.cpp @@ -39,7 +39,7 @@ int main(int argc, char** argv) { - MPI::Init(argc, argv); + MPI_Init(&argc, &argv); /* Parse commandline and config*/ Readparameters parameters(argc, argv, MPI_COMM_WORLD); @@ -47,6 +47,7 @@ int main(int argc, char** argv) { parameters.parse(false); // Parse parameters and don't require run_config if(!ParticleParameters::getParameters()) { std::cerr << "Parsing parameters failed, aborting." << std::endl; + std::cerr << "Did you add a --run_config=file.cfg parameter?" << std::endl; return 1; } @@ -176,6 +177,6 @@ int main(int argc, char** argv) { std::cerr << std::endl; - MPI::Finalize(); + MPI_Finalize(); return 0; } From e652ee3a6f6e8b4c59cd5d75189bc05e7a534d70 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Tue, 20 Aug 2019 13:17:15 +0300 Subject: [PATCH 543/602] Taito can't do -mavx2, changed to -mavx --- MAKE/Makefile.taito_gcc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAKE/Makefile.taito_gcc b/MAKE/Makefile.taito_gcc index b8c4dd268..d9c8f684f 100644 --- a/MAKE/Makefile.taito_gcc +++ b/MAKE/Makefile.taito_gcc @@ -46,7 +46,7 @@ FLAGS = #GNU flags: CC_BRAND = gcc CC_BRAND_VERSION = 4.9.3 -CXXFLAGS += -O3 -fopenmp -funroll-loops -std=c++0x -W -Wall -Wno-unused -fabi-version=0 -mavx2 +CXXFLAGS += -O3 -fopenmp -funroll-loops -std=c++0x -W -Wall -Wno-unused -fabi-version=0 -mavx testpackage: CXXFLAGS = -O2 -fopenmp -funroll-loops -std=c++0x -fabi-version=0 -mavx MATHFLAGS = -ffast-math From 05869e2504a64d36d03aeab4e0745deb8859ec2f Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Tue, 20 Aug 2019 13:19:59 +0300 Subject: [PATCH 544/602] Particles: Allow rhoV, rho and V field names to be specified by the user. This allows both old an new simulation input files to be used, and also allows the population to be selected in multipop simulation runs. 
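For reference, the intended conversion boils down to the following
standalone sketch; the VelocityInput struct and bulkVelocity helper are
illustrative names only, while the actual code in this patch operates on
buffers obtained through vlsvinterface::Reader:

#include <cstddef>
#include <string>
#include <vector>

struct VelocityInput {
   std::string V_field_name   = "V";    // e.g. "rho_v" in old files, "proton/V" in multipop runs
   std::string rho_field_name = "rho";  // only read when dividing
   bool divide_rhov_by_rho    = false;  // old files store rho_v and rho separately
};

// Turn whatever the file stores into a plain bulk-velocity buffer
// (3 components per cell in vBuffer, 1 value per cell in rhoBuffer).
std::vector<double> bulkVelocity(const VelocityInput& in,
                                 const std::vector<double>& vBuffer,
                                 const std::vector<double>& rhoBuffer) {
   if (!in.divide_rhov_by_rho) {
      return vBuffer;                   // file already stores V directly
   }
   std::vector<double> V(vBuffer.size());
   for (std::size_t i = 0; i < rhoBuffer.size(); ++i) {
      V[3*i]   = vBuffer[3*i]   / rhoBuffer[i];   // rho_v / rho -> V
      V[3*i+1] = vBuffer[3*i+1] / rhoBuffer[i];
      V[3*i+2] = vBuffer[3*i+2] / rhoBuffer[i];
   }
   return V;
}

Keeping the old behaviour behind divide_rhov_by_rho=1, as the updated
particles.cfg in this patch does, lets the same binary read both pre- and
post-multipop bulk files.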
--- particles/particleparameters.cpp | 13 +++++++-- particles/particleparameters.h | 3 +++ particles/particles.cfg | 6 ++++- particles/readfields.cpp | 1 + particles/readfields.h | 46 ++++++++++++++++++++------------ 5 files changed, 49 insertions(+), 20 deletions(-) diff --git a/particles/particleparameters.cpp b/particles/particleparameters.cpp index dac68c8a5..2d45abcda 100644 --- a/particles/particleparameters.cpp +++ b/particles/particleparameters.cpp @@ -40,6 +40,9 @@ Real P::input_dt = 1; Real P::start_time = 0; Real P::end_time = 0; uint64_t P::num_particles = 0; +std::string P::V_field_name = "V"; +std::string P::rho_field_name = "rho"; +bool P::divide_rhov_by_rho = false; std::default_random_engine::result_type P::random_seed = 1; Distribution* (*P::distribution)(std::default_random_engine&) = NULL; @@ -88,6 +91,9 @@ bool ParticleParameters::addParameters() { Readparameters::add("particles.start_time", "Simulation time (seconds) for particle start.",0); Readparameters::add("particles.end_time", "Simulation time (seconds) at which particle simulation stops.",0); Readparameters::add("particles.num_particles", "Number of particles to simulate.",10000); + Readparameters::add("particles.V_field_name", "Name of the Velocity data set in the input files", "V"); + Readparameters::add("particles.rho_field_name", "Name of the Density data set in the input files", "rho"); + Readparameters::add("particles.divide_rhov_by_rho", "Do the input file store rho_v and rho separately?", false); Readparameters::add("particles.random_seed", "Random seed for particle creation.",1); Readparameters::add("particles.distribution", "Type of distribution function to sample particles from.", std::string("maxwell")); @@ -161,10 +167,13 @@ bool ParticleParameters::getParameters() { Readparameters::get("particles.start_time",P::start_time); Readparameters::get("particles.end_time",P::end_time); Readparameters::get("particles.num_particles",P::num_particles); - if(P::dt == 0 || P::end_time <= P::start_time) { - std::cerr << "Error end_time <= start_time! Won't do anything (and will probably crash now)." << std::endl; + if(P::dt == 0 || P::end_time == P::start_time) { + std::cerr << "Error end_time == start_time! Won't do anything (and will probably crash now)." << std::endl; return false; } + Readparameters::get("particles.V_field_name",P::V_field_name); + Readparameters::get("particles.rho_field_name",P::rho_field_name); + Readparameters::get("particles.divide_rhov_by_rho",P::divide_rhov_by_rho); Readparameters::get("particles.random_seed",P::random_seed); diff --git a/particles/particleparameters.h b/particles/particleparameters.h index 30e1361aa..2f0c1a31b 100644 --- a/particles/particleparameters.h +++ b/particles/particleparameters.h @@ -45,6 +45,9 @@ struct ParticleParameters { static Real input_dt; /*!< Time interval between input files */ static uint64_t num_particles; /*!< Number of particles to generate */ + static std::string V_field_name; /*!< Name of the Velocity data set to read */ + static std::string rho_field_name; /*!< Name of the Density data set to read */ + static bool divide_rhov_by_rho; /*!< Does the file store rho_v and rho separately? 
*/ static Boundary* boundary_behaviour_x; /*!< What to do with particles that reach the x boundary */ static Boundary* boundary_behaviour_y; /*!< What to do with particles that reach the y boundary */ diff --git a/particles/particles.cfg b/particles/particles.cfg index 0761d60d8..02bda4ad1 100644 --- a/particles/particles.cfg +++ b/particles/particles.cfg @@ -4,7 +4,11 @@ # with an integer argument counting up the input files. # Typical vlasiator output files have the form "bulk.%07i.vlsv" #input_filename_pattern = /lustre/tmp/alfthan/2D/sisu_equatorial_7/bulk.%07i.vlsv -input_filename_pattern = /tmp/lustre/tmp/alfthan/2D/ABC/bulk/bulk.%07i.vlsv +input_filename_pattern = /proj/vlasov/2D/ABC/bulk/bulk.%07i.vlsv + +V_field_name = rho_v +rho_field_name = rho +divide_rhov_by_rho = 1 # Output filename pattern, similar format as before. output_filename_pattern = particles.%07i.vlsv diff --git a/particles/readfields.cpp b/particles/readfields.cpp index a95a3fc46..be66add31 100644 --- a/particles/readfields.cpp +++ b/particles/readfields.cpp @@ -27,6 +27,7 @@ #include "readfields.h" #include "vectorclass.h" #include "vector3d.h" +#include "particleparameters.h" #include "../definitions.h" /* Debugging image output */ diff --git a/particles/readfields.h b/particles/readfields.h index 6972e4fa2..4d10594db 100644 --- a/particles/readfields.h +++ b/particles/readfields.h @@ -33,6 +33,8 @@ extern std::string B_field_name; extern std::string E_field_name; +extern std::string V_field_name; +extern bool do_divide_by_rho; /* Read the cellIDs into an array */ std::vector readCellIds(vlsvinterface::Reader& r); @@ -143,14 +145,16 @@ bool readNextTimestep(const std::string& filename_pattern, double t, int step, F std::vector Ebuffer = readFieldData(r,name,3u); std::vector Vbuffer; if(doV) { - name = "rho_v"; + name = ParticleParameters::V_field_name; std::vector rho_v_buffer = readFieldData(r,name,3u); - name = "rho"; - std::vector rho_buffer = readFieldData(r,name,1u); - for(unsigned int i=0; i rho_buffer = readFieldData(r,name,1u); + for(unsigned int i=0; i rho_v_buffer,rho_buffer; if(doV) { - name = "rho_v"; + name = ParticleParameters::V_field_name; rho_v_buffer = readFieldData(r,name,3u); - name = "rho"; - rho_buffer = readFieldData(r,name,1u); + if(ParticleParameters::divide_rhov_by_rho) { + name = ParticleParameters::rho_field_name; + rho_buffer = readFieldData(r,name,1u); + } } /* Coordinate Boundaries */ @@ -253,9 +259,9 @@ void readfields(const char* filename, Field& E, Field& B, Field& V, bool doV=tru /* Allocate space for the actual field structures */ E.data.resize(4*cells[0]*cells[1]*cells[2]); B.data.resize(4*cells[0]*cells[1]*cells[2]); - if(doV) { - V.data.resize(4*cells[0]*cells[1]*cells[2]); - } + if(doV) { + V.data.resize(4*cells[0]*cells[1]*cells[2]); + } /* Sanity-check stored data sizes */ if(3*cellIds.size() != Bbuffer.size()) { @@ -274,7 +280,7 @@ void readfields(const char* filename, Field& E, Field& B, Field& V, bool doV=tru << std::endl; exit(1); } - if(cellIds.size() != rho_buffer.size()) { + if(ParticleParameters::divide_rhov_by_rho && cellIds.size() != rho_buffer.size()) { std::cerr << "cellIDs.size (" << cellIds.size() << ") != rho_buffer.size (" << Ebuffer.size() << ")!" 
<< std::endl; exit(1); @@ -318,9 +324,15 @@ void readfields(const char* filename, Field& E, Field& B, Field& V, bool doV=tru if(doV) { double* Vtgt = V.getCellRef(x,y,z); - Vtgt[0] = rho_v_buffer[3*i] / rho_buffer[i]; - Vtgt[1] = rho_v_buffer[3*i+1] / rho_buffer[i]; - Vtgt[2] = rho_v_buffer[3*i+2] / rho_buffer[i]; + if(ParticleParameters::divide_rhov_by_rho) { + Vtgt[0] = rho_v_buffer[3*i] / rho_buffer[i]; + Vtgt[1] = rho_v_buffer[3*i+1] / rho_buffer[i]; + Vtgt[2] = rho_v_buffer[3*i+2] / rho_buffer[i]; + } else { + Vtgt[0] = rho_v_buffer[3*i]; + Vtgt[1] = rho_v_buffer[3*i+1]; + Vtgt[2] = rho_v_buffer[3*i+2]; + } } } From 4430120d783e9422eb727bf554e6b99c22f26f46 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Tue, 20 Aug 2019 13:32:54 +0300 Subject: [PATCH 545/602] Particle pusher: flip B if dt is negative --- particles/particle_post_pusher.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/particles/particle_post_pusher.cpp b/particles/particle_post_pusher.cpp index 7ed3e1b6b..0f9510eb4 100644 --- a/particles/particle_post_pusher.cpp +++ b/particles/particle_post_pusher.cpp @@ -137,6 +137,11 @@ int main(int argc, char** argv) { Eval = cur_E(particles[i].x); Bval = cur_B(particles[i].x); + if(dt < 0) { + // If propagating backwards in time, flip B-field pseudovector + Bval *= -1; + } + /* Push them around */ particles[i].push(Bval,Eval,dt); From 3961851551e175db62d214f253d2d8875b1bfd80 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Fri, 30 Aug 2019 10:50:09 +0300 Subject: [PATCH 546/602] method works, attempting to use threshold for scaling, but version.cpp throws errors --- mini-apps/acceleration-vlasiator/map_test.cpp | 16 ++++++++++------ vlasovsolver/cpu_1d_plm.hpp | 8 +++++--- vlasovsolver/cpu_1d_ppm.hpp | 6 +++--- vlasovsolver/cpu_1d_pqm.hpp | 4 ++-- vlasovsolver/cpu_acc_map.cpp | 6 +++--- vlasovsolver/cpu_face_estimates.hpp | 17 ++++++++++++----- vlasovsolver/cpu_trans_map.cpp | 6 +++--- 7 files changed, 38 insertions(+), 25 deletions(-) diff --git a/mini-apps/acceleration-vlasiator/map_test.cpp b/mini-apps/acceleration-vlasiator/map_test.cpp index b30ff83ff..c71268f44 100644 --- a/mini-apps/acceleration-vlasiator/map_test.cpp +++ b/mini-apps/acceleration-vlasiator/map_test.cpp @@ -5,6 +5,10 @@ #include "vlasovsolver/cpu_1d_ppm.hpp" #include "vlasovsolver/cpu_1d_plm.hpp" +#define cell_pop_threshold 1.e-15 +// In vlasiator, called via spatial_cell->getVelocityBlockMinValue(popID) + + /*print all values in the vector valued values array. 
In this array there are blocks_per_dim blocks with a width of WID*/ void print_values(int step, Vec *values, uint blocks_per_dim, Real v_min, Real dv){ @@ -56,15 +60,15 @@ void propagate(Vec values[], uint blocks_per_dim, Real v_min, Real dv, #ifdef ACC_SEMILAG_PLM Vec a[2]; - compute_plm_coeff(values, (k_block + 1) * WID + k_cell , a); + compute_plm_coeff(values, (k_block + 1) * WID + k_cell , a, cell_pop_threshold); #endif #ifdef ACC_SEMILAG_PPM Vec a[3]; - compute_ppm_coeff(values, h6, (k_block + 1) * WID + k_cell , a); + compute_ppm_coeff(values, h6, (k_block + 1) * WID + k_cell , a, cell_pop_threshold); #endif #ifdef ACC_SEMILAG_PQM Vec a[5]; - compute_pqm_coeff(values, h8, (k_block + 1) * WID + k_cell , a); + compute_pqm_coeff(values, h8, (k_block + 1) * WID + k_cell , a, cell_pop_threshold); #endif @@ -195,15 +199,15 @@ void print_reconstruction(int step, Vec values[], uint blocks_per_dim, Real v_m for (uint k_cell=0; k_cell + p_face = select(-(p_face - m_face) * (p_face - m_face) * one_sixth > (p_face - m_face) * (values[k] - 0.5 * (m_face + p_face)), 3 * values[k] - 2 * m_face, p_face); diff --git a/vlasovsolver/cpu_1d_pqm.hpp b/vlasovsolver/cpu_1d_pqm.hpp index 54c771e6f..d969fbd37 100644 --- a/vlasovsolver/cpu_1d_pqm.hpp +++ b/vlasovsolver/cpu_1d_pqm.hpp @@ -152,13 +152,13 @@ inline void filter_pqm_monotonicity(Vec *values, uint k, Vec &fv_l, Vec &fv_r, V // White, Laurent, and Alistair Adcroft. “A High-Order Finite Volume Remapping Scheme for Nonuniform Grids: The Piecewise Quartic Method (PQM).” Journal of Computational Physics 227, no. 15 (July 2008): 7394–7422. doi:10.1016/j.jcp.2008.04.026. // */ -inline void compute_pqm_coeff(Vec *values, face_estimate_order order, uint k, Vec a[5]){ +inline void compute_pqm_coeff(Vec *values, face_estimate_order order, uint k, Vec a[5], const Real threshold){ Vec fv_l; /*left face value*/ Vec fv_r; /*right face value*/ Vec fd_l; /*left face derivative*/ Vec fd_r; /*right face derivative*/ - compute_filtered_face_values_derivatives(values, k, order, fv_l, fv_r, fd_l, fd_r); + compute_filtered_face_values_derivatives(values, k, order, fv_l, fv_r, fd_l, fd_r, threshold); filter_pqm_monotonicity(values, k, fv_l, fv_r, fd_l, fd_r); //Fit a second order polynomial for reconstruction see, e.g., White diff --git a/vlasovsolver/cpu_acc_map.cpp b/vlasovsolver/cpu_acc_map.cpp index a27bbc594..8ead6e1b8 100644 --- a/vlasovsolver/cpu_acc_map.cpp +++ b/vlasovsolver/cpu_acc_map.cpp @@ -471,15 +471,15 @@ bool map_1d(SpatialCell* spatial_cell, // k + WID is the index where we have stored k index, WID amount of padding. 
#ifdef ACC_SEMILAG_PLM Vec a[2]; - compute_plm_coeff(values + valuesColumnOffset + i_pcolumnv(j, 0, -1, n_cblocks), k + WID , a); + compute_plm_coeff(values + valuesColumnOffset + i_pcolumnv(j, 0, -1, n_cblocks), k + WID , a, spatial_cell->getVelocityBlockMinValue(popID)); #endif #ifdef ACC_SEMILAG_PPM Vec a[3]; - compute_ppm_coeff(values + valuesColumnOffset + i_pcolumnv(j, 0, -1, n_cblocks), h4, k + WID, a); + compute_ppm_coeff(values + valuesColumnOffset + i_pcolumnv(j, 0, -1, n_cblocks), h4, k + WID, a, spatial_cell->getVelocityBlockMinValue(popID)); #endif #ifdef ACC_SEMILAG_PQM Vec a[5]; - compute_pqm_coeff(values + valuesColumnOffset + i_pcolumnv(j, 0, -1, n_cblocks), h8, k + WID, a); + compute_pqm_coeff(values + valuesColumnOffset + i_pcolumnv(j, 0, -1, n_cblocks), h8, k + WID, a, spatial_cell->getVelocityBlockMinValue(popID)); #endif // set the initial value for the integrand at the boundary at v = 0 diff --git a/vlasovsolver/cpu_face_estimates.hpp b/vlasovsolver/cpu_face_estimates.hpp index b71447b46..d95e60645 100644 --- a/vlasovsolver/cpu_face_estimates.hpp +++ b/vlasovsolver/cpu_face_estimates.hpp @@ -212,7 +212,8 @@ inline void compute_h3_left_face_derivative(const Vec * const values, uint k, Ve 3) Makes sure face slopes are consistent with PLM slope */ inline void compute_filtered_face_values_derivatives(const Vec * const values,uint k, face_estimate_order order, - Vec &fv_l, Vec &fv_r, Vec &fd_l, Vec &fd_r){ + Vec &fv_l, Vec &fv_r, Vec &fd_l, Vec &fd_r, + const Real threshold){ switch(order){ case h4: @@ -242,8 +243,11 @@ inline void compute_filtered_face_values_derivatives(const Vec * const values,ui } Vec slope_abs,slope_sign; - slope_limiter(values[k -1], values[k], values[k + 1], slope_abs, slope_sign); - + // scale values closer to 1 for more accurate slope limiter calculation + const real scale = 1./threshold + slope_limiter(values[k -1]*scale, values[k]*scale, values[k + 1]*scale, slope_abs, slope_sign); + slope_abs = slope_abs*scale; + //check for extrema, flatten if it is Vecb is_extrema = (slope_abs == Vec(0.0)); if(horizontal_or(is_extrema)) { @@ -278,7 +282,7 @@ inline void compute_filtered_face_values_derivatives(const Vec * const values,ui 2) Makes face values bounded 3) Makes sure face slopes are consistent with PLM slope */ -inline void compute_filtered_face_values(const Vec * const values,uint k, face_estimate_order order, Vec &fv_l, Vec &fv_r){ +inline void compute_filtered_face_values(const Vec * const values,uint k, face_estimate_order order, Vec &fv_l, Vec &fv_r, const Real threshold){ switch(order){ case h4: compute_h4_left_face_value(values, k, fv_l); @@ -298,7 +302,10 @@ inline void compute_filtered_face_values(const Vec * const values,uint k, face_e break; } Vec slope_abs,slope_sign; - slope_limiter(values[k -1], values[k], values[k + 1], slope_abs, slope_sign); + // scale values closer to 1 for more accurate slope limiter calculation + const real scale = 1./threshold + slope_limiter(values[k -1]*scale, values[k]*scale, values[k + 1]*scale, slope_abs, slope_sign); + slope_abs = slope_abs*scale; //check for extrema, flatten if it is Vecb is_extrema = (slope_abs == Vec(0.0)); diff --git a/vlasovsolver/cpu_trans_map.cpp b/vlasovsolver/cpu_trans_map.cpp index 5925e7684..fa99bdbfe 100644 --- a/vlasovsolver/cpu_trans_map.cpp +++ b/vlasovsolver/cpu_trans_map.cpp @@ -517,17 +517,17 @@ bool trans_map_1d(const dccrg::Dccrg& mpi //compute reconstruction #ifdef TRANS_SEMILAG_PLM Vec a[3]; - compute_plm_coeff(values + i_trans_ps_blockv(planeVector, k, 
-VLASOV_STENCIL_WIDTH), VLASOV_STENCIL_WIDTH, a); + compute_plm_coeff(values + i_trans_ps_blockv(planeVector, k, -VLASOV_STENCIL_WIDTH), VLASOV_STENCIL_WIDTH, a, spatial_cell->getVelocityBlockMinValue(popID)); #endif #ifdef TRANS_SEMILAG_PPM Vec a[3]; //Check that stencil width VLASOV_STENCIL_WIDTH in grid.h corresponds to order of face estimates (h4 & h5 =2, H6=3, h8=4) - compute_ppm_coeff(values + i_trans_ps_blockv(planeVector, k, -VLASOV_STENCIL_WIDTH), h4, VLASOV_STENCIL_WIDTH, a); + compute_ppm_coeff(values + i_trans_ps_blockv(planeVector, k, -VLASOV_STENCIL_WIDTH), h4, VLASOV_STENCIL_WIDTH, a, spatial_cell->getVelocityBlockMinValue(popID)); #endif #ifdef TRANS_SEMILAG_PQM Vec a[5]; //Check that stencil width VLASOV_STENCIL_WIDTH in grid.h corresponds to order of face estimates (h4 & h5 =2, H6=3, h8=4) - compute_pqm_coeff(values + i_trans_ps_blockv(planeVector, k, -VLASOV_STENCIL_WIDTH), h6, VLASOV_STENCIL_WIDTH, a); + compute_pqm_coeff(values + i_trans_ps_blockv(planeVector, k, -VLASOV_STENCIL_WIDTH), h6, VLASOV_STENCIL_WIDTH, a, spatial_cell->getVelocityBlockMinValue(popID)); #endif #ifdef TRANS_SEMILAG_PLM From 2eccc74dcfd6e08186e675d69740d6877049bdc6 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Fri, 30 Aug 2019 12:55:01 +0300 Subject: [PATCH 547/602] Fixed variable types and PLM scaling --- vlasovsolver/cpu_1d_plm.hpp | 6 +++--- vlasovsolver/cpu_face_estimates.hpp | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/vlasovsolver/cpu_1d_plm.hpp b/vlasovsolver/cpu_1d_plm.hpp index 9b6183611..9303fa40b 100644 --- a/vlasovsolver/cpu_1d_plm.hpp +++ b/vlasovsolver/cpu_1d_plm.hpp @@ -40,9 +40,9 @@ The factor 2.0 is in the polynom to ease integration, then integral is a[0]*t + inline void compute_plm_coeff(const Vec * const values, uint k, Vec a[2], const Real threshold){ // scale values closer to 1 for more accurate slope limiter calculation - const real scale = 1./threshold - const Vec d_cv=slope_limiter(values[k - 1]*scale, values[k]*scale, values[k + 1]*scale); - a[0] = values[k]*scale - d_cv * 0.5; + const Realv scale = 1./threshold; + const Vec d_cv=slope_limiter(values[k - 1]*scale, values[k]*scale, values[k + 1]*scale)*threshold; + a[0] = values[k] - d_cv * 0.5; a[1] = d_cv * 0.5; } diff --git a/vlasovsolver/cpu_face_estimates.hpp b/vlasovsolver/cpu_face_estimates.hpp index d95e60645..2bcf6fb0b 100644 --- a/vlasovsolver/cpu_face_estimates.hpp +++ b/vlasovsolver/cpu_face_estimates.hpp @@ -244,9 +244,9 @@ inline void compute_filtered_face_values_derivatives(const Vec * const values,ui Vec slope_abs,slope_sign; // scale values closer to 1 for more accurate slope limiter calculation - const real scale = 1./threshold + const Realv scale = 1./threshold; slope_limiter(values[k -1]*scale, values[k]*scale, values[k + 1]*scale, slope_abs, slope_sign); - slope_abs = slope_abs*scale; + slope_abs = slope_abs*threshold; //check for extrema, flatten if it is Vecb is_extrema = (slope_abs == Vec(0.0)); @@ -303,9 +303,9 @@ inline void compute_filtered_face_values(const Vec * const values,uint k, face_e } Vec slope_abs,slope_sign; // scale values closer to 1 for more accurate slope limiter calculation - const real scale = 1./threshold + const Realv scale = 1./threshold; slope_limiter(values[k -1]*scale, values[k]*scale, values[k + 1]*scale, slope_abs, slope_sign); - slope_abs = slope_abs*scale; + slope_abs = slope_abs*threshold; //check for extrema, flatten if it is Vecb is_extrema = (slope_abs == Vec(0.0)); From 20c54d8185f3b04feb9773777d6f065c8f18acf6 Mon Sep 17 
00:00:00 2001 From: Markus Battarbee Date: Fri, 30 Aug 2019 13:48:37 +0300 Subject: [PATCH 548/602] Making required changes to AMR portions of code as well --- vlasovsolver/cpu_1d_plm.hpp | 2 +- vlasovsolver/cpu_1d_ppm.hpp | 2 +- vlasovsolver/cpu_1d_ppm_nonuniform.hpp | 4 ++-- .../cpu_1d_ppm_nonuniform_conserving.hpp | 4 ++-- vlasovsolver/cpu_1d_pqm.hpp | 2 +- vlasovsolver/cpu_face_estimates.hpp | 18 ++++++++++++------ vlasovsolver/cpu_trans_map_amr.cpp | 7 ++++--- vlasovsolver/cpu_trans_map_amr.hpp | 2 +- 8 files changed, 24 insertions(+), 17 deletions(-) diff --git a/vlasovsolver/cpu_1d_plm.hpp b/vlasovsolver/cpu_1d_plm.hpp index 9303fa40b..fe12ec89c 100644 --- a/vlasovsolver/cpu_1d_plm.hpp +++ b/vlasovsolver/cpu_1d_plm.hpp @@ -38,7 +38,7 @@ t=(v-v_{i-0.5})/dv where v_{i-0.5} is the left face of a cell The factor 2.0 is in the polynom to ease integration, then integral is a[0]*t + a[1]*t**2 */ -inline void compute_plm_coeff(const Vec * const values, uint k, Vec a[2], const Real threshold){ +inline void compute_plm_coeff(const Vec * const values, uint k, Vec a[2], const Realv threshold){ // scale values closer to 1 for more accurate slope limiter calculation const Realv scale = 1./threshold; const Vec d_cv=slope_limiter(values[k - 1]*scale, values[k]*scale, values[k + 1]*scale)*threshold; diff --git a/vlasovsolver/cpu_1d_ppm.hpp b/vlasovsolver/cpu_1d_ppm.hpp index 5bceecca1..2e32373d6 100644 --- a/vlasovsolver/cpu_1d_ppm.hpp +++ b/vlasovsolver/cpu_1d_ppm.hpp @@ -35,7 +35,7 @@ using namespace std; /* Compute parabolic reconstruction with an explicit scheme */ -inline void compute_ppm_coeff(const Vec * const values, face_estimate_order order, uint k, Vec a[3], const Real threshold){ +inline void compute_ppm_coeff(const Vec * const values, face_estimate_order order, uint k, Vec a[3], const Realv threshold){ Vec fv_l; /*left face value*/ Vec fv_r; /*right face value*/ compute_filtered_face_values(values, k, order, fv_l, fv_r, threshold); diff --git a/vlasovsolver/cpu_1d_ppm_nonuniform.hpp b/vlasovsolver/cpu_1d_ppm_nonuniform.hpp index 8e4808399..93eac5014 100644 --- a/vlasovsolver/cpu_1d_ppm_nonuniform.hpp +++ b/vlasovsolver/cpu_1d_ppm_nonuniform.hpp @@ -35,10 +35,10 @@ using namespace std; /* Compute parabolic reconstruction with an explicit scheme */ -inline void compute_ppm_coeff_nonuniform(const Vec * const dv, const Vec * const values, face_estimate_order order, uint k, Vec a[3]){ +inline void compute_ppm_coeff_nonuniform(const Vec * const dv, const Vec * const values, face_estimate_order order, uint k, Vec a[3], const Realv threshold){ Vec fv_l; /*left face value*/ Vec fv_r; /*right face value*/ - compute_filtered_face_values_nonuniform(dv, values, k, order, fv_l, fv_r); + compute_filtered_face_values_nonuniform(dv, values, k, order, fv_l, fv_r, threshold); //Coella et al, check for monotonicity Vec m_face = fv_l; diff --git a/vlasovsolver/cpu_1d_ppm_nonuniform_conserving.hpp b/vlasovsolver/cpu_1d_ppm_nonuniform_conserving.hpp index 3e30e6d32..19d7d46ad 100644 --- a/vlasovsolver/cpu_1d_ppm_nonuniform_conserving.hpp +++ b/vlasovsolver/cpu_1d_ppm_nonuniform_conserving.hpp @@ -35,10 +35,10 @@ using namespace std; /* Compute parabolic reconstruction with an explicit scheme */ -inline void compute_ppm_coeff_nonuniform(const Vec * const dv, const Vec * const values, face_estimate_order order, uint k, Vec a[3]){ +inline void compute_ppm_coeff_nonuniform(const Vec * const dv, const Vec * const values, face_estimate_order order, uint k, Vec a[3], const Realv threshold){ Vec fv_l; /*left face 
value*/ Vec fv_r; /*right face value*/ - compute_filtered_face_values_nonuniform_conserving(dv, values, k, order, fv_l, fv_r); + compute_filtered_face_values_nonuniform_conserving(dv, values, k, order, fv_l, fv_r, threshold); //Coella et al, check for monotonicity Vec m_face = fv_l; diff --git a/vlasovsolver/cpu_1d_pqm.hpp b/vlasovsolver/cpu_1d_pqm.hpp index d969fbd37..d134959c1 100644 --- a/vlasovsolver/cpu_1d_pqm.hpp +++ b/vlasovsolver/cpu_1d_pqm.hpp @@ -152,7 +152,7 @@ inline void filter_pqm_monotonicity(Vec *values, uint k, Vec &fv_l, Vec &fv_r, V // White, Laurent, and Alistair Adcroft. “A High-Order Finite Volume Remapping Scheme for Nonuniform Grids: The Piecewise Quartic Method (PQM).” Journal of Computational Physics 227, no. 15 (July 2008): 7394–7422. doi:10.1016/j.jcp.2008.04.026. // */ -inline void compute_pqm_coeff(Vec *values, face_estimate_order order, uint k, Vec a[5], const Real threshold){ +inline void compute_pqm_coeff(Vec *values, face_estimate_order order, uint k, Vec a[5], const Realv threshold){ Vec fv_l; /*left face value*/ Vec fv_r; /*right face value*/ Vec fd_l; /*left face derivative*/ diff --git a/vlasovsolver/cpu_face_estimates.hpp b/vlasovsolver/cpu_face_estimates.hpp index 2bcf6fb0b..1acf96de6 100644 --- a/vlasovsolver/cpu_face_estimates.hpp +++ b/vlasovsolver/cpu_face_estimates.hpp @@ -213,7 +213,7 @@ inline void compute_h3_left_face_derivative(const Vec * const values, uint k, Ve */ inline void compute_filtered_face_values_derivatives(const Vec * const values,uint k, face_estimate_order order, Vec &fv_l, Vec &fv_r, Vec &fd_l, Vec &fd_r, - const Real threshold){ + const Realv threshold){ switch(order){ case h4: @@ -282,7 +282,7 @@ inline void compute_filtered_face_values_derivatives(const Vec * const values,ui 2) Makes face values bounded 3) Makes sure face slopes are consistent with PLM slope */ -inline void compute_filtered_face_values(const Vec * const values,uint k, face_estimate_order order, Vec &fv_l, Vec &fv_r, const Real threshold){ +inline void compute_filtered_face_values(const Vec * const values,uint k, face_estimate_order order, Vec &fv_l, Vec &fv_r, const Realv threshold){ switch(order){ case h4: compute_h4_left_face_value(values, k, fv_l); @@ -330,7 +330,7 @@ inline void compute_filtered_face_values(const Vec * const values,uint k, face_e } -inline void compute_filtered_face_values_nonuniform(const Vec * const dv, const Vec * const values,uint k, face_estimate_order order, Vec &fv_l, Vec &fv_r){ +inline void compute_filtered_face_values_nonuniform(const Vec * const dv, const Vec * const values,uint k, face_estimate_order order, Vec &fv_l, Vec &fv_r, const Realv threshold){ switch(order){ case h4: compute_h4_left_face_value_nonuniform(dv, values, k, fv_l); @@ -352,7 +352,10 @@ inline void compute_filtered_face_values_nonuniform(const Vec * const dv, const break; } Vec slope_abs,slope_sign; - slope_limiter(values[k -1], values[k], values[k + 1], slope_abs, slope_sign); + // scale values closer to 1 for more accurate slope limiter calculation + const Realv scale = 1./threshold; + slope_limiter(values[k -1]*scale, values[k]*scale, values[k + 1]*scale, slope_abs, slope_sign); + slope_abs = slope_abs*threshold; //check for extrema, flatten if it is Vecb is_extrema = (slope_abs == Vec(0.0)); @@ -442,7 +445,7 @@ inline pair constrain_face_values(const Vec * h,const Vec * values,uint return faceInterpolants; } -inline void compute_filtered_face_values_nonuniform_conserving(const Vec * const dv, const Vec * const values,uint k, face_estimate_order order, 
Vec &fv_l, Vec &fv_r){ +inline void compute_filtered_face_values_nonuniform_conserving(const Vec * const dv, const Vec * const values,uint k, face_estimate_order order, Vec &fv_l, Vec &fv_r, const Realv threshold){ switch(order){ case h4: compute_h4_left_face_value_nonuniform(dv, values, k, fv_l); @@ -465,7 +468,10 @@ inline void compute_filtered_face_values_nonuniform_conserving(const Vec * const } Vec slope_abs,slope_sign; - slope_limiter(values[k -1], values[k], values[k + 1], slope_abs, slope_sign); + // scale values closer to 1 for more accurate slope limiter calculation + const Realv scale = 1./threshold; + slope_limiter(values[k -1]*scale, values[k]*scale, values[k + 1]*scale, slope_abs, slope_sign); + slope_abs = slope_abs*threshold; //check for extrema Vecb is_extrema = (slope_abs == Vec(0.0)); diff --git a/vlasovsolver/cpu_trans_map_amr.cpp b/vlasovsolver/cpu_trans_map_amr.cpp index 00a2a706f..750830e6d 100644 --- a/vlasovsolver/cpu_trans_map_amr.cpp +++ b/vlasovsolver/cpu_trans_map_amr.cpp @@ -510,7 +510,8 @@ setOfPencils buildPencilsWithNeighbors( const dccrg::Dccrg &vmesh, const uint lengthOfPencil) { + const vmesh::VelocityMesh &vmesh, + const uint lengthOfPencil, const Realv threshold) { // Get velocity data from vmesh that we need later to calculate the translation velocity_block_indices_t block_indices; @@ -572,7 +573,7 @@ void propagatePencil(Vec* dz, Vec* values, const uint dimension, // i + VLASOV_STENCIL_WIDTH will point to the right cell. Complicated! Why! Sad! MVGA! compute_ppm_coeff_nonuniform(dz, values + i_trans_ps_blockv_pencil(planeVector, k, i-VLASOV_STENCIL_WIDTH, lengthOfPencil), - h4, VLASOV_STENCIL_WIDTH, a); + h4, VLASOV_STENCIL_WIDTH, a, threshold); // Compute integral const Vec ngbr_target_density = @@ -1192,7 +1193,7 @@ bool trans_map_1d_amr(const dccrg::Dccrg& // Dz and sourceVecData are both padded by VLASOV_STENCIL_WIDTH // Dz has 1 value/cell, sourceVecData has WID3 values/cell - propagatePencil(dz.data(), sourceVecData.data(), dimension, blockGID, dt, vmesh, L); + propagatePencil(dz.data(), sourceVecData.data(), dimension, blockGID, dt, vmesh, L, sourceCells[0]->getVelocityBlockMinValue(popID)); // sourceVecData => targetBlockData[this pencil]) diff --git a/vlasovsolver/cpu_trans_map_amr.hpp b/vlasovsolver/cpu_trans_map_amr.hpp index d7738a663..ae9a60e0f 100644 --- a/vlasovsolver/cpu_trans_map_amr.hpp +++ b/vlasovsolver/cpu_trans_map_amr.hpp @@ -181,7 +181,7 @@ CellID selectNeighbor(const dccrg::Dccrg void propagatePencil(Vec* dz, Vec* values, const uint dimension, const uint blockGID, const Realv dt, const vmesh::VelocityMesh &vmesh, - const uint lengthOfPencil); + const uint lengthOfPencil, const Realv threshold); From 40663e34909004d860e0138e0059461d10ddef25 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Fri, 30 Aug 2019 13:59:03 +0300 Subject: [PATCH 549/602] Also updated amr_reconstruction mini-app --- mini-apps/amr_reconstruction/map_test.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/mini-apps/amr_reconstruction/map_test.cpp b/mini-apps/amr_reconstruction/map_test.cpp index b030785c9..9e1996c89 100644 --- a/mini-apps/amr_reconstruction/map_test.cpp +++ b/mini-apps/amr_reconstruction/map_test.cpp @@ -10,6 +10,9 @@ #include #include +#define cell_pop_threshold 1.e-15 +// In vlasiator, called via spatial_cell->getVelocityBlockMinValue(popID) + /*print all values in the vector valued values array. 
In this array there are blocks_per_dim blocks with a width of WID*/ void print_values(int step, Vec *values, uint blocks_per_dim, Real v_min, Real dv){ @@ -73,7 +76,7 @@ void propagate(Vec dr[], Vec values[], Real z_translation, uint blocks_per_dim ) // Compute polynomial coefficients Vec a[3]; //compute_ppm_coeff_nonuniform(dr, values, h4, gid + target_scell_index, a); - compute_ppm_coeff_nonuniform(dr, values, h4, gid, a); + compute_ppm_coeff_nonuniform(dr, values, h4, gid, a, cell_pop_threshold); // Compute integral const Vec ngbr_target_density = @@ -116,7 +119,7 @@ void print_reconstruction(int step, Vec dr[], Vec values[], uint blocks_per_dim #ifdef ACC_SEMILAG_PPM Vec a[3]; //compute_ppm_coeff( values, h4, (k_block + 1) * WID + k_cell, a); - compute_ppm_coeff_nonuniform(dr, values, h4, (k_block + 1) * WID + k_cell, a); + compute_ppm_coeff_nonuniform(dr, values, h4, (k_block + 1) * WID + k_cell, a, cell_pop_threshold); #endif int iend = k_block * WID + k_cell; From fc25b8243af30a2e02804a582330c52686961fbb Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Tue, 3 Sep 2019 14:30:14 +0300 Subject: [PATCH 550/602] Required changes, variable names to diff updated, proton_thermal cfg group rename --- testpackage/small_test_definitions.sh | 34 +++++++++---------- .../Magnetosphere_polar_small.cfg | 2 +- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/testpackage/small_test_definitions.sh b/testpackage/small_test_definitions.sh index 8e5fb1882..e552aaa2c 100644 --- a/testpackage/small_test_definitions.sh +++ b/testpackage/small_test_definitions.sh @@ -23,7 +23,7 @@ test_name[1]="acctest_2_maxw_500k_100k_20kms_10deg" comparison_vlsv[1]="fullf.0000001.vlsv" #only one process does anything -> in _1 phiprof here comparison_phiprof[1]="phiprof_1.txt" -variable_names[1]="proton/rho proton/V proton/V proton/V protons" +variable_names[1]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v protons" variable_components[1]="0 0 1 2" single_cell[1]=1 @@ -32,7 +32,7 @@ test_name[2]="acctest_3_substeps" comparison_vlsv[2]="fullf.0000001.vlsv" #only one process does anything -> in _1 phiprof here comparison_phiprof[2]="phiprof_1.txt" -variable_names[2]="proton/rho proton/V proton/V proton/V protons" +variable_names[2]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v protons" variable_components[2]="0 0 1 2" single_cell[2]=1 @@ -40,13 +40,13 @@ single_cell[2]=1 test_name[3]="transtest_2_maxw_500k_100k_20kms_20x20" comparison_vlsv[3]="fullf.0000001.vlsv" comparison_phiprof[3]="phiprof_0.txt" -variable_names[3]="proton/rho proton/V proton/V proton/V protons" +variable_names[3]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v protons" variable_components[3]="0 0 1 2" test_name[4]="acctest_4_helium" comparison_vlsv[4]="fullf.0000001.vlsv" comparison_phiprof[4]="phiprof_1.txt" -variable_names[4]="helium/rho helium/V helium/V helium/V" +variable_names[4]="helium/vg_rho helium/vg_v helium/vg_v helium/vg_v" variable_components[4]="0 0 1 2" single_cell[4]=1 @@ -55,7 +55,7 @@ test_name[5]="acctest_5_proton_antiproton" comparison_vlsv[5]="fullf.0000001.vlsv" #only one process does anything -> in _1 phiprof here comparison_phiprof[5]="phiprof_1.txt" -variable_names[5]="proton/rho proton/V proton/V proton/V protons" +variable_names[5]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v protons" variable_components[5]="0 0 1 2" single_cell[5]=1 @@ -63,66 +63,66 @@ single_cell[5]=1 test_name[6]="restart_write" comparison_vlsv[6]="bulk.0000001.vlsv" comparison_phiprof[6]="phiprof_0.txt" 
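(An aside on the two translation patches above, before the test definitions continue.) The new trailing argument threaded through compute_ppm_coeff_nonuniform and the face-estimate routines is the population's sparse velocity-space minimum value: cell_pop_threshold in the mini-app, spatial_cell->getVelocityBlockMinValue(popID) in Vlasiator proper. The face-value filters divide the stencil values by it before calling slope_limiter and scale the limited slope back afterwards, so the limiter works on numbers near 1 instead of near 1e-15. A scalar Python sketch of that rescaling, with an assumed minmod-style limiter standing in for the solver's vectorized slope_limiter (all names here are illustrative, not the solver's API):

def minmod_slope(v_left, v_center, v_right):
    # Limited slope from three consecutive cell averages; 0 at a local extremum.
    d_l = v_center - v_left
    d_r = v_right - v_center
    if d_l * d_r <= 0.0:
        return 0.0
    sign = 1.0 if d_l > 0.0 else -1.0
    return sign * min(abs(d_l), abs(d_r))

def limited_slope_scaled(v_left, v_center, v_right, threshold):
    # Mirrors the patch: slope_limiter(values*scale, ...) with scale = 1/threshold,
    # then slope_abs = slope_abs*threshold to restore the physical magnitude.
    scale = 1.0 / threshold
    return threshold * minmod_slope(v_left * scale, v_center * scale, v_right * scale)

# Phase-space densities sitting near the sparsity threshold (~1e-15 in the mini-app):
print(limited_slope_scaled(3.0e-15, 5.0e-15, 9.0e-15, 1.0e-15))  # 2e-15

For a limiter that is homogeneous of degree one, such as minmod, the rescaling is an exact identity in real arithmetic; the motivation stated in the patch comment is purely floating-point accuracy when the values sit many orders of magnitude below one.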
-variable_names[6]="proton/rho proton/V proton/V proton/V B B B E E E" +variable_names[6]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v fg_b fg_b fg_b fg_e fg_e fg_e" variable_components[6]="0 0 1 2 0 1 2 0 1 2" test_name[7]="restart_read" comparison_vlsv[7]="initial-grid.0000000.vlsv" comparison_phiprof[7]="phiprof_0.txt" -variable_names[7]="proton/rho proton/V proton/V proton/V B B B E E E" +variable_names[7]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v fg_b fg_b fg_b fg_e fg_e fg_e" variable_components[7]="0 0 1 2 0 1 2 0 1 2" #Very small ecliptic magnetosphere, no subcycling in ACC or FS test_name[8]="Magnetosphere_small" comparison_vlsv[8]="bulk.0000001.vlsv" comparison_phiprof[8]="phiprof_0.txt" -variable_names[8]="proton/rho proton/V proton/V proton/V B B B E E E protons" +variable_names[8]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v fg_b fg_b fg_b fg_e fg_e fg_e protons" variable_components[8]="0 0 1 2 0 1 2 0 1 2" #Very small polar magnetosphere, with subcycling in ACC or FS test_name[9]="Magnetosphere_polar_small" comparison_vlsv[9]="bulk.0000001.vlsv" comparison_phiprof[9]="phiprof_0.txt" -variable_names[9]="proton/rho proton/V proton/V proton/V B B B E E E protons proton/VNonBackstream proton/PTensorNonBackstreamDiagonal" +variable_names[9]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v fg_b fg_b fg_b fg_e fg_e fg_e protons proton/vg_v_nonthermal proton/vg_ptensor_nonthermal_diagonal" variable_components[9]="0 0 1 2 0 1 2 0 1 2" # Field solver test test_name[10]="test_fp_fsolver_only_3D" comparison_vlsv[10]="fullf.0000001.vlsv" comparison_phiprof[10]="phiprof_0.txt" -variable_names[10]="B B B E E E" +variable_names[10]="fg_b fg_b fg_b fg_e fg_e fg_e" variable_components[10]="0 1 2 0 1 2" # Field solver test w/ subcycles test_name[11]="test_fp_substeps" comparison_vlsv[11]="fullf.0000001.vlsv" comparison_phiprof[11]="phiprof_0.txt" -variable_names[11]="B B B E E E" +variable_names[11]="fg_b fg_b fg_b fg_e fg_e fg_e" variable_components[11]="0 1 2 0 1 2" # Flowthrough tests test_name[12]="Flowthrough_trans_periodic" comparison_vlsv[12]="bulk.0000001.vlsv" comparison_phiprof[12]="phiprof_0.txt" -variable_names[12]="proton/rho proton/V proton/V proton/V B B B E E E" +variable_names[12]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v fg_b fg_b fg_b fg_e fg_e fg_e" variable_components[12]="0 0 1 2 0 1 2 0 1 2" test_name[13]="Flowthrough_x_inflow_y_outflow" comparison_vlsv[13]="bulk.0000001.vlsv" comparison_phiprof[13]="phiprof_0.txt" -variable_names[13]="proton/rho proton/V proton/V proton/V B B B E E E" +variable_names[13]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v fg_b fg_b fg_b fg_e fg_e fg_e" variable_components[13]="0 0 1 2 0 1 2 0 1 2" test_name[14]="Flowthrough_x_inflow_y_outflow_acc" comparison_vlsv[14]="bulk.0000001.vlsv" comparison_phiprof[14]="phiprof_0.txt" -variable_names[14]="proton/rho proton/V proton/V proton/V B B B E E E" +variable_names[14]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v fg_b fg_b fg_b fg_e fg_e fg_e" variable_components[14]="0 0 1 2 0 1 2 0 1 2" # Self-consistent wave generation test test_name[15]="Selfgen_Waves_Periodic" comparison_vlsv[15]="fullf.0000001.vlsv" comparison_phiprof[15]="phiprof_0.txt" -variable_names[15]="proton/rho proton/V proton/V proton/V B B B E E E protons" +variable_names[15]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v fg_b fg_b fg_b fg_e fg_e fg_e protons" variable_components[15]="0 0 1 2 0 1 2 0 1 2" ##AMR tests @@ -130,12 +130,12 @@ variable_components[15]="0 0 1 2 0 1 2 0 1 2" 
test_name[16]="transtest_amr" comparison_vlsv[3]="fullf.0000001.vlsv" comparison_phiprof[3]="phiprof_0.txt" -variable_names[3]="proton/rho proton/V proton/V proton/V protons" +variable_names[3]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v protons" variable_components[3]="0 0 1 2" # Flowthrough test test_name[17]="Flowthrough_amr" comparison_vlsv[12]="bulk.0000001.vlsv" comparison_phiprof[12]="phiprof_0.txt" -variable_names[12]="proton/rho proton/V proton/V proton/V B B B E E E" +variable_names[12]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v fg_b fg_b fg_b fg_e fg_e fg_e" variable_components[12]="0 0 1 2 0 1 2 0 1 2" diff --git a/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg b/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg index 0a761b64f..6d0a5f41a 100644 --- a/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg +++ b/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg @@ -125,7 +125,7 @@ noDipoleInSW = 1.0 dipoleType = 2 dipoleMirrorLocationX = 625.0e6 -[proton_backstream] +[proton_thermal] # Pretty much bogus values, just so that the reducer has something # to play with. (This cuts the solar wind roughly in half) radius = 5e5 From e070acf8c9aa3d7e591c0f4af87236ba2b801ca5 Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Tue, 3 Sep 2019 16:11:20 +0300 Subject: [PATCH 551/602] Added some more duplicate allowed datareducer calls. Updated testpackage config files to match new reducer names. Added python script(s) which can be used for updating config files to new naming convention. --- datareduction/datareducer.cpp | 22 +-- .../tests/Flowthrough_amr/Flowthrough_amr.cfg | 14 +- .../Flowthrough_trans_periodic.cfg | 22 +-- .../Flowthrough_x_inflow_y_outflow.cfg | 20 +-- .../Flowthrough_x_inflow_y_outflow_acc.cfg | 20 +-- .../Magnetosphere_polar_small.cfg | 28 ++-- .../Magnetosphere_small.cfg | 22 +-- .../Selfgen_Waves_Periodic.cfg | 18 +- .../acctest_1_maxw_500k_30kms_1deg.cfg | 86 +++++----- .../acctest_2_maxw_500k_100k_20kms_10deg.cfg | 18 +- .../acctest_3_substeps/acctest_3_substeps.cfg | 18 +- .../acctest_4_helium/acctest_4_helium.cfg | 18 +- .../acctest_5_proton_antiproton.cfg | 18 +- .../tests/restart_read/restart_read.cfg | 20 +-- .../tests/restart_write/restart_write.cfg | 20 +-- .../test_fp_fsolver_only_3D.cfg | 17 +- .../test_fp_substeps/test_fp_substeps.cfg | 17 +- ...transtest_2_maxw_500k_100k_20kms_20x20.cfg | 16 +- .../tests/transtest_amr/transtest_amr.cfg | 16 +- testpackage/tests/update_variables.py | 156 ++++++++++++++++++ tools/update_vlasiator_cfg_variables.py | 142 ++++++++++++++++ 21 files changed, 511 insertions(+), 217 deletions(-) create mode 100644 testpackage/tests/update_variables.py create mode 100644 tools/update_vlasiator_cfg_variables.py diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 27e87f8db..a315a0520 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -375,7 +375,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - if(lowercase == "maxrdt") { + if(lowercase == "maxrdt" || lowercase == "vg_maxdt_translation") { // Overall maximum timestep constraint as calculated by the real space vlasov update outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_maxdt_translation",CellParams::MAXRDT,1)); outputReducer->addMetadata(outputReducer->size()-1,"s","$\\mathrm{s}$","$\\Delta t_\\mathrm{R,max}$","1.0"); @@ -714,7 +714,7 @@ 
void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - if(lowercase == "bvolderivs" || lowercase == "b_vol_derivs" || lowercase == "b_vol_derivatives") { + if(lowercase == "bvolderivs" || lowercase == "b_vol_derivs" || lowercase == "b_vol_derivatives" || lowercase == "vg_b_vol_derivatives" || lowercase == "derivs" ) { // Volume-averaged derivatives outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("vg_dperbxvoldy",bvolderivatives::dPERBXVOLdy,1)); outputReducer->addOperator(new DRO::DataReductionOperatorBVOLDerivatives("vg_dperbxvoldz",bvolderivatives::dPERBXVOLdz,1)); @@ -906,19 +906,19 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti // Sidestep mixed case errors const std::string lowercase = boost::algorithm::to_lower_copy(*it); - if(lowercase == "populations_blocks") { + if(lowercase == "populations_blocks" || lowercase == "populations_vg_blocks") { // Per-population total block counts for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { diagnosticReducer->addOperator(new DRO::Blocks(i)); } continue; } - if(lowercase == "rhom") { + if(lowercase == "vg_rhom" || lowercase == "rhom") { // Overall mass density diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("rhom",CellParams::RHOM,1)); continue; } - if(lowercase == "populations_rholossadjust" || lowercase == "populations_rho_loss_adjust") { + if(lowercase == "populations_rholossadjust" || lowercase == "populations_rho_loss_adjust" || lowercase == "populations_vg_rho_loss_adjust") { // Per-particle overall lost particle number for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; @@ -931,19 +931,19 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti // diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("rho_loss_velocity_boundary",CellParams::RHOLOSSVELBOUNDARY,1)); // continue; //} - if(lowercase == "lbweight" || lowercase == "loadbalance_weight") { + if(lowercase == "lbweight" || lowercase == "vg_lbweight" || lowercase == "vg_loadbalanceweight" || lowercase == "vg_loadbalance_weight" || lowercase == "loadbalance_weight") { diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("loadbalance_weight",CellParams::LBWEIGHTCOUNTER,1)); continue; } - if(lowercase == "maxvdt" || lowercase == "maxdt_acceleration") { + if(lowercase == "maxvdt" || lowercase == "maxdt_acceleration" || lowercase == "vg_maxdt_acceleration") { diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("maxdt_acceleration",CellParams::MAXVDT,1)); continue; } - if(lowercase == "maxrdt" || lowercase == "maxdt_translation") { + if(lowercase == "maxrdt" || lowercase == "maxdt_translation" || lowercase == "vg_maxdt_translation") { diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("maxdt_translation",CellParams::MAXRDT,1)); continue; } - if(lowercase == "maxfieldsdt" || lowercase == "maxdt_fieldsolver") { + if(lowercase == "maxfieldsdt" || lowercase == "maxdt_fieldsolver" || lowercase == "fg_maxfieldsdt" || lowercase == "fg_maxdt_fieldsolver") { diagnosticReducer->addOperator(new DRO::DataReductionOperatorCellParams("maxdt_fieldsolver",CellParams::MAXFDT,1)); continue; } @@ -959,7 +959,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - if(lowercase == "populations_maxrdt" || lowercase == 
"populations_maxdt_translation") { + if(lowercase == "populations_maxrdt" || lowercase == "populations_maxdt_translation" || lowercase == "populations_vg_maxdt_translation") { for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; @@ -967,7 +967,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } - if(lowercase == "populations_maxvdt" || lowercase == "populations_maxdt_acceleration") { + if(lowercase == "populations_maxvdt" || lowercase == "populations_maxdt_acceleration" || lowercase == "populations_vg_maxdt_acceleration") { for(unsigned int i =0; i < getObjectWrapper().particleSpecies.size(); i++) { species::Species& species=getObjectWrapper().particleSpecies[i]; const std::string& pop = species.name; diff --git a/testpackage/tests/Flowthrough_amr/Flowthrough_amr.cfg b/testpackage/tests/Flowthrough_amr/Flowthrough_amr.cfg index ede3b7913..9d07e7394 100644 --- a/testpackage/tests/Flowthrough_amr/Flowthrough_amr.cfg +++ b/testpackage/tests/Flowthrough_amr/Flowthrough_amr.cfg @@ -52,13 +52,13 @@ system_write_distribution_yline_stride = 0 system_write_distribution_zline_stride = 0 [variables] -output = populations_Rho -output = E -output = B -output = BoundaryType -output = MPIrank -output = populations_Blocks -diagnostic = populations_Blocks +output = populations_vg_rho +output = fg_e +output = fg_b +output = vg_boundarytype +output = vg_rank +output = populations_vg_blocks +diagnostic = populations_vg_blocks [boundaries] periodic_x = no diff --git a/testpackage/tests/Flowthrough_trans_periodic/Flowthrough_trans_periodic.cfg b/testpackage/tests/Flowthrough_trans_periodic/Flowthrough_trans_periodic.cfg index 176a7336b..186f935e1 100644 --- a/testpackage/tests/Flowthrough_trans_periodic/Flowthrough_trans_periodic.cfg +++ b/testpackage/tests/Flowthrough_trans_periodic/Flowthrough_trans_periodic.cfg @@ -17,17 +17,17 @@ system_write_distribution_yline_stride = 0 system_write_distribution_zline_stride = 0 [variables] -output = Rhom -output = E -output = B -output = Pressure -output = populations_Rho -output = populations_V -output = BoundaryType -output = MPIrank -output = populations_Blocks -output = populations_Blocks -diagnostic = populations_Blocks +output = vg_rhom +output = fg_e +output = fg_b +output = vg_pressure +output = populations_vg_rho +output = populations_vg_v +output = vg_boundarytype +output = vg_rank +output = populations_vg_blocks +output = populations_vg_blocks +diagnostic = populations_vg_blocks [gridbuilder] x_length = 20 diff --git a/testpackage/tests/Flowthrough_x_inflow_y_outflow/Flowthrough_x_inflow_y_outflow.cfg b/testpackage/tests/Flowthrough_x_inflow_y_outflow/Flowthrough_x_inflow_y_outflow.cfg index db9d7e5b0..7cb5d7ded 100644 --- a/testpackage/tests/Flowthrough_x_inflow_y_outflow/Flowthrough_x_inflow_y_outflow.cfg +++ b/testpackage/tests/Flowthrough_x_inflow_y_outflow/Flowthrough_x_inflow_y_outflow.cfg @@ -17,16 +17,16 @@ system_write_distribution_yline_stride = 0 system_write_distribution_zline_stride = 0 [variables] -output = Rhom -output = E -output = B -output = Pressure -output = populations_V -output = BoundaryType -output = MPIrank -output = populations_Blocks -output = populations_Rho -diagnostic = populations_Blocks +output = vg_rhom +output = fg_e +output = fg_b +output = vg_pressure +output = populations_vg_v +output = vg_boundarytype +output = vg_rank +output = populations_vg_blocks 
+output = populations_vg_rho +diagnostic = populations_vg_blocks [gridbuilder] x_length = 20 diff --git a/testpackage/tests/Flowthrough_x_inflow_y_outflow_acc/Flowthrough_x_inflow_y_outflow_acc.cfg b/testpackage/tests/Flowthrough_x_inflow_y_outflow_acc/Flowthrough_x_inflow_y_outflow_acc.cfg index 30eb052b9..7bd4e0a0d 100644 --- a/testpackage/tests/Flowthrough_x_inflow_y_outflow_acc/Flowthrough_x_inflow_y_outflow_acc.cfg +++ b/testpackage/tests/Flowthrough_x_inflow_y_outflow_acc/Flowthrough_x_inflow_y_outflow_acc.cfg @@ -17,16 +17,16 @@ system_write_distribution_yline_stride = 0 system_write_distribution_zline_stride = 0 [variables] -output = Rhom -output = E -output = B -output = Pressure -output = populations_V -output = BoundaryType -output = MPIrank -output = populations_Blocks -output = populations_Rho -diagnostic = populations_Blocks +output = vg_rhom +output = fg_e +output = fg_b +output = vg_pressure +output = populations_vg_v +output = vg_boundarytype +output = vg_rank +output = populations_vg_blocks +output = populations_vg_rho +diagnostic = populations_vg_blocks [gridbuilder] x_length = 20 diff --git a/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg b/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg index 6d0a5f41a..87df1105e 100644 --- a/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg +++ b/testpackage/tests/Magnetosphere_polar_small/Magnetosphere_polar_small.cfg @@ -64,20 +64,20 @@ maxSlAccelerationSubcycles = 2 rebalanceInterval = 50 [variables] -output = populations_Rho -output = B -output = E -output = Pressure -output = populations_V -output = populations_Rho -output = populations_moments_Backstream -output = populations_moments_NonBackstream -output = BoundaryType -output = MPIrank -output = populations_Blocks -output = fSaved -output = populations_PrecipitationFlux -diagnostic = populations_Blocks +output = populations_vg_rho +output = fg_b +output = fg_e +output = vg_pressure +output = populations_vg_v +output = populations_vg_rho +output = populations_vg_moments_nonthermal +output = populations_vg_moments_thermal +output = vg_boundarytype +output = vg_rank +output = populations_vg_blocks +output = vg_f_saved +output = populations_vg_precipitationdifferentialflux +diagnostic = populations_vg_blocks [boundaries] diff --git a/testpackage/tests/Magnetosphere_small/Magnetosphere_small.cfg b/testpackage/tests/Magnetosphere_small/Magnetosphere_small.cfg index 02ae2071e..e8d7d9faa 100644 --- a/testpackage/tests/Magnetosphere_small/Magnetosphere_small.cfg +++ b/testpackage/tests/Magnetosphere_small/Magnetosphere_small.cfg @@ -59,17 +59,17 @@ maxSlAccelerationRotation = 22 rebalanceInterval = 10 [variables] -output = populations_Rho -output = B -output = E -output = Pressure -output = populations_V -output = populations_Rho -output = BoundaryType -output = MPIrank -output = populations_Blocks -output = fSaved -diagnostic = populations_Blocks +output = populations_vg_rho +output = fg_b +output = fg_e +output = vg_pressure +output = populations_vg_v +output = populations_vg_rho +output = vg_boundarytype +output = vg_rank +output = populations_vg_blocks +output = vg_f_saved +diagnostic = populations_vg_blocks [boundaries] periodic_x = no diff --git a/testpackage/tests/Selfgen_Waves_Periodic/Selfgen_Waves_Periodic.cfg b/testpackage/tests/Selfgen_Waves_Periodic/Selfgen_Waves_Periodic.cfg index 5ce3f6c13..5a49e7c8d 100644 --- a/testpackage/tests/Selfgen_Waves_Periodic/Selfgen_Waves_Periodic.cfg +++ 
b/testpackage/tests/Selfgen_Waves_Periodic/Selfgen_Waves_Periodic.cfg @@ -52,17 +52,17 @@ periodic_y = yes periodic_z = yes [variables] -output = populations_Rho -output = B -output = Pressure -output = populations_V -output = populations_Rho -output = E -output = MPIrank -output = populations_Blocks +output = populations_vg_rho +output = fg_b +output = vg_pressure +output = populations_vg_v +output = populations_vg_rho +output = fg_e +output = vg_rank +output = populations_vg_blocks #output = VelocitySubSteps -diagnostic = populations_Blocks +diagnostic = populations_vg_blocks #diagnostic = Pressure #diagnostic = populations_Rho #diagnostic = populations_RhoLossAdjust diff --git a/testpackage/tests/acctest_1_maxw_500k_30kms_1deg/acctest_1_maxw_500k_30kms_1deg.cfg b/testpackage/tests/acctest_1_maxw_500k_30kms_1deg/acctest_1_maxw_500k_30kms_1deg.cfg index 7e85b12d5..80e04ba1a 100644 --- a/testpackage/tests/acctest_1_maxw_500k_30kms_1deg/acctest_1_maxw_500k_30kms_1deg.cfg +++ b/testpackage/tests/acctest_1_maxw_500k_30kms_1deg/acctest_1_maxw_500k_30kms_1deg.cfg @@ -18,55 +18,53 @@ system_write_distribution_yline_stride = 0 system_write_distribution_zline_stride = 0 [variables] -output = B -output = BackgroundB -output = PerturbedB -output = E -output = Rhom -output = Rhoq -output = populations_Rho -output = V -output = populations_V -output = populations_moments_Backstream -output = populations_moments_NonBackstream -output = populations_EffectiveSparsityThreshold -output = populations_RhoLossAdjust -output = LBweight -output = MaxVdt -output = MaxRdt -output = populations_MaxVdt -output = populations_MaxRdt -output = MaxFieldsdt -output = MPIrank -output = FsGridRank -output = FsGridBoundaryType -output = BoundaryType -output = BoundaryLayer -output = populations_Blocks -output = fSaved -output = populations_accSubcycles +output = fg_b +output = fg_b_background +output = fg_b_perturbed +output = fg_e +output = vg_rhom +output = vg_rhoq +output = populations_vg_rho +output = vg_v +output = populations_vg_v +output = populations_vg_moments_nonthermal +output = populations_vg_moments_thermal +output = populations_vg_effectivesparsitythreshold +output = populations_vg_rho_loss_adjust +output = vg_loadbalance_weight +output = vg_maxdt_acceleration +output = vg_maxdt_translation +output = populations_vg_maxdt_acceleration +output = populations_vg_maxdt_translation +output = fg_maxdt_fieldsolver +output = vg_rank +output = fg_rank +output = fg_boundarytype +output = vg_boundarytype +output = vg_boundarylayer +output = populations_vg_blocks +output = vg_f_saved +output = populations_vg_acceleration_subcycles output = VolE -output = HallE -output = GradPeE -output = VolB -output = BackgroundVolB -output = PerturbedVolB -output = Pressure -output = populations_PTensor -output = derivs -output = BVOLderivs +output = fg_e_hall +output = vg_e_gradpe +output = vg_b_vol +output = vg_b_background_vol +output = vg_b_perturbed_vol +output = vg_pressure +output = populations_vg_ptensor +output = vg_b_vol_derivatives +output = vg_b_vol_derivatives output = GridCoordinates output = MeshData #output = VelocitySubSteps -diagnostic = populations_Blocks -diagnostic = FluxE -diagnostic = FluxB -diagnostic = Rhom -diagnostic = populations_RhoLossAdjust -diagnostic = LBweight -diagnostic = MaxVdt -diagnostic = MaxRdt +diagnostic = populations_vg_blocks +diagnostic = vg_rhom +diagnostic = populations_vg_rho_loss_adjust +diagnostic = vg_loadbalance_weight +diagnostic = vg_maxdt_acceleration +diagnostic = 
vg_maxdt_translation diagnostic = populations_MaxDistributionFunction diagnostic = populations_MinDistributionFunction diff --git a/testpackage/tests/acctest_2_maxw_500k_100k_20kms_10deg/acctest_2_maxw_500k_100k_20kms_10deg.cfg b/testpackage/tests/acctest_2_maxw_500k_100k_20kms_10deg/acctest_2_maxw_500k_100k_20kms_10deg.cfg index 93a955776..56e3d86f1 100644 --- a/testpackage/tests/acctest_2_maxw_500k_100k_20kms_10deg/acctest_2_maxw_500k_100k_20kms_10deg.cfg +++ b/testpackage/tests/acctest_2_maxw_500k_100k_20kms_10deg/acctest_2_maxw_500k_100k_20kms_10deg.cfg @@ -18,17 +18,17 @@ system_write_distribution_yline_stride = 0 system_write_distribution_zline_stride = 0 [variables] -output = Rhom -output = B -output = Pressure -output = populations_V -output = E -output = MPIrank -output = populations_Blocks -output = populations_Rho +output = vg_rhom +output = fg_b +output = vg_pressure +output = populations_vg_v +output = fg_e +output = vg_rank +output = populations_vg_blocks +output = populations_vg_rho #output = VelocitySubSteps -diagnostic = populations_Blocks +diagnostic = populations_vg_blocks #diagnostic = Pressure #diagnostic = populations_Rho #diagnostic = populations_RhoLossAdjust diff --git a/testpackage/tests/acctest_3_substeps/acctest_3_substeps.cfg b/testpackage/tests/acctest_3_substeps/acctest_3_substeps.cfg index 64477b5dc..233266fd9 100644 --- a/testpackage/tests/acctest_3_substeps/acctest_3_substeps.cfg +++ b/testpackage/tests/acctest_3_substeps/acctest_3_substeps.cfg @@ -18,17 +18,17 @@ system_write_distribution_yline_stride = 0 system_write_distribution_zline_stride = 0 [variables] -output = Rhom -output = B -output = Pressure -output = populations_V -output = E -output = MPIrank -output = populations_Blocks -output = populations_Rho +output = vg_rhom +output = fg_b +output = vg_pressure +output = populations_vg_v +output = fg_e +output = vg_rank +output = populations_vg_blocks +output = populations_vg_rho #output = VelocitySubSteps -diagnostic = populations_Blocks +diagnostic = populations_vg_blocks #diagnostic = Pressure #diagnostic = populations_Rho #diagnostic = populations_RhoLossAdjust diff --git a/testpackage/tests/acctest_4_helium/acctest_4_helium.cfg b/testpackage/tests/acctest_4_helium/acctest_4_helium.cfg index 4d2854e27..80360da4e 100644 --- a/testpackage/tests/acctest_4_helium/acctest_4_helium.cfg +++ b/testpackage/tests/acctest_4_helium/acctest_4_helium.cfg @@ -18,17 +18,17 @@ system_write_distribution_yline_stride = 0 system_write_distribution_zline_stride = 0 [variables] -output = Rhom -output = B -output = Pressure -output = populations_V -output = E -output = MPIrank -output = populations_Blocks -output = populations_Rho +output = vg_rhom +output = fg_b +output = vg_pressure +output = populations_vg_v +output = fg_e +output = vg_rank +output = populations_vg_blocks +output = populations_vg_rho #output = VelocitySubSteps -diagnostic = populations_Blocks +diagnostic = populations_vg_blocks #diagnostic = Pressure #diagnostic = populations_Rho #diagnostic = populations_RhoLossAdjust diff --git a/testpackage/tests/acctest_5_proton_antiproton/acctest_5_proton_antiproton.cfg b/testpackage/tests/acctest_5_proton_antiproton/acctest_5_proton_antiproton.cfg index 2ad0dd99b..dba928f8a 100644 --- a/testpackage/tests/acctest_5_proton_antiproton/acctest_5_proton_antiproton.cfg +++ b/testpackage/tests/acctest_5_proton_antiproton/acctest_5_proton_antiproton.cfg @@ -20,21 +20,21 @@ system_write_distribution_yline_stride = 0 system_write_distribution_zline_stride = 0 [variables] 
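(A brief aside from the cfg hunks, on the datareducer changes earlier in this patch.) Each requested output or diagnostic token is lowercased ("sidestep mixed case errors") and then compared against both the deprecated and the new spellings, which is why configs written against either naming generation keep producing the same reducers. A minimal Python sketch of that lookup; the two alias sets are copied from the diff, but the table-driven structure is illustrative, the real code being a chain of if statements:

# Canonicalize a cfg output/diagnostic token the way initializeDataReducers
# accepts it: lowercase first, then match any known spelling, old or new.
ALIASES = {
    "vg_maxdt_translation": {"maxrdt", "maxdt_translation", "vg_maxdt_translation"},
    "vg_loadbalance_weight": {"lbweight", "vg_lbweight", "vg_loadbalanceweight",
                              "vg_loadbalance_weight", "loadbalance_weight"},
}

def canonical_reducer(token):
    lowercase = token.lower()      # sidestep mixed-case cfg entries
    for canonical, spellings in ALIASES.items():
        if lowercase in spellings:
            return canonical
    return None                    # unknown name: the caller reports an error

assert canonical_reducer("MaxRdt") == "vg_maxdt_translation"
assert canonical_reducer("LBweight") == "vg_loadbalance_weight"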
-output = B -output = E -output = Rhom -output = Rhoq -output = populations_Rho -output = V -output = populations_V +output = fg_b +output = fg_e +output = vg_rhom +output = vg_rhoq +output = populations_vg_rho +output = vg_v +output = populations_vg_v #output = MaxVdt #output = MaxRdt #output = populations_MaxVdt #output = populations_MaxRdt -output = populations_Blocks +output = populations_vg_blocks #output = populations_accSubcycles -diagnostic = populations_Blocks +diagnostic = populations_vg_blocks [gridbuilder] x_length = 1 diff --git a/testpackage/tests/restart_read/restart_read.cfg b/testpackage/tests/restart_read/restart_read.cfg index 7a52594f8..5fbda528a 100644 --- a/testpackage/tests/restart_read/restart_read.cfg +++ b/testpackage/tests/restart_read/restart_read.cfg @@ -20,16 +20,16 @@ system_write_distribution_yline_stride = 0 system_write_distribution_zline_stride = 0 [variables] -output = Rhom -output = E -output = B -output = Pressure -output = populations_V -output = populations_Rho -output = BoundaryType -output = MPIrank -output = populations_Blocks -diagnostic = populations_Blocks +output = vg_rhom +output = fg_e +output = fg_b +output = vg_pressure +output = populations_vg_v +output = populations_vg_rho +output = vg_boundarytype +output = vg_rank +output = populations_vg_blocks +diagnostic = populations_vg_blocks [gridbuilder] x_length = 20 diff --git a/testpackage/tests/restart_write/restart_write.cfg b/testpackage/tests/restart_write/restart_write.cfg index 8d2fd5b84..857a08da8 100644 --- a/testpackage/tests/restart_write/restart_write.cfg +++ b/testpackage/tests/restart_write/restart_write.cfg @@ -18,16 +18,16 @@ system_write_distribution_yline_stride = 0 system_write_distribution_zline_stride = 0 [variables] -output = Rhom -output = E -output = B -output = Pressure -output = populations_V -output = populations_Rho -output = BoundaryType -output = MPIrank -output = populations_Blocks -diagnostic = populations_Blocks +output = vg_rhom +output = fg_e +output = fg_b +output = vg_pressure +output = populations_vg_v +output = populations_vg_rho +output = vg_boundarytype +output = vg_rank +output = populations_vg_blocks +diagnostic = populations_vg_blocks [gridbuilder] x_length = 20 diff --git a/testpackage/tests/test_fp_fsolver_only_3D/test_fp_fsolver_only_3D.cfg b/testpackage/tests/test_fp_fsolver_only_3D/test_fp_fsolver_only_3D.cfg index 26fde321e..eec4fe3a9 100644 --- a/testpackage/tests/test_fp_fsolver_only_3D/test_fp_fsolver_only_3D.cfg +++ b/testpackage/tests/test_fp_fsolver_only_3D/test_fp_fsolver_only_3D.cfg @@ -57,15 +57,14 @@ periodic_y = yes periodic_z = yes [variables] -output = populations_Rho -output = E -output = B -output = BackgroundB -output = PerturbedB -output = populations_V -output = populations_Blocks -diagnostic = FluxB -diagnostic = populations_Blocks +output = populations_vg_rho +output = fg_e +output = fg_b +output = fg_b_background +output = fg_b_perturbed +output = populations_vg_v +output = populations_vg_blocks +diagnostic = populations_vg_blocks [test_fp] B0 = 1.0e-9 diff --git a/testpackage/tests/test_fp_substeps/test_fp_substeps.cfg b/testpackage/tests/test_fp_substeps/test_fp_substeps.cfg index a2607d710..f840cb57f 100644 --- a/testpackage/tests/test_fp_substeps/test_fp_substeps.cfg +++ b/testpackage/tests/test_fp_substeps/test_fp_substeps.cfg @@ -57,15 +57,14 @@ periodic_y = yes periodic_z = yes [variables] -output = populations_Rho -output = E -output = B -output = BackgroundB -output = PerturbedB -output = populations_V -output = 
populations_Blocks -diagnostic = FluxB -diagnostic = populations_Blocks +output = populations_vg_rho +output = fg_e +output = fg_b +output = fg_b_background +output = fg_b_perturbed +output = populations_vg_v +output = populations_vg_blocks +diagnostic = populations_vg_blocks [test_fp] B0 = 1.0e-9 diff --git a/testpackage/tests/transtest_2_maxw_500k_100k_20kms_20x20/transtest_2_maxw_500k_100k_20kms_20x20.cfg b/testpackage/tests/transtest_2_maxw_500k_100k_20kms_20x20/transtest_2_maxw_500k_100k_20kms_20x20.cfg index 914069d41..dac3ae93f 100644 --- a/testpackage/tests/transtest_2_maxw_500k_100k_20kms_20x20/transtest_2_maxw_500k_100k_20kms_20x20.cfg +++ b/testpackage/tests/transtest_2_maxw_500k_100k_20kms_20x20/transtest_2_maxw_500k_100k_20kms_20x20.cfg @@ -54,16 +54,16 @@ periodic_y = yes periodic_z = yes [variables] -output = populations_Rho -output = B -output = Pressure -output = populations_V -output = E -output = MPIrank -output = populations_Blocks +output = populations_vg_rho +output = fg_b +output = vg_pressure +output = populations_vg_v +output = fg_e +output = vg_rank +output = populations_vg_blocks #output = VelocitySubSteps -diagnostic = populations_Blocks +diagnostic = populations_vg_blocks #diagnostic = Pressure #diagnostic = populations_Rho #diagnostic = populations_RhoLossAdjust diff --git a/testpackage/tests/transtest_amr/transtest_amr.cfg b/testpackage/tests/transtest_amr/transtest_amr.cfg index 6e53193c0..a36fbfc5a 100644 --- a/testpackage/tests/transtest_amr/transtest_amr.cfg +++ b/testpackage/tests/transtest_amr/transtest_amr.cfg @@ -62,16 +62,16 @@ periodic_y = yes periodic_z = yes [variables] -output = populations_Rho -output = B -output = Pressure -output = populations_V -output = E -output = MPIrank -output = populations_Blocks +output = populations_vg_rho +output = fg_b +output = vg_pressure +output = populations_vg_v +output = fg_e +output = vg_rank +output = populations_vg_blocks #output = VelocitySubSteps -diagnostic = populations_Blocks +diagnostic = populations_vg_blocks #diagnostic = Pressure #diagnostic = populations_Rho #diagnostic = populations_RhoLossAdjust diff --git a/testpackage/tests/update_variables.py b/testpackage/tests/update_variables.py new file mode 100644 index 000000000..a0a7a2243 --- /dev/null +++ b/testpackage/tests/update_variables.py @@ -0,0 +1,156 @@ +import numpy as np +import sys,os +import shutil + +tests = [ + 'acctest_1_maxw_500k_30kms_1deg', + 'acctest_2_maxw_500k_100k_20kms_10deg', + 'acctest_3_substeps', + 'acctest_4_helium', + 'acctest_5_proton_antiproton', + 'Flowthrough_amr', + 'Flowthrough_trans_periodic', + 'Flowthrough_x_inflow_y_outflow', + 'Flowthrough_x_inflow_y_outflow_acc', + 'Magnetosphere_polar_small', + 'Magnetosphere_small', + 'restart_read', + 'restart_write', + 'Selfgen_Waves_Periodic', + 'test_fp_fsolver_only_3D', + 'test_fp_substeps', + 'transtest_2_maxw_500k_100k_20kms_20x20', + 'transtest_amr', +] + +replacecategories = { + 'backstream': 'thermal', +} + +delete = ['fluxb', 'fluxe'] + +replace = { + 'b': 'fg_b', + 'fg_backgroundb': 'fg_b_background', + 'backgroundb': 'fg_b_background', + 'perturbedb': 'fg_b_perturbed', + 'fg_perturbedb': 'fg_b_perturbed', + 'e': 'fg_e', + 'rhom': 'vg_rhom', + 'rhoq': 'vg_rhoq', + 'populations_rho': 'populations_vg_rho', + # '': 'fg_rhom', + # '': 'fg_rhoq', + 'v': 'vg_v', + # '': 'fg_v', + 'populations_v': 'populations_vg_v', + 'populations_moments_nonbackstream': 'populations_vg_moments_thermal', + 'populations_moments_thermal': 'populations_vg_moments_thermal', + 
'populations_moments_backstream': 'populations_vg_moments_nonthermal', + 'populations_moments_nonthermal': 'populations_vg_moments_nonthermal', + 'populations_minvalue': 'populations_vg_effectivesparsitythreshold', + 'populations_effectivesparsitythreshold': 'populations_vg_effectivesparsitythreshold', + 'populations_rholossadjust': 'populations_vg_rho_loss_adjust', + 'populations_rho_loss_adjust': 'populations_vg_rho_loss_adjust', + 'populations_energydensity': 'populations_vg_energydensity', + 'populations_precipitationflux': 'populations_vg_precipitationdifferentialflux', + 'populations_precipitationdifferentialflux': 'populations_vg_precipitationdifferentialflux', + 'maxvdt': 'vg_maxdt_acceleration', + 'maxrdt': 'vg_maxdt_translation', + 'populations_maxvdt': 'populations_vg_maxdt_acceleration', + 'populations_maxdt_acceleration': 'populations_vg_maxdt_acceleration', + 'populations_maxrdt': 'populations_vg_maxdt_translation', + 'populations_maxdt_translation': 'populations_vg_maxdt_translation', + 'maxfieldsdt': 'fg_maxdt_fieldsolver', + 'fg_maxfieldsdt': 'fg_maxdt_fieldsolver', + 'mpirank': 'vg_rank', + 'fsgridrank': 'fg_rank', + 'lbweight': 'vg_loadbalance_weight', + 'vg_lbweight': 'vg_loadbalance_weight', + 'vg_loadbalanceweight': 'vg_loadbalance_weight', + 'boundarytype': 'vg_boundarytype', + 'fsgridboundarytype': 'fg_boundarytype', + 'boundarylayer': 'vg_boundarylayer', + 'fsgridboundarylayer': 'fg_boundarylayer', + 'populations_blocks': 'populations_vg_blocks', + 'fsaved': 'vg_f_saved', + 'vg_fsaved': 'vg_f_saved', + 'populations_accsubcycles': 'populations_vg_acceleration_subcycles', + 'populations_acceleration_subcycles': 'populations_vg_acceleration_subcycles', + 'halle': 'fg_e_hall', + 'fg_halle': 'fg_e_hall', + 'gradpee': 'vg_e_gradpe', + 'e_gradpe': 'vg_e_gradpe', + 'volb': 'vg_b_vol', + 'vg_volb': 'vg_b_vol', + 'b_vol': 'vg_b_vol', + 'bvol': 'vg_b_vol', + 'vg_bvol': 'vg_b_vol', + 'fg_volb': 'fg_b_vol', + 'fg_bvol': 'fg_b_vol', + 'backgroundvolb': 'vg_b_background_vol', + 'perturbedvolb': 'vg_b_perturbed_vol', + 'pressure': 'vg_pressure', + # '': 'fg_pressure', + 'populations_ptensor': 'populations_vg_ptensor', + 'bvolderivs': 'vg_b_vol_derivatives', + 'derivs': 'vg_b_vol_derivatives', + 'b_vol_derivs': 'vg_b_vol_derivatives', + 'b_vol_derivatives': 'vg_b_vol_derivatives', + # '': 'vg_gridcoordinates', + # '': 'fg_gridcoordinates meshdata', +} + +for dir in tests: + infile = dir+'/'+dir+'.cfg' + oldinfile = dir+'/'+dir+'_old.cfg' + outfile = dir+'/'+dir+'_new.cfg' + + inf = open(infile, 'r') + outf = open(outfile, 'w') + + for line in inf: + sline = line.strip() + columns = sline.split() + passdirect = True + if len(columns)>=3: + if columns[0] == 'output' and columns[1] == '=': + if columns[2].lower() in replace: + outf.write('output = '+replace[columns[2].lower()]+'\n') + passdirect = False + elif columns[2].lower() in delete: + print('Removed deprecated output line:') + print(' '+line) + passdirect = False + else: + print('passing following output line directly through:') + print(' '+line) + elif columns[0] == 'diagnostic' and columns[1] == '=': + if columns[2].lower() in replace: + outf.write('diagnostic = '+replace[columns[2].lower()]+'\n') + passdirect = False + elif columns[2].lower() in delete: + print('Removed deprecated diagnostic line:') + print(' '+line) + passdirect = False + else: + print('passing following diagnostic line directly through:') + print(' '+line) + elif len(columns)==1: + if columns[0][0]=='[': + for category in replacecategories: + lencat = 
len(category)
+ if category == columns[0][-1-lencat:-1]:
+ outf.write(columns[0][:-1-lencat]+replacecategories[category]+']\n')
+ passdirect = False
+
+ if passdirect:
+ outf.write(line)
+
+ inf.close()
+ outf.flush()
+ outf.close()
+
+ shutil.copy2(infile,oldinfile)
+ shutil.move(outfile,infile)
+
diff --git a/tools/update_vlasiator_cfg_variables.py b/tools/update_vlasiator_cfg_variables.py
new file mode 100644
index 000000000..88695b99a
--- /dev/null
+++ b/tools/update_vlasiator_cfg_variables.py
@@ -0,0 +1,142 @@
+import numpy as np
+import sys,os
+import shutil
+
+if len(sys.argv)!=2:
+ print("Usage: python update_vlasiator_cfg_variables.py full_path_to_config_file.cfg")
+ sys.exit(1)
+else:
+ infile = sys.argv[1]
+ if not os.path.exists(infile):
+ print("Error locating config file "+infile)
+ sys.exit(1)
+
+replacecategories = {
+ 'backstream': 'thermal',
+}
+
+delete = ['fluxb', 'fluxe']
+
+replace = {
+ 'b': 'fg_b',
+ 'fg_backgroundb': 'fg_b_background',
+ 'backgroundb': 'fg_b_background',
+ 'perturbedb': 'fg_b_perturbed',
+ 'fg_perturbedb': 'fg_b_perturbed',
+ 'e': 'fg_e',
+ 'rhom': 'vg_rhom',
+ 'rhoq': 'vg_rhoq',
+ 'populations_rho': 'populations_vg_rho',
+ # '': 'fg_rhom',
+ # '': 'fg_rhoq',
+ 'v': 'vg_v',
+ # '': 'fg_v',
+ 'populations_v': 'populations_vg_v',
+ 'populations_moments_nonbackstream': 'populations_vg_moments_thermal',
+ 'populations_moments_thermal': 'populations_vg_moments_thermal',
+ 'populations_moments_backstream': 'populations_vg_moments_nonthermal',
+ 'populations_moments_nonthermal': 'populations_vg_moments_nonthermal',
+ 'populations_minvalue': 'populations_vg_effectivesparsitythreshold',
+ 'populations_effectivesparsitythreshold': 'populations_vg_effectivesparsitythreshold',
+ 'populations_rholossadjust': 'populations_vg_rho_loss_adjust',
+ 'populations_rho_loss_adjust': 'populations_vg_rho_loss_adjust',
+ 'populations_energydensity': 'populations_vg_energydensity',
+ 'populations_precipitationflux': 'populations_vg_precipitationdifferentialflux',
+ 'populations_precipitationdifferentialflux': 'populations_vg_precipitationdifferentialflux',
+ 'maxvdt': 'vg_maxdt_acceleration',
+ 'maxrdt': 'vg_maxdt_translation',
+ 'populations_maxvdt': 'populations_vg_maxdt_acceleration',
+ 'populations_maxdt_acceleration': 'populations_vg_maxdt_acceleration',
+ 'populations_maxrdt': 'populations_vg_maxdt_translation',
+ 'populations_maxdt_translation': 'populations_vg_maxdt_translation',
+ 'maxfieldsdt': 'fg_maxdt_fieldsolver',
+ 'fg_maxfieldsdt': 'fg_maxdt_fieldsolver',
+ 'mpirank': 'vg_rank',
+ 'fsgridrank': 'fg_rank',
+ 'lbweight': 'vg_loadbalance_weight',
+ 'vg_lbweight': 'vg_loadbalance_weight',
+ 'vg_loadbalanceweight': 'vg_loadbalance_weight',
+ 'boundarytype': 'vg_boundarytype',
+ 'fsgridboundarytype': 'fg_boundarytype',
+ 'boundarylayer': 'vg_boundarylayer',
+ 'fsgridboundarylayer': 'fg_boundarylayer',
+ 'populations_blocks': 'populations_vg_blocks',
+ 'fsaved': 'vg_f_saved',
+ 'vg_fsaved': 'vg_f_saved',
+ 'populations_accsubcycles': 'populations_vg_acceleration_subcycles',
+ 'populations_acceleration_subcycles': 'populations_vg_acceleration_subcycles',
+ 'halle': 'fg_e_hall',
+ 'fg_halle': 'fg_e_hall',
+ 'gradpee': 'vg_e_gradpe',
+ 'e_gradpe': 'vg_e_gradpe',
+ 'volb': 'vg_b_vol',
+ 'vg_volb': 'vg_b_vol',
+ 'b_vol': 'vg_b_vol',
+ 'bvol': 'vg_b_vol',
+ 'vg_bvol': 'vg_b_vol',
+ 'fg_volb': 'fg_b_vol',
+ 'fg_bvol': 'fg_b_vol',
+ 'backgroundvolb': 'vg_b_background_vol',
+ 'perturbedvolb': 'vg_b_perturbed_vol',
+ 'pressure': 'vg_pressure',
+ # '': 'fg_pressure',
+ 'populations_ptensor':
'populations_vg_ptensor', + 'bvolderivs': 'vg_b_vol_derivatives', + 'derivs': 'vg_b_vol_derivatives', + 'b_vol_derivs': 'vg_b_vol_derivatives', + 'b_vol_derivatives': 'vg_b_vol_derivatives', + # '': 'vg_gridcoordinates', + # '': 'fg_gridcoordinates meshdata', +} + +oldinfile = infile+'_old' +outfile = infile+'_new' + +inf = open(infile, 'r') +outf = open(outfile, 'w') + +for line in inf: + sline = line.strip() + columns = sline.split() + passdirect = True + if len(columns)>=3: + if columns[0] == 'output' and columns[1] == '=': + if columns[2].lower() in replace: + outf.write('output = '+replace[columns[2].lower()]+'\n') + passdirect = False + elif columns[2].lower() in delete: + print('Removed deprecated output line:') + print(' '+line) + passdirect = False + else: + print('passing following output line directly through:') + print(' '+line) + elif columns[0] == 'diagnostic' and columns[1] == '=': + if columns[2].lower() in replace: + outf.write('diagnostic = '+replace[columns[2].lower()]+'\n') + passdirect = False + elif columns[2].lower() in delete: + print('Removed deprecated diagnostic line:') + print(' '+line) + passdirect = False + else: + print('passing following diagnostic line directly through:') + print(' '+line) + elif len(columns)==1: + if columns[0][0]=='[': + for category in replacecategories: + lencat = len(category) + if category == columns[0][-1-lencat:-1]: + outf.write(columns[0][:-1-lencat]+replacecategories[category]+']\n') + passdirect = False + + if passdirect: + outf.write(line) + +inf.close() +outf.flush() +outf.close() + +# Backup the old config file, and replace it with the new one +shutil.copy2(infile,oldinfile) +shutil.move(outfile,infile) + From 8ea9b0c57bba76917b042b0150e53b5a38fe77ae Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Thu, 5 Sep 2019 16:14:27 +0300 Subject: [PATCH 552/602] scripts for updating config files (and they were run as well) --- projects/Alfven/Alfven.cfg | 7 +- projects/Diffusion/Diffusion.cfg | 7 +- projects/Dispersion/Dispersion.cfg | 7 +- projects/Distributions/Distributions.cfg | 25 +- projects/Firehose/Firehose.cfg | 19 +- projects/Flowthrough/Flowthrough.cfg | 14 +- .../Flowthrough_amr_test_20190611_YPK.cfg | 34 +-- projects/Fluctuations/Fluctuations.cfg | 7 +- projects/Harris/Harris.cfg | 19 +- projects/IPShock/IPShock.cfg | 50 ++-- projects/KHB/KHB.cfg | 14 +- projects/KHB/KHBig.cfg | 16 +- projects/Larmor/Larmor.cfg | 11 +- projects/Magnetosphere/Magnetosphere.cfg | 50 ++-- .../Magnetosphere/Magnetosphere_BCH-like.cfg | 55 ++-- projects/MultiPeak/MultiPeak.cfg | 25 +- projects/MultiPeak/MultiPeak120.cfg | 25 +- projects/MultiPeak/MultiPeak2.cfg | 25 +- projects/MultiPeak/MultiPeak9.cfg | 25 +- projects/Riemann1/Riemann1.cfg | 10 +- projects/Shock/Shock.cfg | 12 +- projects/Shocktest/Shocktest.cfg | 29 +- projects/Template/Template.cfg | 25 +- projects/VelocityBox/VelocityBox.cfg | 21 +- projects/testAmr/testAmr.cfg | 25 +- projects/testHall/testHall.cfg | 7 +- projects/test_fp/test_fp.cfg | 17 +- projects/test_trans/test_trans.cfg | 8 +- .../verificationLarmor/verificationLarmor.cfg | 15 +- testpackage/small_test_definitions.sh | 11 +- .../Selfgen_Waves_Periodic.cfg | 9 +- .../acctest_1_maxw_500k_30kms_1deg.cfg | 5 +- .../acctest_2_maxw_500k_100k_20kms_10deg.cfg | 9 +- .../acctest_3_substeps/acctest_3_substeps.cfg | 9 +- .../acctest_4_helium/acctest_4_helium.cfg | 9 +- .../acctest_5_proton_antiproton.cfg | 10 +- ...transtest_2_maxw_500k_100k_20kms_20x20.cfg | 9 +- .../tests/transtest_amr/transtest_amr.cfg | 9 +- 
testpackage/tests/update_variables.py | 156 ---------- tools/update_project_configfiles.py | 38 +++ tools/update_testpackage_configfiles.py | 32 ++ tools/update_vlasiator_cfg_variables.py | 281 ++++++++++-------- 42 files changed, 547 insertions(+), 644 deletions(-) delete mode 100644 testpackage/tests/update_variables.py create mode 100644 tools/update_project_configfiles.py create mode 100644 tools/update_testpackage_configfiles.py diff --git a/projects/Alfven/Alfven.cfg b/projects/Alfven/Alfven.cfg index 930849976..cb479f6f4 100644 --- a/projects/Alfven/Alfven.cfg +++ b/projects/Alfven/Alfven.cfg @@ -42,10 +42,9 @@ periodic_y = yes periodic_z = yes [variables] -output = Rho -output = B -diagnostic = FluxB -diagnostic = Blocks +output = populations_vg_rho +output = fg_b +diagnostic = populations_vg_blocks [Alfven] diff --git a/projects/Diffusion/Diffusion.cfg b/projects/Diffusion/Diffusion.cfg index 5b398db05..9fa80c024 100644 --- a/projects/Diffusion/Diffusion.cfg +++ b/projects/Diffusion/Diffusion.cfg @@ -48,10 +48,9 @@ tolerance = 1.05 minValue = 1.0e-12 [variables] -output = Rho -output = B -diagnostic = FluxB -diagnostic = Blocks +output = populations_vg_rho +output = fg_b +diagnostic = populations_vg_blocks [Diffusion] B0 = 0.0 diff --git a/projects/Dispersion/Dispersion.cfg b/projects/Dispersion/Dispersion.cfg index 7225dd082..b9412727f 100644 --- a/projects/Dispersion/Dispersion.cfg +++ b/projects/Dispersion/Dispersion.cfg @@ -43,10 +43,9 @@ periodic_y = yes periodic_z = yes [variables] -output = Rho -output = B -diagnostic = FluxB -diagnostic = Blocks +output = populations_vg_rho +output = fg_b +diagnostic = populations_vg_blocks [loadBalance] algorithm = RCB diff --git a/projects/Distributions/Distributions.cfg b/projects/Distributions/Distributions.cfg index b4b2ee297..c71196e73 100644 --- a/projects/Distributions/Distributions.cfg +++ b/projects/Distributions/Distributions.cfg @@ -46,19 +46,18 @@ periodic_y = yes periodic_z = yes [variables] -output = Rho -output = B -output = VolB -output = BackgroundB -output = PerturbedB -output = RhoV -output = E -output = PTensor -output = MPIrank -output = Blocks -diagnostic = Blocks -diagnostic = populations_RhoLossAdjust -diagnostic = RhoLossVelBoundary +output = populations_vg_rho +output = fg_b +output = vg_b_vol +output = fg_b_background +output = fg_b_perturbed +output = populations_vg_v +output = fg_e +output = populations_vg_ptensor +output = vg_rank +output = populations_vg_blocks +diagnostic = populations_vg_blocks +diagnostic = populations_vg_rho_loss_adjust diagnostic = MaxDistributionFunction diagnostic = MinDistributionFunction diff --git a/projects/Firehose/Firehose.cfg b/projects/Firehose/Firehose.cfg index 499703e65..5aae6e94e 100644 --- a/projects/Firehose/Firehose.cfg +++ b/projects/Firehose/Firehose.cfg @@ -42,16 +42,15 @@ periodic_y = yes periodic_z = yes [variables] -output = Rho -output = B -output = Pressure -output = RhoV -output = E -output = PTensor -output = MPIrank -diagnostic = Blocks -diagnostic = populations_RhoLossAdjust -diagnostic = RhoLossVelBoundary +output = populations_vg_rho +output = fg_b +output = vg_pressure +output = populations_vg_v +output = fg_e +output = populations_vg_ptensor +output = vg_rank +diagnostic = populations_vg_blocks +diagnostic = populations_vg_rho_loss_adjust [sparse] minValue = 1.0e-15 diff --git a/projects/Flowthrough/Flowthrough.cfg b/projects/Flowthrough/Flowthrough.cfg index 0cb2478f4..146bf111f 100644 --- a/projects/Flowthrough/Flowthrough.cfg +++ 
b/projects/Flowthrough/Flowthrough.cfg @@ -53,13 +53,13 @@ system_write_distribution_yline_stride = 0 system_write_distribution_zline_stride = 0 [variables] -output = populations_Rho -output = E -output = B -output = BoundaryType -output = MPIrank -output = populations_Blocks -diagnostic = populations_Blocks +output = populations_vg_rho +output = fg_e +output = fg_b +output = vg_boundarytype +output = vg_rank +output = populations_vg_blocks +diagnostic = populations_vg_blocks [boundaries] periodic_x = no diff --git a/projects/Flowthrough/Flowthrough_amr_test_20190611_YPK.cfg b/projects/Flowthrough/Flowthrough_amr_test_20190611_YPK.cfg index 48ecede6a..831a6bbcd 100644 --- a/projects/Flowthrough/Flowthrough_amr_test_20190611_YPK.cfg +++ b/projects/Flowthrough/Flowthrough_amr_test_20190611_YPK.cfg @@ -52,20 +52,20 @@ system_write_distribution_yline_stride = 0 system_write_distribution_zline_stride = 0 [variables] -output = populations_Rho +output = populations_vg_rho output = vg_Rhom output = fg_Rhom -output = B -output = VolB -output = fg_PerturbedB -output = fg_BackgroundB -output = E -output = GradPeE -output = Pressure -output = populations_V -output = populations_Rho -output = populations_moments_Backstream -output = populations_moments_NonBackstream +output = fg_b +output = vg_b_vol +output = fg_b_perturbed +output = fg_b_background +output = fg_e +output = vg_e_gradpe +output = vg_pressure +output = populations_vg_v +output = populations_vg_rho +output = populations_vg_moments_nonthermal +output = populations_vg_moments_thermal output = vg_BoundaryType output = fg_BoundaryType output = vg_BoundaryLayer @@ -74,11 +74,11 @@ output = vg_GridCoordinates output = fg_GridCoordinates output = vg_rank output = fg_rank -output = populations_Blocks -output = fSaved -output = populations_MaxVdt -output = populations_MaxRdt -output = MaxFieldsdt +output = populations_vg_blocks +output = vg_f_saved +output = populations_vg_maxdt_acceleration +output = populations_vg_maxdt_translation +output = fg_maxdt_fieldsolver [boundaries] periodic_x = no diff --git a/projects/Fluctuations/Fluctuations.cfg b/projects/Fluctuations/Fluctuations.cfg index c060888cf..9550fb108 100644 --- a/projects/Fluctuations/Fluctuations.cfg +++ b/projects/Fluctuations/Fluctuations.cfg @@ -50,10 +50,9 @@ periodic_y = yes periodic_z = yes [variables] -output = populations_Rho -output = B -diagnostic = FluxB -diagnostic = populations_Blocks +output = populations_vg_rho +output = fg_b +diagnostic = populations_vg_blocks [loadBalance] algorithm = RCB diff --git a/projects/Harris/Harris.cfg b/projects/Harris/Harris.cfg index 547303ef1..c41c768c7 100644 --- a/projects/Harris/Harris.cfg +++ b/projects/Harris/Harris.cfg @@ -59,16 +59,15 @@ nSpaceSamples = 1 nVelocitySamples = 1 [variables] -output = Rho -output = RhoV -output = B -output = E -output = HallE -output = GradPeE -output = derivs -output = BoundaryType -diagnostic = FluxB -diagnostic = Blocks +output = populations_vg_rho +output = populations_vg_v +output = fg_b +output = fg_e +output = fg_e_hall +output = vg_e_gradpe +output = vg_b_vol_derivatives +output = vg_boundarytype +diagnostic = populations_vg_blocks [Harris] <<<<<<< HEAD diff --git a/projects/IPShock/IPShock.cfg b/projects/IPShock/IPShock.cfg index 4d88dfb4a..8ad7a509c 100644 --- a/projects/IPShock/IPShock.cfg +++ b/projects/IPShock/IPShock.cfg @@ -67,32 +67,30 @@ boundary = Maxwellian min_dt = 1.e-7 [variables] -output = Rho -output = RhoV -output = B -output = E -output = Pressure -output = HallE -output = 
PTensor -#output = populations_RhoLossAdjust -#output = RhoLossVelBoundary -output = RhoBackstream -output = RhoNonBackstream -output = RhoVBackstream -output = RhoVNonBackstream -#output = PTensorBackstream -#output = PTensorNonBackstream -#output = MaxVdt -#output = MaxRdt -#output = MaxFieldsdt -output = BoundaryType -#output = MPIrank -#output = BoundaryLayer -#output = accSubcycles -output = fSaved -#output = MinValue -#diagnostic = FluxB -diagnostic = Blocks +output = populations_vg_rho +output = populations_vg_v +output = fg_b +output = fg_e +output = vg_pressure +output = fg_e_hall +output = populations_vg_ptensor +#output = populations_vg_rho_loss_adjust +output = populations_vg_moments_nonthermal +output = populations_vg_moments_thermal +output = populations_vg_moments_nonthermal +output = populations_vg_moments_thermal +#output = populations_vg_moments_nonthermal +#output = populations_vg_moments_thermal +#output = vg_maxdt_acceleration +#output = vg_maxdt_translation +#output = fg_maxdt_fieldsolver +output = vg_boundarytype +#output = vg_rank +#output = vg_boundarylayer +#output = populations_vg_acceleration_subcycles +output = vg_f_saved +#output = populations_vg_effectivesparsitythreshold +diagnostic = populations_vg_blocks dr_backstream_radius = 81264 dr_backstream_vx = -250000 diff --git a/projects/KHB/KHB.cfg b/projects/KHB/KHB.cfg index 0a594737e..098ba35ae 100644 --- a/projects/KHB/KHB.cfg +++ b/projects/KHB/KHB.cfg @@ -58,13 +58,13 @@ file_x+ = mxp.dat precedence = 3 [variables] -output = Rho -output = B -output = Pressure -output = RhoV -output = Blocks -output = BoundaryType -diagnostic = Blocks +output = populations_vg_rho +output = fg_b +output = vg_pressure +output = populations_vg_v +output = populations_vg_blocks +output = vg_boundarytype +diagnostic = populations_vg_blocks [sparse] minValue = 1.0e-16 diff --git a/projects/KHB/KHBig.cfg b/projects/KHB/KHBig.cfg index f959131a1..3a68d718c 100644 --- a/projects/KHB/KHBig.cfg +++ b/projects/KHB/KHBig.cfg @@ -57,14 +57,14 @@ file_x+ = mxp.dat precedence = 3 [variables] -output = Rho -output = B -output = E -output = Pressure -output = RhoV -output = Blocks -output = BoundaryType -diagnostic = Blocks +output = populations_vg_rho +output = fg_b +output = fg_e +output = vg_pressure +output = populations_vg_v +output = populations_vg_blocks +output = vg_boundarytype +diagnostic = populations_vg_blocks [sparse] minValue = 1.0e-16 diff --git a/projects/Larmor/Larmor.cfg b/projects/Larmor/Larmor.cfg index 5db84be70..08ec20a4f 100644 --- a/projects/Larmor/Larmor.cfg +++ b/projects/Larmor/Larmor.cfg @@ -42,12 +42,11 @@ periodic_y = yes periodic_z = yes [variables] -output = Rho -output = RhoV -output = B -output = VolB -diagnostic = FluxB -diagnostic = Blocks +output = populations_vg_rho +output = populations_vg_v +output = fg_b +output = vg_b_vol +diagnostic = populations_vg_blocks [loadBalance] algorithm = RCB diff --git a/projects/Magnetosphere/Magnetosphere.cfg b/projects/Magnetosphere/Magnetosphere.cfg index 19abe396b..d932ad280 100644 --- a/projects/Magnetosphere/Magnetosphere.cfg +++ b/projects/Magnetosphere/Magnetosphere.cfg @@ -156,32 +156,30 @@ VZ0 = 0.0 rebalanceInterval = 10 [variables] -output = Rhom -output = Rhoq -output = B -output = VolB -output = E -output = VolE -output = Pressure -output = RhoV -output = BoundaryType -output = MPIrank -output = derivs -output = BVOLderivs -output = BoundaryLayer -output = BackgroundB -output = PerturbedB -output = LBweight -output = MaxVdt -output = MaxRdt -output = 
MaxFieldsdt -output = Blocks -output = PTensor -output = fSaved -output = populations_Blocks -diagnostic = Blocks -diagnostic = populations_RhoLossAdjust -diagnostic = RhoLossVelBoundary +output = vg_rhom +output = vg_rhoq +output = fg_b +output = vg_b_vol +output = fg_e +output = vg_pressure +output = populations_vg_v +output = vg_boundarytype +output = vg_rank +output = vg_b_vol_derivatives +output = vg_b_vol_derivatives +output = vg_boundarylayer +output = fg_b_background +output = fg_b_perturbed +output = vg_loadbalance_weight +output = vg_maxdt_acceleration +output = vg_maxdt_translation +output = fg_maxdt_fieldsolver +output = populations_vg_blocks +output = populations_vg_ptensor +output = vg_f_saved +output = populations_vg_blocks +diagnostic = populations_vg_blocks +diagnostic = populations_vg_rho_loss_adjust diagnostic = MaxDistributionFunction diagnostic = MinDistributionFunction diff --git a/projects/Magnetosphere/Magnetosphere_BCH-like.cfg b/projects/Magnetosphere/Magnetosphere_BCH-like.cfg index c0b3765cd..0062e8187 100644 --- a/projects/Magnetosphere/Magnetosphere_BCH-like.cfg +++ b/projects/Magnetosphere/Magnetosphere_BCH-like.cfg @@ -101,34 +101,33 @@ rebalanceInterval = 10 tolerance = 1.2 [variables] -output = Rhom -output = Rhoq -output = V -output = populations_Rho -output = populations_V -output = populations_moments_Backstream -output = populations_moments_NonBackstream -#output = populations_RhomLossAdjust -output = populations_accSubcycles -output = B -output = VolB -output = E -output = HallE -output = VolE -output = populations_PTensor -output = BoundaryType -output = BoundaryLayer -output = MPIrank -output = FsGridRank -output = LBweight -output = MaxVdt -output = MaxRdt -output = MaxFieldsdt -output = populations_Blocks -output = fSaved -output = populations_MinValue -diagnostic = populations_Blocks -diagnostic = Rhom +output = vg_rhom +output = vg_rhoq +output = vg_v +output = populations_vg_rho +output = populations_vg_v +output = populations_vg_moments_nonthermal +output = populations_vg_moments_thermal +#output = populations_vg_rho_loss_adjust +output = populations_vg_acceleration_subcycles +output = fg_b +output = vg_b_vol +output = fg_e +output = fg_e_hall +output = populations_vg_ptensor +output = vg_boundarytype +output = vg_boundarylayer +output = vg_rank +output = fg_rank +output = vg_loadbalance_weight +output = vg_maxdt_acceleration +output = vg_maxdt_translation +output = fg_maxdt_fieldsolver +output = populations_vg_blocks +output = vg_f_saved +output = populations_vg_effectivesparsitythreshold +diagnostic = populations_vg_blocks +diagnostic = vg_rhom [boundaries] periodic_x = no diff --git a/projects/MultiPeak/MultiPeak.cfg b/projects/MultiPeak/MultiPeak.cfg index 16bcae4ec..7ceb7c3a4 100644 --- a/projects/MultiPeak/MultiPeak.cfg +++ b/projects/MultiPeak/MultiPeak.cfg @@ -50,19 +50,18 @@ periodic_y = yes periodic_z = yes [variables] -output = Rho -output = B -output = BackgroundB -output = PerturbedB -output = Pressure -output = RhoV -output = E -output = PTensor -output = MPIrank -output = Blocks -diagnostic = Blocks -diagnostic = populations_RhoLossAdjust -diagnostic = RhoLossVelBoundary +output = populations_vg_rho +output = fg_b +output = fg_b_background +output = fg_b_perturbed +output = vg_pressure +output = populations_vg_v +output = fg_e +output = populations_vg_ptensor +output = vg_rank +output = populations_vg_blocks +diagnostic = populations_vg_blocks +diagnostic = populations_vg_rho_loss_adjust diagnostic = MaxDistributionFunction 
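(Another aside: the cfg rewrites in this patch are machine-generated, so it is worth recording how the updater touches files.) Neither script edits a config in place: the converted text goes to <name>_new, the original is preserved as <name>_old, and only then is the new file moved over the original. A condensed Python sketch of that tail of tools/update_vlasiator_cfg_variables.py, where the transform callback is a placeholder for the line-rewriting loop:

import shutil

def rewrite_with_backup(infile, transform):
    # Write converted text to <infile>_new, keep the original as <infile>_old,
    # then swap the new file into place, mirroring the script's
    # shutil.copy2(infile, oldinfile); shutil.move(outfile, infile) tail.
    oldinfile = infile + '_old'
    outfile = infile + '_new'
    with open(infile) as inf, open(outfile, 'w') as outf:
        for line in inf:
            outf.write(transform(line))
    shutil.copy2(infile, oldinfile)   # backup keeps timestamps and permissions
    shutil.move(outfile, infile)

# e.g. rewrite_with_backup('Shock.cfg', lambda line: line)  # identity transform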
diagnostic = MinDistributionFunction diff --git a/projects/MultiPeak/MultiPeak120.cfg b/projects/MultiPeak/MultiPeak120.cfg index 218908e2d..e85983a63 100644 --- a/projects/MultiPeak/MultiPeak120.cfg +++ b/projects/MultiPeak/MultiPeak120.cfg @@ -65,19 +65,18 @@ periodic_y = yes periodic_z = yes [variables] -output = Rho -output = B -output = BackgroundB -output = PerturbedB -output = Pressure -output = RhoV -output = E -output = PTensor -output = MPIrank -output = Blocks -diagnostic = Blocks -diagnostic = populations_RhoLossAdjust -diagnostic = RhoLossVelBoundary +output = populations_vg_rho +output = fg_b +output = fg_b_background +output = fg_b_perturbed +output = vg_pressure +output = populations_vg_v +output = fg_e +output = populations_vg_ptensor +output = vg_rank +output = populations_vg_blocks +diagnostic = populations_vg_blocks +diagnostic = populations_vg_rho_loss_adjust diagnostic = MaxDistributionFunction diagnostic = MinDistributionFunction diff --git a/projects/MultiPeak/MultiPeak2.cfg b/projects/MultiPeak/MultiPeak2.cfg index 9faf54acb..c2e23d5d7 100644 --- a/projects/MultiPeak/MultiPeak2.cfg +++ b/projects/MultiPeak/MultiPeak2.cfg @@ -56,19 +56,18 @@ periodic_y = yes periodic_z = yes [variables] -output = Rho -output = B -output = BackgroundB -output = PerturbedB -output = Pressure -output = RhoV -output = E -output = PTensor -output = MPIrank -output = Blocks -diagnostic = Blocks -diagnostic = populations_RhoLossAdjust -diagnostic = RhoLossVelBoundary +output = populations_vg_rho +output = fg_b +output = fg_b_background +output = fg_b_perturbed +output = vg_pressure +output = populations_vg_v +output = fg_e +output = populations_vg_ptensor +output = vg_rank +output = populations_vg_blocks +diagnostic = populations_vg_blocks +diagnostic = populations_vg_rho_loss_adjust diagnostic = MaxDistributionFunction diagnostic = MinDistributionFunction diff --git a/projects/MultiPeak/MultiPeak9.cfg b/projects/MultiPeak/MultiPeak9.cfg index 248491ebc..ac779bd31 100644 --- a/projects/MultiPeak/MultiPeak9.cfg +++ b/projects/MultiPeak/MultiPeak9.cfg @@ -48,19 +48,18 @@ periodic_y = yes periodic_z = yes [variables] -output = Rho -output = B -output = BackgroundB -output = PerturbedB -output = Pressure -output = RhoV -output = E -output = PTensor -output = MPIrank -output = Blocks -diagnostic = Blocks -diagnostic = populations_RhoLossAdjust -diagnostic = RhoLossVelBoundary +output = populations_vg_rho +output = fg_b +output = fg_b_background +output = fg_b_perturbed +output = vg_pressure +output = populations_vg_v +output = fg_e +output = populations_vg_ptensor +output = vg_rank +output = populations_vg_blocks +diagnostic = populations_vg_blocks +diagnostic = populations_vg_rho_loss_adjust diagnostic = MaxDistributionFunction diagnostic = MinDistributionFunction diff --git a/projects/Riemann1/Riemann1.cfg b/projects/Riemann1/Riemann1.cfg index 0b8652058..aafbf3217 100644 --- a/projects/Riemann1/Riemann1.cfg +++ b/projects/Riemann1/Riemann1.cfg @@ -36,11 +36,11 @@ timestep_max = 20000 dt = 0.004 [variables] -output = Rho -output = B -output = Pressure -output = RhoV -diagnostic = Blocks +output = populations_vg_rho +output = fg_b +output = vg_pressure +output = populations_vg_v +diagnostic = populations_vg_blocks [sparse] minValue = 1.0e-15 diff --git a/projects/Shock/Shock.cfg b/projects/Shock/Shock.cfg index c8c4f9488..c5ab81183 100644 --- a/projects/Shock/Shock.cfg +++ b/projects/Shock/Shock.cfg @@ -44,13 +44,11 @@ periodic_y = yes periodic_z = yes [variables] -output = Rho -output = RhoV 
-output = B -output = VolB -output = VolE -diagnostic = FluxB -diagnostic = Blocks +output = populations_vg_rho +output = populations_vg_v +output = fg_b +output = vg_b_vol +diagnostic = populations_vg_blocks [loadBalance] algorithm = RCB diff --git a/projects/Shocktest/Shocktest.cfg b/projects/Shocktest/Shocktest.cfg index c382caa7e..58a7aecc9 100644 --- a/projects/Shocktest/Shocktest.cfg +++ b/projects/Shocktest/Shocktest.cfg @@ -61,23 +61,20 @@ nSpaceSamples = 2 nVelocitySamples = 2 [variables] -output = Rho -output = RhoV -output = B -output = populations_RhoLossAdjust -output = RhoLossVelBoundary -output = Blocks -output = PTensor -output = BoundaryType -output = MPIrank -output = BoundaryLayer -output = Pressure +output = populations_vg_rho +output = populations_vg_v +output = fg_b +output = populations_vg_rho_loss_adjust +output = populations_vg_blocks +output = populations_vg_ptensor +output = vg_boundarytype +output = vg_rank +output = vg_boundarylayer +output = vg_pressure -diagnostic = Blocks -diagnostic = Rho -diagnostic = populations_RhoLossAdjust -diagnostic = RhoLossVelBoundary -diagnostic = FluxB +diagnostic = populations_vg_blocks +diagnostic = populations_vg_rho +diagnostic = populations_vg_rho_loss_adjust [sparse] minValue = 1.0e-15 diff --git a/projects/Template/Template.cfg b/projects/Template/Template.cfg index 6fb8c8d71..29f14c785 100644 --- a/projects/Template/Template.cfg +++ b/projects/Template/Template.cfg @@ -45,19 +45,18 @@ periodic_y = yes periodic_z = yes [variables] -output = Rho -output = B -output = BackgroundB -output = PerturbedB -output = Pressure -output = RhoV -output = E -output = PTensor -output = MPIrank -output = Blocks -diagnostic = Blocks -diagnostic = populations_RhoLossAdjust -diagnostic = RhoLossVelBoundary +output = populations_vg_rho +output = fg_b +output = fg_b_background +output = fg_b_perturbed +output = vg_pressure +output = populations_vg_v +output = fg_e +output = populations_vg_ptensor +output = vg_rank +output = populations_vg_blocks +diagnostic = populations_vg_blocks +diagnostic = populations_vg_rho_loss_adjust diagnostic = MaxDistributionFunction diagnostic = MinDistributionFunction diff --git a/projects/VelocityBox/VelocityBox.cfg b/projects/VelocityBox/VelocityBox.cfg index 5b8d9c5e7..13f662340 100644 --- a/projects/VelocityBox/VelocityBox.cfg +++ b/projects/VelocityBox/VelocityBox.cfg @@ -46,17 +46,16 @@ periodic_y = yes periodic_z = yes [variables] -output = Rho -output = B -output = Pressure -output = RhoV -output = E -output = PTensor -output = MPIrank -diagnostic = Blocks -diagnostic = Rho -diagnostic = populations_RhoLossAdjust -diagnostic = RhoLossVelBoundary +output = populations_vg_rho +output = fg_b +output = vg_pressure +output = populations_vg_v +output = fg_e +output = populations_vg_ptensor +output = vg_rank +diagnostic = populations_vg_blocks +diagnostic = populations_vg_rho +diagnostic = populations_vg_rho_loss_adjust [sparse] minValue = 1.0e-15 diff --git a/projects/testAmr/testAmr.cfg b/projects/testAmr/testAmr.cfg index c17bcfabd..8b52162d9 100644 --- a/projects/testAmr/testAmr.cfg +++ b/projects/testAmr/testAmr.cfg @@ -62,20 +62,19 @@ periodic_y = yes periodic_z = yes [variables] -output = populations_Rho -output = B -output = Pressure -output = populations_V -output = E -output = MPIrank -output = populations_Blocks -#output = VelocitySubSteps +output = populations_vg_rho +output = fg_b +output = vg_pressure +output = populations_vg_v +output = fg_e +output = vg_rank +output = populations_vg_blocks 
+#output = populations_vg_acceleration_subcycles -diagnostic = populations_Blocks -#diagnostic = Pressure -#diagnostic = populations_Rho -#diagnostic = populations_RhoLossAdjust -#diagnostic = populations_RhoLossVelBoundary +diagnostic = populations_vg_blocks +#diagnostic = vg_pressure +#diagnostic = populations_vg_rho +#diagnostic = populations_vg_rho_loss_adjust [testAmr] #magnitude of 1.82206867e-10 gives a period of 360s, useful for testing... diff --git a/projects/testHall/testHall.cfg b/projects/testHall/testHall.cfg index 6512361d3..a54117077 100644 --- a/projects/testHall/testHall.cfg +++ b/projects/testHall/testHall.cfg @@ -44,10 +44,9 @@ periodic_y = yes periodic_z = yes [variables] -output = Rho -output = B -diagnostic = FluxB -diagnostic = Blocks +output = populations_vg_rho +output = fg_b +diagnostic = populations_vg_blocks [TestHall] BX0 = 1.0e-9 diff --git a/projects/test_fp/test_fp.cfg b/projects/test_fp/test_fp.cfg index 7d0ed5b06..eabac537b 100644 --- a/projects/test_fp/test_fp.cfg +++ b/projects/test_fp/test_fp.cfg @@ -59,15 +59,14 @@ periodic_y = yes periodic_z = yes [variables] -output = Rho -output = E -output = B -output = BackgroundB -output = PerturbedB -output = RhoV -output = populations_Blocks -diagnostic = FluxB -diagnostic = populations_Blocks +output = populations_vg_rho +output = fg_e +output = fg_b +output = fg_b_background +output = fg_b_perturbed +output = populations_vg_v +output = populations_vg_blocks +diagnostic = populations_vg_blocks [sparse] minValue = 1e-15 diff --git a/projects/test_trans/test_trans.cfg b/projects/test_trans/test_trans.cfg index c01d38fee..61c4d0b7b 100644 --- a/projects/test_trans/test_trans.cfg +++ b/projects/test_trans/test_trans.cfg @@ -60,10 +60,10 @@ sparse_min_value = 1e-7 mesh = IonMesh [variables] -output = Rho -output = Blocks -diagnostic = Rho -diagnostic = Blocks +output = populations_vg_rho +output = populations_vg_blocks +diagnostic = populations_vg_rho +diagnostic = populations_vg_blocks [test_trans] cellPosition = 1.5 diff --git a/projects/verificationLarmor/verificationLarmor.cfg b/projects/verificationLarmor/verificationLarmor.cfg index e286183cd..acda9868d 100644 --- a/projects/verificationLarmor/verificationLarmor.cfg +++ b/projects/verificationLarmor/verificationLarmor.cfg @@ -47,14 +47,13 @@ periodic_y = yes periodic_z = yes [variables] -output = Rho -output = RhoV -output = B -output = VolB -output = MPIrank -output = Blocks -diagnostic = FluxB -diagnostic = Blocks +output = populations_vg_rho +output = populations_vg_v +output = fg_b +output = vg_b_vol +output = vg_rank +output = populations_vg_blocks +diagnostic = populations_vg_blocks [loadBalance] algorithm = RCB diff --git a/testpackage/small_test_definitions.sh b/testpackage/small_test_definitions.sh index e552aaa2c..5b4c4ac60 100644 --- a/testpackage/small_test_definitions.sh +++ b/testpackage/small_test_definitions.sh @@ -21,8 +21,7 @@ run_tests=( 1 2 3 4 5 6 7 8 9 10 11 12 13 14) # acceleration test test_name[1]="acctest_2_maxw_500k_100k_20kms_10deg" comparison_vlsv[1]="fullf.0000001.vlsv" -#only one process does anything -> in _1 phiprof here -comparison_phiprof[1]="phiprof_1.txt" +comparison_phiprof[1]="phiprof_0.txt" variable_names[1]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v protons" variable_components[1]="0 0 1 2" single_cell[1]=1 @@ -30,8 +29,7 @@ single_cell[1]=1 # acceleration test w/ substepping test_name[2]="acctest_3_substeps" comparison_vlsv[2]="fullf.0000001.vlsv" -#only one process does anything -> in _1 phiprof here 
-comparison_phiprof[2]="phiprof_1.txt" +comparison_phiprof[2]="phiprof_0.txt" variable_names[2]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v protons" variable_components[2]="0 0 1 2" single_cell[2]=1 @@ -45,7 +43,7 @@ variable_components[3]="0 0 1 2" test_name[4]="acctest_4_helium" comparison_vlsv[4]="fullf.0000001.vlsv" -comparison_phiprof[4]="phiprof_1.txt" +comparison_phiprof[4]="phiprof_0.txt" variable_names[4]="helium/vg_rho helium/vg_v helium/vg_v helium/vg_v" variable_components[4]="0 0 1 2" single_cell[4]=1 @@ -53,8 +51,7 @@ single_cell[4]=1 # Gyration test with protons and antiprotons test_name[5]="acctest_5_proton_antiproton" comparison_vlsv[5]="fullf.0000001.vlsv" -#only one process does anything -> in _1 phiprof here -comparison_phiprof[5]="phiprof_1.txt" +comparison_phiprof[5]="phiprof_0.txt" variable_names[5]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v protons" variable_components[5]="0 0 1 2" single_cell[5]=1 diff --git a/testpackage/tests/Selfgen_Waves_Periodic/Selfgen_Waves_Periodic.cfg b/testpackage/tests/Selfgen_Waves_Periodic/Selfgen_Waves_Periodic.cfg index 5a49e7c8d..118c5d57f 100644 --- a/testpackage/tests/Selfgen_Waves_Periodic/Selfgen_Waves_Periodic.cfg +++ b/testpackage/tests/Selfgen_Waves_Periodic/Selfgen_Waves_Periodic.cfg @@ -60,13 +60,12 @@ output = populations_vg_rho output = fg_e output = vg_rank output = populations_vg_blocks -#output = VelocitySubSteps +#output = populations_vg_acceleration_subcycles diagnostic = populations_vg_blocks -#diagnostic = Pressure -#diagnostic = populations_Rho -#diagnostic = populations_RhoLossAdjust -#diagnostic = populations_RhoLossVelBoundary +#diagnostic = vg_pressure +#diagnostic = populations_vg_rho +#diagnostic = populations_vg_rho_loss_adjust [MultiPeak] #magnitude of 1.82206867e-10 gives a period of 360s, useful for testing... 
diff --git a/testpackage/tests/acctest_1_maxw_500k_30kms_1deg/acctest_1_maxw_500k_30kms_1deg.cfg b/testpackage/tests/acctest_1_maxw_500k_30kms_1deg/acctest_1_maxw_500k_30kms_1deg.cfg index 80e04ba1a..012b973b9 100644 --- a/testpackage/tests/acctest_1_maxw_500k_30kms_1deg/acctest_1_maxw_500k_30kms_1deg.cfg +++ b/testpackage/tests/acctest_1_maxw_500k_30kms_1deg/acctest_1_maxw_500k_30kms_1deg.cfg @@ -45,7 +45,6 @@ output = vg_boundarylayer output = populations_vg_blocks output = vg_f_saved output = populations_vg_acceleration_subcycles -output = VolE output = fg_e_hall output = vg_e_gradpe output = vg_b_vol @@ -55,9 +54,9 @@ output = vg_pressure output = populations_vg_ptensor output = vg_b_vol_derivatives output = vg_b_vol_derivatives -output = GridCoordinates +output = vg_gridcoordinates output = MeshData -#output = VelocitySubSteps +#output = populations_vg_acceleration_subcycles diagnostic = populations_vg_blocks diagnostic = vg_rhom diff --git a/testpackage/tests/acctest_2_maxw_500k_100k_20kms_10deg/acctest_2_maxw_500k_100k_20kms_10deg.cfg b/testpackage/tests/acctest_2_maxw_500k_100k_20kms_10deg/acctest_2_maxw_500k_100k_20kms_10deg.cfg index 56e3d86f1..8428248cd 100644 --- a/testpackage/tests/acctest_2_maxw_500k_100k_20kms_10deg/acctest_2_maxw_500k_100k_20kms_10deg.cfg +++ b/testpackage/tests/acctest_2_maxw_500k_100k_20kms_10deg/acctest_2_maxw_500k_100k_20kms_10deg.cfg @@ -26,13 +26,12 @@ output = fg_e output = vg_rank output = populations_vg_blocks output = populations_vg_rho -#output = VelocitySubSteps +#output = populations_vg_acceleration_subcycles diagnostic = populations_vg_blocks -#diagnostic = Pressure -#diagnostic = populations_Rho -#diagnostic = populations_RhoLossAdjust -#diagnostic = populations_RhoLossVelBoundary +#diagnostic = vg_pressure +#diagnostic = populations_vg_rho +#diagnostic = populations_vg_rho_loss_adjust [gridbuilder] x_length = 1 diff --git a/testpackage/tests/acctest_3_substeps/acctest_3_substeps.cfg b/testpackage/tests/acctest_3_substeps/acctest_3_substeps.cfg index 233266fd9..13708dd1a 100644 --- a/testpackage/tests/acctest_3_substeps/acctest_3_substeps.cfg +++ b/testpackage/tests/acctest_3_substeps/acctest_3_substeps.cfg @@ -26,13 +26,12 @@ output = fg_e output = vg_rank output = populations_vg_blocks output = populations_vg_rho -#output = VelocitySubSteps +#output = populations_vg_acceleration_subcycles diagnostic = populations_vg_blocks -#diagnostic = Pressure -#diagnostic = populations_Rho -#diagnostic = populations_RhoLossAdjust -#diagnostic = populations_RhoLossVelBoundary +#diagnostic = vg_pressure +#diagnostic = populations_vg_rho +#diagnostic = populations_vg_rho_loss_adjust [gridbuilder] x_length = 1 diff --git a/testpackage/tests/acctest_4_helium/acctest_4_helium.cfg b/testpackage/tests/acctest_4_helium/acctest_4_helium.cfg index 80360da4e..7c885693d 100644 --- a/testpackage/tests/acctest_4_helium/acctest_4_helium.cfg +++ b/testpackage/tests/acctest_4_helium/acctest_4_helium.cfg @@ -26,13 +26,12 @@ output = fg_e output = vg_rank output = populations_vg_blocks output = populations_vg_rho -#output = VelocitySubSteps +#output = populations_vg_acceleration_subcycles diagnostic = populations_vg_blocks -#diagnostic = Pressure -#diagnostic = populations_Rho -#diagnostic = populations_RhoLossAdjust -#diagnostic = populations_RhoLossVelBoundary +#diagnostic = vg_pressure +#diagnostic = populations_vg_rho +#diagnostic = populations_vg_rho_loss_adjust [gridbuilder] x_length = 1 diff --git 
a/testpackage/tests/acctest_5_proton_antiproton/acctest_5_proton_antiproton.cfg b/testpackage/tests/acctest_5_proton_antiproton/acctest_5_proton_antiproton.cfg index dba928f8a..11918b346 100644 --- a/testpackage/tests/acctest_5_proton_antiproton/acctest_5_proton_antiproton.cfg +++ b/testpackage/tests/acctest_5_proton_antiproton/acctest_5_proton_antiproton.cfg @@ -27,12 +27,12 @@ output = vg_rhoq output = populations_vg_rho output = vg_v output = populations_vg_v -#output = MaxVdt -#output = MaxRdt -#output = populations_MaxVdt -#output = populations_MaxRdt +#output = vg_maxdt_acceleration +#output = vg_maxdt_translation +#output = populations_vg_maxdt_acceleration +#output = populations_vg_maxdt_translation output = populations_vg_blocks -#output = populations_accSubcycles +#output = populations_vg_acceleration_subcycles diagnostic = populations_vg_blocks diff --git a/testpackage/tests/transtest_2_maxw_500k_100k_20kms_20x20/transtest_2_maxw_500k_100k_20kms_20x20.cfg b/testpackage/tests/transtest_2_maxw_500k_100k_20kms_20x20/transtest_2_maxw_500k_100k_20kms_20x20.cfg index dac3ae93f..ca7ca25b0 100644 --- a/testpackage/tests/transtest_2_maxw_500k_100k_20kms_20x20/transtest_2_maxw_500k_100k_20kms_20x20.cfg +++ b/testpackage/tests/transtest_2_maxw_500k_100k_20kms_20x20/transtest_2_maxw_500k_100k_20kms_20x20.cfg @@ -61,13 +61,12 @@ output = populations_vg_v output = fg_e output = vg_rank output = populations_vg_blocks -#output = VelocitySubSteps +#output = populations_vg_acceleration_subcycles diagnostic = populations_vg_blocks -#diagnostic = Pressure -#diagnostic = populations_Rho -#diagnostic = populations_RhoLossAdjust -#diagnostic = populations_RhoLossVelBoundary +#diagnostic = vg_pressure +#diagnostic = populations_vg_rho +#diagnostic = populations_vg_rho_loss_adjust [MultiPeak] #magnitude of 1.82206867e-10 gives a period of 360s, useful for testing... diff --git a/testpackage/tests/transtest_amr/transtest_amr.cfg b/testpackage/tests/transtest_amr/transtest_amr.cfg index a36fbfc5a..676f1f965 100644 --- a/testpackage/tests/transtest_amr/transtest_amr.cfg +++ b/testpackage/tests/transtest_amr/transtest_amr.cfg @@ -69,13 +69,12 @@ output = populations_vg_v output = fg_e output = vg_rank output = populations_vg_blocks -#output = VelocitySubSteps +#output = populations_vg_acceleration_subcycles diagnostic = populations_vg_blocks -#diagnostic = Pressure -#diagnostic = populations_Rho -#diagnostic = populations_RhoLossAdjust -#diagnostic = populations_RhoLossVelBoundary +#diagnostic = vg_pressure +#diagnostic = populations_vg_rho +#diagnostic = populations_vg_rho_loss_adjust [testAmr] #magnitude of 1.82206867e-10 gives a period of 360s, useful for testing... 
diff --git a/testpackage/tests/update_variables.py b/testpackage/tests/update_variables.py deleted file mode 100644 index a0a7a2243..000000000 --- a/testpackage/tests/update_variables.py +++ /dev/null @@ -1,156 +0,0 @@ -import numpy as np -import sys,os -import shutil - -tests = [ - 'acctest_1_maxw_500k_30kms_1deg', - 'acctest_2_maxw_500k_100k_20kms_10deg', - 'acctest_3_substeps', - 'acctest_4_helium', - 'acctest_5_proton_antiproton', - 'Flowthrough_amr', - 'Flowthrough_trans_periodic', - 'Flowthrough_x_inflow_y_outflow', - 'Flowthrough_x_inflow_y_outflow_acc', - 'Magnetosphere_polar_small', - 'Magnetosphere_small', - 'restart_read', - 'restart_write', - 'Selfgen_Waves_Periodic', - 'test_fp_fsolver_only_3D', - 'test_fp_substeps', - 'transtest_2_maxw_500k_100k_20kms_20x20', - 'transtest_amr', -] - -replacecategories = { - 'backstream': 'thermal', -} - -delete = ['fluxb', 'fluxe'] - -replace = { - 'b': 'fg_b', - 'fg_backgroundb': 'fg_b_background', - 'backgroundb': 'fg_b_background', - 'perturbedb': 'fg_b_perturbed', - 'fg_perturbedb': 'fg_b_perturbed', - 'e': 'fg_e', - 'rhom': 'vg_rhom', - 'rhoq': 'vg_rhoq', - 'populations_rho': 'populations_vg_rho', - # '': 'fg_rhom', - # '': 'fg_rhoq', - 'v': 'vg_v', - # '': 'fg_v', - 'populations_v': 'populations_vg_v', - 'populations_moments_nonbackstream': 'populations_vg_moments_thermal', - 'populations_moments_thermal': 'populations_vg_moments_thermal', - 'populations_moments_backstream': 'populations_vg_moments_nonthermal', - 'populations_moments_nonthermal': 'populations_vg_moments_nonthermal', - 'populations_minvalue': 'populations_vg_effectivesparsitythreshold', - 'populations_effectivesparsitythreshold': 'populations_vg_effectivesparsitythreshold', - 'populations_rholossadjust': 'populations_vg_rho_loss_adjust', - 'populations_rho_loss_adjust': 'populations_vg_rho_loss_adjust', - 'populations_energydensity': 'populations_vg_energydensity', - 'populations_precipitationflux': 'populations_vg_precipitationdifferentialflux', - 'populations_precipitationdifferentialflux': 'populations_vg_precipitationdifferentialflux', - 'maxvdt': 'vg_maxdt_acceleration', - 'maxrdt': 'vg_maxdt_translation', - 'populations_maxvdt': 'populations_vg_maxdt_acceleration', - 'populations_maxdt_acceleration': 'populations_vg_maxdt_acceleration', - 'populations_maxrdt': 'populations_vg_maxdt_translation', - 'populations_maxdt_translation': 'populations_vg_maxdt_translation', - 'maxfieldsdt': 'fg_maxdt_fieldsolver', - 'fg_maxfieldsdt': 'fg_maxdt_fieldsolver', - 'mpirank': 'vg_rank', - 'fsgridrank': 'fg_rank', - 'lbweight': 'vg_loadbalance_weight', - 'vg_lbweight': 'vg_loadbalance_weight', - 'vg_loadbalanceweight': 'vg_loadbalance_weight', - 'boundarytype': 'vg_boundarytype', - 'fsgridboundarytype': 'fg_boundarytype', - 'boundarylayer': 'vg_boundarylayer', - 'fsgridboundarylayer': 'fg_boundarylayer', - 'populations_blocks': 'populations_vg_blocks', - 'fsaved': 'vg_f_saved', - 'vg_fsaved': 'vg_f_saved', - 'populations_accsubcycles': 'populations_vg_acceleration_subcycles', - 'populations_acceleration_subcycles': 'populations_vg_acceleration_subcycles', - 'halle': 'fg_e_hall', - 'fg_halle': 'fg_e_hall', - 'gradpee': 'vg_e_gradpe', - 'e_gradpe': 'vg_e_gradpe', - 'volb': 'vg_b_vol', - 'vg_volb': 'vg_b_vol', - 'b_vol': 'vg_b_vol', - 'bvol': 'vg_b_vol', - 'vg_bvol': 'vg_b_vol', - 'fg_volb': 'fg_b_vol', - 'fg_bvol': 'fg_b_vol', - 'backgroundvolb': 'vg_b_background_vol', - 'perturbedvolb': 'vg_b_perturbed_vol', - 'pressure': 'vg_pressure', - # '': 'fg_pressure', - 
'populations_ptensor': 'populations_vg_ptensor', - 'bvolderivs': 'vg_b_vol_derivatives', - 'derivs': 'vg_b_vol_derivatives', - 'b_vol_derivs': 'vg_b_vol_derivatives', - 'b_vol_derivatives': 'vg_b_vol_derivatives', - # '': 'vg_gridcoordinates', - # '': 'fg_gridcoordinates meshdata', -} - -for dir in tests: - infile = dir+'/'+dir+'.cfg' - oldinfile = dir+'/'+dir+'_old.cfg' - outfile = dir+'/'+dir+'_new.cfg' - - inf = open(infile, 'r') - outf = open(outfile, 'w') - - for line in inf: - sline = line.strip() - columns = sline.split() - passdirect = True - if len(columns)>=3: - if columns[0] == 'output' and columns[1] == '=': - if columns[2].lower() in replace: - outf.write('output = '+replace[columns[2].lower()]+'\n') - passdirect = False - elif columns[2].lower() in delete: - print('Removed deprecated output line:') - print(' '+line) - passdirect = False - else: - print('passing following output line directly through:') - print(' '+line) - elif columns[0] == 'diagnostic' and columns[1] == '=': - if columns[2].lower() in replace: - outf.write('diagnostic = '+replace[columns[2].lower()]+'\n') - passdirect = False - elif columns[2].lower() in delete: - print('Removed deprecated diagnostic line:') - print(' '+line) - passdirect = False - else: - print('passing following diagnostic line directly through:') - print(' '+line) - elif len(columns)==1: - if columns[0][0]=='[': - for category in replacecategories: - lencat = len(category) - if category == columns[0][-1-lencat:-1]: - outf.write(columns[0][:-1-lencat]+replacecategories[category]+']\n') - passdirect = False - - if passdirect: - outf.write(line) - - inf.close() - outf.flush() - outf.close() - - shutil.copy2(infile,oldinfile) - shutil.move(outfile,infile) - diff --git a/tools/update_project_configfiles.py b/tools/update_project_configfiles.py new file mode 100644 index 000000000..e3f60f81f --- /dev/null +++ b/tools/update_project_configfiles.py @@ -0,0 +1,38 @@ +import numpy as np +import sys,os +import glob +from update_vlasiator_cfg_variables import updatecfg + +projects = [ + 'Alfven', + 'Diffusion', + 'Dispersion', + 'Distributions', + 'Firehose', + 'Flowthrough', + 'Fluctuations', + 'Harris', + 'IPShock', + 'KHB', + 'Larmor', + 'Magnetosphere', + 'MultiPeak', + 'Riemann1', + 'Shock', + 'Shocktest', + 'Template', + 'testAmr', + 'test_fp', + 'testHall', + 'test_trans', + 'unsupported', + 'VelocityBox', + 'verificationLarmor' +] + +for project in projects: + directory = '../projects/'+project+'/' + for file in glob.glob(directory+"*.cfg"): + print(file) + updatecfg(file, verbose=True) + diff --git a/tools/update_testpackage_configfiles.py b/tools/update_testpackage_configfiles.py new file mode 100644 index 000000000..669eed783 --- /dev/null +++ b/tools/update_testpackage_configfiles.py @@ -0,0 +1,32 @@ +import numpy as np +import sys,os +import glob +from update_vlasiator_cfg_variables import updatecfg + +tests = [ + 'acctest_1_maxw_500k_30kms_1deg', + 'acctest_2_maxw_500k_100k_20kms_10deg', + 'acctest_3_substeps', + 'acctest_4_helium', + 'acctest_5_proton_antiproton', + 'Flowthrough_amr', + 'Flowthrough_trans_periodic', + 'Flowthrough_x_inflow_y_outflow', + 'Flowthrough_x_inflow_y_outflow_acc', + 'Magnetosphere_polar_small', + 'Magnetosphere_small', + 'restart_read', + 'restart_write', + 'Selfgen_Waves_Periodic', + 'test_fp_fsolver_only_3D', + 'test_fp_substeps', + 'transtest_2_maxw_500k_100k_20kms_20x20', + 'transtest_amr', +] + +for test in tests: + directory = '../testpackage/tests/'+test+'/' + for file in glob.glob(directory+"*.cfg"): 
+ print(file) + updatecfg(file, verbose=True) + diff --git a/tools/update_vlasiator_cfg_variables.py b/tools/update_vlasiator_cfg_variables.py index 88695b99a..b228310a9 100644 --- a/tools/update_vlasiator_cfg_variables.py +++ b/tools/update_vlasiator_cfg_variables.py @@ -2,141 +2,168 @@ import sys,os import shutil -if len(sys.argv)!=2: - print("Usage: python update_vlasiator_cfg_variables.py full_path_to_config_file.cfg") -else: - infile = sys.argv[1] - if not os.path.exists(infile): - print("Error locating config file "+infile) - exit +def updatecfg(infile, verbose=False): -replacecategories = { - 'backstream': 'thermal', -} + replacecategories = { + 'backstream': 'thermal', + } -delete = ['fluxb', 'fluxe'] + delete = ['fluxb', 'fluxe', 'populations_rholossvelboundary', 'rholossvelboundary', 'vole'] -replace = { - 'b': 'fg_b', - 'fg_backgroundb': 'fg_b_background', - 'backgroundb': 'fg_b_background', - 'perturbedb': 'fg_b_perturbed', - 'fg_perturbedb': 'fg_b_perturbed', - 'e': 'fg_e', - 'rhom': 'vg_rhom', - 'rhoq': 'vg_rhoq', - 'populations_rho': 'populations_vg_rho', - # '': 'fg_rhom', - # '': 'fg_rhoq', - 'v': 'vg_v', - # '': 'fg_v', - 'populations_v': 'populations_vg_v', - 'populations_moments_nonbackstream': 'populations_vg_moments_thermal', - 'populations_moments_thermal': 'populations_vg_moments_thermal', - 'populations_moments_backstream': 'populations_vg_moments_nonthermal', - 'populations_moments_nonthermal': 'populations_vg_moments_nonthermal', - 'populations_minvalue': 'populations_vg_effectivesparsitythreshold', - 'populations_effectivesparsitythreshold': 'populations_vg_effectivesparsitythreshold', - 'populations_rholossadjust': 'populations_vg_rho_loss_adjust', - 'populations_rho_loss_adjust': 'populations_vg_rho_loss_adjust', - 'populations_energydensity': 'populations_vg_energydensity', - 'populations_precipitationflux': 'populations_vg_precipitationdifferentialflux', - 'populations_precipitationdifferentialflux': 'populations_vg_precipitationdifferentialflux', - 'maxvdt': 'vg_maxdt_acceleration', - 'maxrdt': 'vg_maxdt_translation', - 'populations_maxvdt': 'populations_vg_maxdt_acceleration', - 'populations_maxdt_acceleration': 'populations_vg_maxdt_acceleration', - 'populations_maxrdt': 'populations_vg_maxdt_translation', - 'populations_maxdt_translation': 'populations_vg_maxdt_translation', - 'maxfieldsdt': 'fg_maxdt_fieldsolver', - 'fg_maxfieldsdt': 'fg_maxdt_fieldsolver', - 'mpirank': 'vg_rank', - 'fsgridrank': 'fg_rank', - 'lbweight': 'vg_loadbalance_weight', - 'vg_lbweight': 'vg_loadbalance_weight', - 'vg_loadbalanceweight': 'vg_loadbalance_weight', - 'boundarytype': 'vg_boundarytype', - 'fsgridboundarytype': 'fg_boundarytype', - 'boundarylayer': 'vg_boundarylayer', - 'fsgridboundarylayer': 'fg_boundarylayer', - 'populations_blocks': 'populations_vg_blocks', - 'fsaved': 'vg_f_saved', - 'vg_fsaved': 'vg_f_saved', - 'populations_accsubcycles': 'populations_vg_acceleration_subcycles', - 'populations_acceleration_subcycles': 'populations_vg_acceleration_subcycles', - 'halle': 'fg_e_hall', - 'fg_halle': 'fg_e_hall', - 'gradpee': 'vg_e_gradpe', - 'e_gradpe': 'vg_e_gradpe', - 'volb': 'vg_b_vol', - 'vg_volb': 'vg_b_vol', - 'b_vol': 'vg_b_vol', - 'bvol': 'vg_b_vol', - 'vg_bvol': 'vg_b_vol', - 'fg_volb': 'fg_b_vol', - 'fg_bvol': 'fg_b_vol', - 'backgroundvolb': 'vg_b_background_vol', - 'perturbedvolb': 'vg_b_perturbed_vol', - 'pressure': 'vg_pressure', - # '': 'fg_pressure', - 'populations_ptensor': 'populations_vg_ptensor', - 'bvolderivs': 'vg_b_vol_derivatives', - 'derivs': 
'vg_b_vol_derivatives', - 'b_vol_derivs': 'vg_b_vol_derivatives', - 'b_vol_derivatives': 'vg_b_vol_derivatives', - # '': 'vg_gridcoordinates', - # '': 'fg_gridcoordinates meshdata', -} + replace = { + # First deal with some ancient pre-multipop variables + 'rho': 'populations_vg_rho', + 'rhov': 'populations_vg_v', + 'blocks': 'populations_vg_blocks', + 'ptensor': 'populations_vg_ptensor', + 'rhobackstream': 'populations_vg_moments_nonthermal', + 'rhononbackstream': 'populations_vg_moments_thermal', + 'rhovbackstream': 'populations_vg_moments_nonthermal', + 'rhovnonbackstream': 'populations_vg_moments_thermal', + 'ptensorbackstream': 'populations_vg_moments_nonthermal', + 'ptensornonbackstream': 'populations_vg_moments_thermal', + 'accsubcycles': 'populations_vg_acceleration_subcycles', + 'minvalue': 'populations_vg_effectivesparsitythreshold', + # fields + 'b': 'fg_b', + 'fg_backgroundb': 'fg_b_background', + 'backgroundb': 'fg_b_background', + 'perturbedb': 'fg_b_perturbed', + 'fg_perturbedb': 'fg_b_perturbed', + 'e': 'fg_e', + 'rhom': 'vg_rhom', + 'rhoq': 'vg_rhoq', + 'v': 'vg_v', + # per-population + 'populations_rho': 'populations_vg_rho', + 'populations_v': 'populations_vg_v', + 'populations_blocks': 'populations_vg_blocks', + 'populations_ptensor': 'populations_vg_ptensor', + 'populations_moments_nonbackstream': 'populations_vg_moments_thermal', + 'populations_moments_thermal': 'populations_vg_moments_thermal', + 'populations_moments_backstream': 'populations_vg_moments_nonthermal', + 'populations_moments_nonthermal': 'populations_vg_moments_nonthermal', + 'populations_minvalue': 'populations_vg_effectivesparsitythreshold', + 'populations_effectivesparsitythreshold': 'populations_vg_effectivesparsitythreshold', + 'populations_rholossadjust': 'populations_vg_rho_loss_adjust', + 'populations_rhomlossadjust': 'populations_vg_rho_loss_adjust', + 'populations_rho_loss_adjust': 'populations_vg_rho_loss_adjust', + 'populations_energydensity': 'populations_vg_energydensity', + 'populations_precipitationflux': 'populations_vg_precipitationdifferentialflux', + 'populations_precipitationdifferentialflux': 'populations_vg_precipitationdifferentialflux', + 'populations_maxvdt': 'populations_vg_maxdt_acceleration', + 'populations_maxdt_acceleration': 'populations_vg_maxdt_acceleration', + 'populations_maxrdt': 'populations_vg_maxdt_translation', + 'populations_maxdt_translation': 'populations_vg_maxdt_translation', + 'populations_accsubcycles': 'populations_vg_acceleration_subcycles', + 'populations_acceleration_subcycles': 'populations_vg_acceleration_subcycles', + 'velocitysubsteps': 'populations_vg_acceleration_subcycles', + # Other variables + 'maxvdt': 'vg_maxdt_acceleration', + 'maxrdt': 'vg_maxdt_translation', + 'maxfieldsdt': 'fg_maxdt_fieldsolver', + 'fg_maxfieldsdt': 'fg_maxdt_fieldsolver', + 'mpirank': 'vg_rank', + 'fsgridrank': 'fg_rank', + 'lbweight': 'vg_loadbalance_weight', + 'vg_lbweight': 'vg_loadbalance_weight', + 'vg_loadbalanceweight': 'vg_loadbalance_weight', + 'boundarytype': 'vg_boundarytype', + 'fsgridboundarytype': 'fg_boundarytype', + 'boundarylayer': 'vg_boundarylayer', + 'fsgridboundarylayer': 'fg_boundarylayer', + 'gridcoordinates': 'vg_gridcoordinates', + 'fsaved': 'vg_f_saved', + 'vg_fsaved': 'vg_f_saved', + 'halle': 'fg_e_hall', + 'fg_halle': 'fg_e_hall', + 'gradpee': 'vg_e_gradpe', + 'e_gradpe': 'vg_e_gradpe', + 'volb': 'vg_b_vol', + 'vg_volb': 'vg_b_vol', + 'b_vol': 'vg_b_vol', + 'bvol': 'vg_b_vol', + 'vg_bvol': 'vg_b_vol', + 'fg_volb': 'fg_b_vol', + 'fg_bvol': 
'fg_b_vol',
+        'backgroundvolb': 'vg_b_background_vol',
+        'perturbedvolb': 'vg_b_perturbed_vol',
+        'pressure': 'vg_pressure',
+        'bvolderivs': 'vg_b_vol_derivatives',
+        'derivs': 'vg_b_vol_derivatives',
+        'b_vol_derivs': 'vg_b_vol_derivatives',
+        'b_vol_derivatives': 'vg_b_vol_derivatives',
+        # Unlisted (accepted) variables are passed through unchanged; matching is done in lowercase, as Vlasiator-5 output variable names are case-insensitive
+    }

-oldinfile = infile+'_old'
-outfile = infile+'_new'
+    oldinfile = infile+'_old'
+    outfile = infile+'_new'

-inf = open(infile, 'r')
-outf = open(outfile, 'w')
+    inf = open(infile, 'r')
+    outf = open(outfile, 'w')

-for line in inf:
-    sline = line.strip()
-    columns = sline.split()
-    passdirect = True
-    if len(columns)>=3:
-        if columns[0] == 'output' and columns[1] == '=':
-            if columns[2].lower() in replace:
-                outf.write('output = '+replace[columns[2].lower()]+'\n')
-                passdirect = False
-            elif columns[2].lower() in delete:
-                print('Removed deprecated output line:')
-                print('   '+line)
-                passdirect = False
-            else:
-                print('passing following output line directly through:')
-                print('   '+line)
-        elif columns[0] == 'diagnostic' and columns[1] == '=':
-            if columns[2].lower() in replace:
-                outf.write('diagnostic = '+replace[columns[2].lower()]+'\n')
-                passdirect = False
-            elif columns[2].lower() in delete:
-                print('Removed deprecated diagnostic line:')
-                print('   '+line)
-                passdirect = False
-            else:
-                print('passing following diagnostic line directly through:')
-                print('   '+line)
-    elif len(columns)==1:
-        if columns[0][0]=='[':
-            for category in replacecategories:
-                lencat = len(category)
-                if category == columns[0][-1-lencat:-1]:
-                    outf.write(columns[0][:-1-lencat]+replacecategories[category]+']\n')
+    for line in inf:
+        sline = line.strip()
+        columns = sline.split()
+        passdirect = True
+        if len(columns)>=3:
+            if ((columns[0] == 'output') or (columns[0] == '#output')) and columns[1] == '=':
+                if columns[2].lower() in replace:
+                    genline = columns[0]+' = '+replace[columns[2].lower()]+'\n'
+                    outf.write(genline)
+                    if verbose: print('Replaced '+line[:-1]+"\n   with "+genline[:-1])
+                    passdirect = False
+                elif columns[2].lower() in delete:
+                    if verbose: print('Removed '+line[:-1])
+                    passdirect = False
+                else:
+                    if verbose: print('Passed '+line[:-1])
+                    pass
+            elif ((columns[0] == 'diagnostic') or (columns[0] == '#diagnostic')) and columns[1] == '=':
+                if columns[2].lower() in replace:
+                    genline = columns[0]+' = '+replace[columns[2].lower()]+'\n'
+                    if verbose: print('Replaced '+line[:-1]+"\n   with "+genline[:-1])
+                    outf.write(genline)
+                    passdirect = False
+                elif columns[2].lower() in delete:
+                    if verbose: print('Removed '+line[:-1])
+                    passdirect = False
+                else:
+                    if verbose: print('Passed '+line[:-1])
+                    pass
+
+        elif len(columns)==1:
+            if columns[0][0]=='[':
+                for category in replacecategories:
+                    lencat = len(category)
+                    if category == columns[0][-1-lencat:-1]:
+                        genline = columns[0][:-1-lencat]+replacecategories[category]+']\n'
+                        if verbose: print('Replaced '+line[:-1]+"\n   with "+genline[:-1])
+                        outf.write(genline)
+                        passdirect = False

-    if passdirect:
-        outf.write(line)
+        if passdirect:
+            outf.write(line)

-inf.close()
-outf.flush()
-outf.close()
+    inf.close()
+    outf.flush()
+    outf.close()

-# Backup the old config file, and replace it with the new one
-shutil.copy2(infile,oldinfile)
-shutil.move(outfile,infile)
+    # Backup the old config file, and replace it with the new one
+    shutil.copy2(infile,oldinfile)
+    shutil.move(outfile,infile)
+
+if __name__== "__main__":
+    if len(sys.argv) < 2 or len(sys.argv) > 3:
+        print("Usage: python update_vlasiator_cfg_variables.py full_path_to_config_file.cfg [--verbose]")
+    else:
+        verbose = False
+        infile = sys.argv[1]
+        if not os.path.exists(infile):
+            print("Error locating config file "+infile)
+            exit()
+        if len(sys.argv) > 2 and sys.argv[2] == "--verbose":
+            verbose = True
+        updatecfg(infile, verbose=verbose)

From df0a75802727c37bd40b93a3c970037e2b76f50f Mon Sep 17 00:00:00 2001
From: Markus Battarbee
Date: Thu, 5 Sep 2019 16:28:33 +0300
Subject: [PATCH 553/602] More fixes to small_test_definition vlsvdiff calling

---
 testpackage/small_test_definitions.sh | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/testpackage/small_test_definitions.sh b/testpackage/small_test_definitions.sh
index 5b4c4ac60..a925bc787 100644
--- a/testpackage/small_test_definitions.sh
+++ b/testpackage/small_test_definitions.sh
@@ -22,7 +22,7 @@ run_tests=( 1 2 3 4 5 6 7 8 9 10 11 12 13 14)
 test_name[1]="acctest_2_maxw_500k_100k_20kms_10deg"
 comparison_vlsv[1]="fullf.0000001.vlsv"
 comparison_phiprof[1]="phiprof_0.txt"
-variable_names[1]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v protons"
+variable_names[1]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v proton"
 variable_components[1]="0 0 1 2"
 single_cell[1]=1
@@ -30,7 +30,7 @@ single_cell[1]=1
 test_name[2]="acctest_3_substeps"
 comparison_vlsv[2]="fullf.0000001.vlsv"
 comparison_phiprof[2]="phiprof_0.txt"
-variable_names[2]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v protons"
+variable_names[2]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v proton"
 variable_components[2]="0 0 1 2"
 single_cell[2]=1
@@ -38,7 +38,7 @@ single_cell[2]=1
 test_name[3]="transtest_2_maxw_500k_100k_20kms_20x20"
 comparison_vlsv[3]="fullf.0000001.vlsv"
 comparison_phiprof[3]="phiprof_0.txt"
-variable_names[3]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v protons"
+variable_names[3]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v proton"
 variable_components[3]="0 0 1 2"

 test_name[4]="acctest_4_helium"
 comparison_vlsv[4]="fullf.0000001.vlsv"
 comparison_phiprof[4]="phiprof_0.txt"
 variable_names[4]="helium/vg_rho helium/vg_v helium/vg_v helium/vg_v"
 variable_components[4]="0 0 1 2"
 single_cell[4]=1
@@ -52,7 +52,7 @@ single_cell[4]=1
 test_name[5]="acctest_5_proton_antiproton"
 comparison_vlsv[5]="fullf.0000001.vlsv"
 comparison_phiprof[5]="phiprof_0.txt"
-variable_names[5]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v protons"
+variable_names[5]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v proton"
 variable_components[5]="0 0 1 2"
 single_cell[5]=1
@@ -72,15 +72,15 @@ variable_components[7]="0 0 1 2 0 1 2 0 1 2"
 test_name[8]="Magnetosphere_small"
 comparison_vlsv[8]="bulk.0000001.vlsv"
 comparison_phiprof[8]="phiprof_0.txt"
-variable_names[8]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v fg_b fg_b fg_b fg_e fg_e fg_e protons"
+variable_names[8]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v fg_b fg_b fg_b fg_e fg_e fg_e proton"
 variable_components[8]="0 0 1 2 0 1 2 0 1 2"

 #Very small polar magnetosphere, with subcycling in ACC or FS
 test_name[9]="Magnetosphere_polar_small"
 comparison_vlsv[9]="bulk.0000001.vlsv"
 comparison_phiprof[9]="phiprof_0.txt"
-variable_names[9]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v fg_b fg_b fg_b fg_e fg_e fg_e protons proton/vg_v_nonthermal proton/vg_ptensor_nonthermal_diagonal"
-variable_components[9]="0 0 1 2 0 1 2 0 1 2"
+variable_names[9]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v fg_b fg_b fg_b fg_e fg_e fg_e proton/vg_v_nonthermal proton/vg_v_nonthermal proton/vg_v_nonthermal proton/vg_ptensor_nonthermal_diagonal proton/vg_ptensor_nonthermal_diagonal proton/vg_ptensor_nonthermal_diagonal proton"
+variable_components[9]="0 0 1 2 0 1 2 0 1 2 0 1 2 0 1 2"

 # Field
solver test test_name[10]="test_fp_fsolver_only_3D" @@ -119,7 +119,7 @@ variable_components[14]="0 0 1 2 0 1 2 0 1 2" test_name[15]="Selfgen_Waves_Periodic" comparison_vlsv[15]="fullf.0000001.vlsv" comparison_phiprof[15]="phiprof_0.txt" -variable_names[15]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v fg_b fg_b fg_b fg_e fg_e fg_e protons" +variable_names[15]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v fg_b fg_b fg_b fg_e fg_e fg_e proton" variable_components[15]="0 0 1 2 0 1 2 0 1 2" ##AMR tests @@ -127,7 +127,7 @@ variable_components[15]="0 0 1 2 0 1 2 0 1 2" test_name[16]="transtest_amr" comparison_vlsv[3]="fullf.0000001.vlsv" comparison_phiprof[3]="phiprof_0.txt" -variable_names[3]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v protons" +variable_names[3]="proton/vg_rho proton/vg_v proton/vg_v proton/vg_v proton" variable_components[3]="0 0 1 2" # Flowthrough test From efbe831d26a07c3edd93bf4b5bc3ae34549a71b7 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Mon, 2 Sep 2019 16:24:03 +0300 Subject: [PATCH 554/602] Puhti makefile and small fixes for new vectorclass version. --- MAKE/Makefile.puhti_gcc | 96 ++++++++++++++++++++++++++++++++++++ vlasovsolver/cpu_acc_map.cpp | 8 +-- vlasovsolver/vec.h | 2 +- 3 files changed, 101 insertions(+), 5 deletions(-) create mode 100644 MAKE/Makefile.puhti_gcc diff --git a/MAKE/Makefile.puhti_gcc b/MAKE/Makefile.puhti_gcc new file mode 100644 index 000000000..e968854f6 --- /dev/null +++ b/MAKE/Makefile.puhti_gcc @@ -0,0 +1,96 @@ +CMP = mpic++ +LNK = mpic++ + +#======== Vectorization ========== +#Set vector backend type for vlasov solvers, sets precision and length. +#Options: +# AVX: VEC4D_AGNER, VEC4F_AGNER, VEC8F_AGNER +# AVX512: VEC8D_AGNER, VEC16F_AGNER +# Fallback: VEC4D_FALLBACK, VEC4F_FALLBACK, VEC8F_FALLBACK + +ifeq ($(DISTRIBUTION_FP_PRECISION),SPF) +#Single-precision + VECTORCLASS = VEC8F_AGNER +else +#Double-precision + VECTORCLASS = VEC4D_AGNER +endif + +#======== PAPI ========== +#Add PAPI_MEM define to use papi to report memory consumption? +#CXXFLAGS += -DPAPI_MEM + + +#======== Allocator ========= +#Use jemalloc instead of system malloc to reduce memory fragmentation? https://github.com/jemalloc/jemalloc +#Configure jemalloc with --with-jemalloc-prefix=je_ when installing it +CXXFLAGS += -DUSE_JEMALLOC -DJEMALLOC_NO_DEMANGLE + + +#======= Compiler and compilation flags ========= +# NOTES on compiler flags: +# CXXFLAGS is for compiler flags, they are always used +# MATHFLAGS are for special math etc. 
flags, these are only applied on solver functions +# LDFLAGS flags for linker + +#-DNO_WRITE_AT_ALL: Define to disable write at all to +# avoid memleak (much slower IO) +#-DMPICH_IGNORE_CXX_SEEK: Ignores some multiple definition +# errors that come up when using +# mpi.h in c++ on Cray + +CXXFLAGS = -DMPICH_IGNORE_CXX_SEEK + +FLAGS = + +#GNU flags: +CC_BRAND = gcc +CC_BRAND_VERSION = 9.1.0 +CXXFLAGS += -O3 -fopenmp -funroll-loops -std=c++17 -W -Wall -Wno-unused -fabi-version=0 -march=native -mavx2 -mfma +testpackage: CXXFLAGS = -O2 -fopenmp -funroll-loops -std=c++17 -fabi-version=0 -mavx2 -mfma + +MATHFLAGS = -ffast-math +LDFLAGS = -lrt +LIB_MPI = -lgomp + +# BOOST_VERSION = current trilinos version +# ZOLTAN_VERSION = current trilinos verson +# +#======== Libraries =========== + +MPT_VERSION = 2.4.0 +JEMALLOC_VERSION = 5.2.1 +LIBRARY_PREFIX = /projappl/project_2000203/libraries + + +#compiled libraries +#INC_BOOST = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/boost/1.61.0/include/ +#LIB_BOOST = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/boost/1.61.0/lib -lboost_program_options +LIB_BOOST = -lboost_program_options + +INC_ZOLTAN = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/zoltan/3.8/include +LIB_ZOLTAN = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/zoltan/3.8/lib -lzoltan + +INC_JEMALLOC = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/include +LIB_JEMALLOC = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib -ljemalloc -Wl,-rpath=$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib + +INC_VLSV = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/vlsv +LIB_VLSV = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/vlsv -lvlsv + +LIB_PROFILE = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/lib -lphiprof -lgfortran -Wl,-rpath=$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/lib +INC_PROFILE = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/include + +LIB_PAPI = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/papi/lib -lpapi -Wl,-rpath=$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/papi/lib +INC_PAPI = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/papi/include + + +#header libraries + +INC_FSGRID = -I$(LIBRARY_PREFIX)/fsgrid +INC_EIGEN = -I$(LIBRARY_PREFIX)/eigen/3.3.7/ +INC_DCCRG = -I$(LIBRARY_PREFIX)/dccrg/ +INC_VECTORCLASS = -I$(LIBRARY_PREFIX)/vectorclass + + + + diff --git a/vlasovsolver/cpu_acc_map.cpp b/vlasovsolver/cpu_acc_map.cpp index a27bbc594..67da16a43 100644 --- a/vlasovsolver/cpu_acc_map.cpp +++ b/vlasovsolver/cpu_acc_map.cpp @@ -440,7 +440,7 @@ bool map_1d(SpatialCell* spatial_cell, * explanations of their meaning*/ Vec v_r((WID * block_indices_begin[2]) * dv + v_min); Vec lagrangian_v_r((v_r-intersection_min)/intersection_dk); - Veci lagrangian_gk_r=truncate_to_int(lagrangian_v_r); + Veci lagrangian_gk_r=truncatei(lagrangian_v_r); /*compute location of min and max, this does not change for one * column (or even for this set of intersections, and can be used @@ -492,12 +492,12 @@ bool map_1d(SpatialCell* spatial_cell, // left(l) and right(r) k 
values (global index) in the target
       // Lagrangian grid, the intersecting cells. Again old right is new left.
       const Veci lagrangian_gk_l = lagrangian_gk_r;
-      lagrangian_gk_r = truncate_to_int((v_r-intersection_min)/intersection_dk);
+      lagrangian_gk_r = truncatei((v_r-intersection_min)/intersection_dk);

       //limits in lagrangian k for target column. Also take into
       //account limits of target column
-      int minGk = std::max(lagrangian_gk_l[minGkIndex], int(columnMinBlockK[columnIndex] * WID));
-      int maxGk = std::min(lagrangian_gk_r[maxGkIndex], int((columnMaxBlockK[columnIndex] + 1) * WID - 1));
+      int minGk = std::max(int(lagrangian_gk_l[minGkIndex]), int(columnMinBlockK[columnIndex] * WID));
+      int maxGk = std::min(int(lagrangian_gk_r[maxGkIndex]), int((columnMaxBlockK[columnIndex] + 1) * WID - 1));

       for(int gk = minGk; gk <= maxGk; gk++){
          const int blockK = gk/WID;
diff --git a/vlasovsolver/vec.h b/vlasovsolver/vec.h
index fac0101fd..2b0789a33 100644
--- a/vlasovsolver/vec.h
+++ b/vlasovsolver/vec.h
@@ -59,7 +59,7 @@ VEC8F_AGNER //user Agner's AVX2 optimized datatypes, double precision accuracy
 #include "vectorclass.h"
 typedef Vec4d Vec;
-typedef Vec4i Veci;
+typedef Vec4q Veci;
 typedef Vec4db Vecb;
 typedef double Realv;
 #define to_realv(v) to_double(v)

From 097391edd1e74b260063f10584af9ecba644b84d Mon Sep 17 00:00:00 2001
From: Urs Ganse
Date: Mon, 2 Sep 2019 16:26:13 +0300
Subject: [PATCH 555/602] Testpackage run file for puhti

---
 testpackage/small_test_puhti.sh | 65 +++++++++++++++++++++++++++++++++
 1 file changed, 65 insertions(+)
 create mode 100755 testpackage/small_test_puhti.sh

diff --git a/testpackage/small_test_puhti.sh b/testpackage/small_test_puhti.sh
new file mode 100755
index 000000000..9d8cc8ba3
--- /dev/null
+++ b/testpackage/small_test_puhti.sh
@@ -0,0 +1,65 @@
+#!/bin/bash -l
+#SBATCH --time=00:30:00
+#SBATCH --job-name=testpackage
+#SBATCH --account=project_2000203
+#SBATCH --partition=small
+#SBATCH --ntasks=2
+#SBATCH --mem-per-cpu=4000
+
+ht=1   #hyper threads per physical core, can only be 1
+t=3    #threads per process
+
+#Compute and set stuff, do not change
+if [ -z $SLURM_NNODES ]
+then
+   #if running interactively we use 2 nodes
+   nodes=2
+else
+   nodes=$SLURM_NNODES
+fi
+
+#sisu has 2 x 12 cores
+cores_per_node=24
+#Change PBS parameters above + the ones here
+total_units=$(echo $nodes $cores_per_node $ht | gawk '{print $1*$2*$3}')
+units_per_node=$(echo $cores_per_node $ht | gawk '{print $1*$2}')
+tasks=$(echo $total_units $t | gawk '{print $1/$2}')
+tasks_per_node=$(echo $units_per_node $t | gawk '{print $1/$2}')
+export OMP_NUM_THREADS=$t
+
+module load gcc
+module load boost
+#export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/proj/vlasiato/libraries/taito/openmpi/1.10.2/gcc/4.9.3/papi/5.5.0/lib/
+
+umask 007
+# Launch the OpenMP job to the allocated compute node
+echo "Running $exec on $tasks mpi tasks, with $t threads per task on $nodes nodes ($ht threads per physical core)"
+#command for running stuff
+run_command="srun"
+small_run_command="srun -n 1"
+run_command_tools="srun -n 1"
+
+#get basedir from PBS_O_WORKDIR if set (batch job), otherwise go to current folder
+#http://stackoverflow.com/questions/307503/whats-the-best-way-to-check-that-environment-variables-are-set-in-unix-shellscr
+base_dir=${PBS_O_WORKDIR:=$(pwd)}
+cd $base_dir
+
+#If 1, the reference vlsv files are generated
+# if 0 then we check the v1
+create_verification_files=1
+
+#folder for all reference data
+reference_dir="/scratch/project_2000203/testpackage_data"
+#compare against which revision
+reference_revision="c36241b84ce8179f7491ebf2a94c377d7279e8c9__DACC_SEMILAG_PQM__DTRANS_SEMILAG_PPM__DDP__DDPF__DVEC4D_AGNER"
+
+
+
+# Define test
+source small_test_definitions.sh
+wait
+# Run tests
+source run_tests.sh

From 9d0ff51f261d81e9105ccd1573b63ead26cd081f Mon Sep 17 00:00:00 2001
From: Urs Ganse
Date: Wed, 4 Sep 2019 16:27:38 +0300
Subject: [PATCH 556/602] Disable jemalloc for puhti, enable debugging symbols

---
 MAKE/Makefile.puhti_gcc | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/MAKE/Makefile.puhti_gcc b/MAKE/Makefile.puhti_gcc
index e968854f6..fc73e38f6 100644
--- a/MAKE/Makefile.puhti_gcc
+++ b/MAKE/Makefile.puhti_gcc
@@ -24,7 +24,7 @@ endif
 #======== Allocator =========
 #Use jemalloc instead of system malloc to reduce memory fragmentation? https://github.com/jemalloc/jemalloc
 #Configure jemalloc with --with-jemalloc-prefix=je_ when installing it
-CXXFLAGS += -DUSE_JEMALLOC -DJEMALLOC_NO_DEMANGLE
+#CXXFLAGS += -DUSE_JEMALLOC -DJEMALLOC_NO_DEMANGLE

 #======= Compiler and compilation flags =========
@@ -46,7 +46,7 @@ FLAGS =
 #GNU flags:
 CC_BRAND = gcc
 CC_BRAND_VERSION = 9.1.0
-CXXFLAGS += -O3 -fopenmp -funroll-loops -std=c++17 -W -Wall -Wno-unused -fabi-version=0 -march=native -mavx2 -mfma
+CXXFLAGS += -g -O3 -fopenmp -funroll-loops -std=c++17 -W -Wall -Wno-unused -fabi-version=0 -march=native -mavx2 -mfma
 testpackage: CXXFLAGS = -O2 -fopenmp -funroll-loops -std=c++17 -fabi-version=0 -mavx2 -mfma

 MATHFLAGS = -ffast-math
@@ -71,8 +71,8 @@ LIB_BOOST = -lboost_program_options
 INC_ZOLTAN = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/zoltan/3.8/include
 LIB_ZOLTAN = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/zoltan/3.8/lib -lzoltan

-INC_JEMALLOC = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/include
-LIB_JEMALLOC = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib -ljemalloc -Wl,-rpath=$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib
+#INC_JEMALLOC = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/include
+#LIB_JEMALLOC = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib -ljemalloc -Wl,-rpath=$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib

 INC_VLSV = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/vlsv
 LIB_VLSV = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/vlsv -lvlsv

From 85a9e4d2b3a6ae663ad01edec72f58352dbbb24a Mon Sep 17 00:00:00 2001
From: Urs Ganse
Date: Tue, 10 Sep 2019 12:09:52 +0300
Subject: [PATCH 557/602] Old/new vectorclass compatibility defines.
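vectorclass v2 renamed truncate_to_int() to truncatei() and returns 64-bit
integer vectors for double inputs, so the calls in cpu_acc_map.cpp are guarded
on the VECTORCLASS_H version macro (20000 and above means v2.x). A minimal
sketch of the pattern, assuming only that vectorclass.h defines VECTORCLASS_H
as an integer version number; the typedef and the truncate_compat() helper
below are hypothetical illustrations for this message, the actual change keeps
the #if blocks inline:

    // Hypothetical standalone illustration of the compatibility guard.
    #include "vectorclass.h"
    #if VECTORCLASS_H >= 20000
    typedef Vec4q Veci;              // v2: 64-bit integer elements
    #else
    typedef Vec4i Veci;              // v1: 32-bit integer elements
    #endif
    static inline Veci truncate_compat(Vec4d v) {
    #if VECTORCLASS_H >= 20000
       return truncatei(v);          // new (v2) API name
    #else
       return truncate_to_int(v);    // old (v1) API name
    #endif
    }

The 64-bit Veci typedef mirrors the Vec4i to Vec4q change made in vec.h in the
previous commit, so both vectorclass generations produce the same element
widths downstream.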
--- vlasovsolver/cpu_acc_map.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/vlasovsolver/cpu_acc_map.cpp b/vlasovsolver/cpu_acc_map.cpp index 67da16a43..01287fa3f 100644 --- a/vlasovsolver/cpu_acc_map.cpp +++ b/vlasovsolver/cpu_acc_map.cpp @@ -440,7 +440,11 @@ bool map_1d(SpatialCell* spatial_cell, * explanations of their meaning*/ Vec v_r((WID * block_indices_begin[2]) * dv + v_min); Vec lagrangian_v_r((v_r-intersection_min)/intersection_dk); +#if VECTORCLASS_H >= 20000 Veci lagrangian_gk_r=truncatei(lagrangian_v_r); +#else + Veci lagrangian_gk_r=truncate_to_int(lagrangian_v_r); +#endif /*compute location of min and max, this does not change for one * column (or even for this set of intersections, and can be used @@ -492,7 +496,11 @@ bool map_1d(SpatialCell* spatial_cell, // left(l) and right(r) k values (global index) in the target // Lagrangian grid, the intersecting cells. Again old right is new left. const Veci lagrangian_gk_l = lagrangian_gk_r; +#if VECTORCLASS_H >= 20000 lagrangian_gk_r = truncatei((v_r-intersection_min)/intersection_dk); +#else + lagrangian_gk_r = truncate_to_int((v_r-intersection_min)/intersection_dk); +#endif //limits in lagrangian k for target column. Also take into //account limits of target column From fb30d9bbf388fe8b11f475180fc3276dfca00b18 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Tue, 10 Sep 2019 12:01:58 +0300 Subject: [PATCH 558/602] Fixes and makefile to build on puhti with intel compiler --- MAKE/Makefile.puhti_intel | 95 ++++++++++++++++++++++++ backgroundfield/backgroundfield.cpp | 4 +- fieldsolver/gridGlue.cpp | 4 +- grid.cpp | 1 - projects/Alfven/Alfven.cpp | 2 +- projects/Dispersion/Dispersion.cpp | 2 +- projects/Distributions/Distributions.cpp | 2 +- projects/Fluctuations/Fluctuations.cpp | 2 +- projects/Harris/Harris.cpp | 2 +- projects/IPShock/IPShock.cpp | 2 +- projects/KHB/KHB.cpp | 2 +- projects/Magnetosphere/Magnetosphere.cpp | 2 +- projects/MultiPeak/MultiPeak.cpp | 2 +- projects/Riemann1/Riemann1.cpp | 2 +- projects/Shock/Shock.cpp | 2 +- projects/Shocktest/Shocktest.cpp | 2 +- projects/testAmr/testAmr.cpp | 2 +- projects/testHall/testHall.cpp | 2 +- projects/test_fp/test_fp.cpp | 2 +- sysboundary/sysboundary.cpp | 2 +- 20 files changed, 115 insertions(+), 21 deletions(-) create mode 100644 MAKE/Makefile.puhti_intel diff --git a/MAKE/Makefile.puhti_intel b/MAKE/Makefile.puhti_intel new file mode 100644 index 000000000..f97da7bc2 --- /dev/null +++ b/MAKE/Makefile.puhti_intel @@ -0,0 +1,95 @@ +CMP = mpic++ +LNK = mpic++ + +#======== Vectorization ========== +#Set vector backend type for vlasov solvers, sets precision and length. +#Options: +# AVX: VEC4D_AGNER, VEC4F_AGNER, VEC8F_AGNER +# AVX512: VEC8D_AGNER, VEC16F_AGNER +# Fallback: VEC4D_FALLBACK, VEC4F_FALLBACK, VEC8F_FALLBACK + +ifeq ($(DISTRIBUTION_FP_PRECISION),SPF) +#Single-precision + VECTORCLASS = VEC8F_AGNER +else +#Double-precision + VECTORCLASS = VEC4D_AGNER +endif + +#======== PAPI ========== +#Add PAPI_MEM define to use papi to report memory consumption? +#CXXFLAGS += -DPAPI_MEM + + +#======== Allocator ========= +#Use jemalloc instead of system malloc to reduce memory fragmentation? https://github.com/jemalloc/jemalloc +#Configure jemalloc with --with-jemalloc-prefix=je_ when installing it +#CXXFLAGS += -DUSE_JEMALLOC -DJEMALLOC_NO_DEMANGLE + + +#======= Compiler and compilation flags ========= +# NOTES on compiler flags: +# CXXFLAGS is for compiler flags, they are always used +# MATHFLAGS are for special math etc. 
flags, these are only applied on solver functions +# LDFLAGS flags for linker + +#-DNO_WRITE_AT_ALL: Define to disable write at all to +# avoid memleak (much slower IO) +#-DMPICH_IGNORE_CXX_SEEK: Ignores some multiple definition +# errors that come up when using +# mpi.h in c++ on Cray + +CXXFLAGS = + +FLAGS = + +#GNU flags: +CC_BRAND = intel +CC_BRAND_VERSION = 19.0.4 +CXXFLAGS += -traceback -g -O3 -qopenmp -funroll-loops -std=c++17 -W -Wall -Wno-unused -fabi-version=0 -march=native -mavx2 -mfma +testpackage: CXXFLAGS = -g -traceback -O2 -qopenmp -funroll-loops -std=c++17 -fabi-version=0 -mavx2 -mfma + +MATHFLAGS = -ffast-math +LDFLAGS = -qopenmp -lifcore +LIB_MPI = + +# BOOST_VERSION = current trilinos version +# ZOLTAN_VERSION = current trilinos verson +# +#======== Libraries =========== + +MPT_VERSION = 2.4.0 +JEMALLOC_VERSION = 5.2.1 +LIBRARY_PREFIX = /projappl/project_2000203/libraries + + +#compiled libraries +INC_BOOST = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/boost/ +LIB_BOOST = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/boost/lib -lboost_program_options -Wl,-rpath=$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/boost/lib + +INC_ZOLTAN = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/zoltan/3.8/include +LIB_ZOLTAN = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/zoltan/3.8/lib -lzoltan + +#INC_JEMALLOC = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/include +#LIB_JEMALLOC = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib -ljemalloc -Wl,-rpath=$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib + +INC_VLSV = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/vlsv +LIB_VLSV = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/vlsv -lvlsv + +LIB_PROFILE = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/lib -lphiprof -lgfortran -Wl,-rpath=$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/lib +INC_PROFILE = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/include + +LIB_PAPI = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/papi/lib -lpapi -Wl,-rpath=$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/papi/lib +INC_PAPI = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/papi/include + + +#header libraries + +INC_FSGRID = -I$(LIBRARY_PREFIX)/fsgrid +INC_EIGEN = -I$(LIBRARY_PREFIX)/eigen/3.3.7/ +INC_DCCRG = -I$(LIBRARY_PREFIX)/dccrg/ +INC_VECTORCLASS = -I$(LIBRARY_PREFIX)/vectorclass-old + + + + diff --git a/backgroundfield/backgroundfield.cpp b/backgroundfield/backgroundfield.cpp index 5456a7665..ac4c0baf1 100644 --- a/backgroundfield/backgroundfield.cpp +++ b/backgroundfield/backgroundfield.cpp @@ -136,7 +136,7 @@ void setBackgroundField( void setBackgroundFieldToZero( FsGrid< std::array, 2>& BgBGrid ) { - auto localSize = BgBGrid.getLocalSize(); + auto localSize = BgBGrid.getLocalSize().data(); #pragma omp parallel for collapse(3) for (int x = 0; x < localSize[0]; ++x) { @@ -220,7 +220,7 @@ void setPerturbedField( void setPerturbedFieldToZero( FsGrid< std::array, 2> & perBGrid) { - auto localSize = perBGrid.getLocalSize(); + auto 
localSize = perBGrid.getLocalSize().data(); #pragma omp parallel for collapse(3) for (int x = 0; x < localSize[0]; ++x) { diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index d51fb31c7..2412db6cb 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -198,13 +198,13 @@ void getFieldsFromFsGrid( Average() { cells = 0; for(int i = 0; i < fieldsToCommunicate; i++){ - sums[i] = 0; + sums[i] = 0; } } Average operator+=(const Average& rhs) { this->cells += rhs.cells; for(int i = 0; i < fieldsToCommunicate; i++){ - this->sums[i] += rhs.sums[i]; + this->sums[i] += rhs.sums[i]; } } }; diff --git a/grid.cpp b/grid.cpp index 1b33128b4..962a37178 100644 --- a/grid.cpp +++ b/grid.cpp @@ -20,7 +20,6 @@ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ -#include #include #include #include // for setprecision() diff --git a/projects/Alfven/Alfven.cpp b/projects/Alfven/Alfven.cpp index 6044dded1..1a48bec20 100644 --- a/projects/Alfven/Alfven.cpp +++ b/projects/Alfven/Alfven.cpp @@ -170,7 +170,7 @@ namespace projects { setBackgroundFieldToZero(BgBGrid); if (!P::isRestart) { - auto localSize = perBGrid.getLocalSize(); + auto localSize = perBGrid.getLocalSize().data(); #pragma omp parallel for collapse(3) for (int x = 0; x < localSize[0]; ++x) { diff --git a/projects/Dispersion/Dispersion.cpp b/projects/Dispersion/Dispersion.cpp index 04499a5e8..1034f9f3c 100644 --- a/projects/Dispersion/Dispersion.cpp +++ b/projects/Dispersion/Dispersion.cpp @@ -217,7 +217,7 @@ namespace projects { setBackgroundField(bgField, BgBGrid); if(!P::isRestart) { - const auto localSize = BgBGrid.getLocalSize(); + const auto localSize = BgBGrid.getLocalSize().data(); #pragma omp parallel for collapse(3) for (int x = 0; x < localSize[0]; ++x) { diff --git a/projects/Distributions/Distributions.cpp b/projects/Distributions/Distributions.cpp index 7422be8ad..7c68241f7 100644 --- a/projects/Distributions/Distributions.cpp +++ b/projects/Distributions/Distributions.cpp @@ -169,7 +169,7 @@ namespace projects { setBackgroundField(bgField, BgBGrid); if(!P::isRestart) { - const auto localSize = BgBGrid.getLocalSize(); + const auto localSize = BgBGrid.getLocalSize().data(); #pragma omp parallel for collapse(3) for (int x = 0; x < localSize[0]; ++x) { diff --git a/projects/Fluctuations/Fluctuations.cpp b/projects/Fluctuations/Fluctuations.cpp index ad47a8582..8b00deef0 100644 --- a/projects/Fluctuations/Fluctuations.cpp +++ b/projects/Fluctuations/Fluctuations.cpp @@ -183,7 +183,7 @@ namespace projects { setBackgroundField(bgField, BgBGrid); if(!P::isRestart) { - const auto localSize = BgBGrid.getLocalSize(); + const auto localSize = BgBGrid.getLocalSize().data(); #pragma omp parallel for collapse(3) for (int x = 0; x < localSize[0]; ++x) { diff --git a/projects/Harris/Harris.cpp b/projects/Harris/Harris.cpp index 1da458a65..e02c1d455 100644 --- a/projects/Harris/Harris.cpp +++ b/projects/Harris/Harris.cpp @@ -156,7 +156,7 @@ namespace projects { setBackgroundFieldToZero(BgBGrid); if(!P::isRestart) { - auto localSize = perBGrid.getLocalSize(); + auto localSize = perBGrid.getLocalSize().data(); #pragma omp parallel for collapse(3) for (int x = 0; x < localSize[0]; ++x) { diff --git a/projects/IPShock/IPShock.cpp b/projects/IPShock/IPShock.cpp index 26c54042b..d4ea71317 100644 --- a/projects/IPShock/IPShock.cpp +++ b/projects/IPShock/IPShock.cpp @@ -445,7 +445,7 @@ namespace projects { setBackgroundFieldToZero(BgBGrid); if(!P::isRestart) { - auto localSize = perBGrid.getLocalSize(); + auto 
localSize = perBGrid.getLocalSize().data(); #pragma omp parallel for collapse(3) for (int x = 0; x < localSize[0]; ++x) { diff --git a/projects/KHB/KHB.cpp b/projects/KHB/KHB.cpp index c748bb77b..5698d38b4 100644 --- a/projects/KHB/KHB.cpp +++ b/projects/KHB/KHB.cpp @@ -161,7 +161,7 @@ namespace projects { setBackgroundFieldToZero(BgBGrid); if(!P::isRestart) { - auto localSize = perBGrid.getLocalSize(); + auto localSize = perBGrid.getLocalSize().data(); #pragma omp parallel for collapse(3) for (int x = 0; x < localSize[0]; ++x) { diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index 9a6d2e058..ced4d3293 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -374,7 +374,7 @@ namespace projects { setBackgroundFieldToZero(BgBGrid); } - const auto localSize = BgBGrid.getLocalSize(); + const auto localSize = BgBGrid.getLocalSize().data(); #pragma omp parallel { diff --git a/projects/MultiPeak/MultiPeak.cpp b/projects/MultiPeak/MultiPeak.cpp index 3bca5de52..ec2a093b8 100644 --- a/projects/MultiPeak/MultiPeak.cpp +++ b/projects/MultiPeak/MultiPeak.cpp @@ -230,7 +230,7 @@ namespace projects { setBackgroundField(bgField, BgBGrid); if(!P::isRestart) { - auto localSize = perBGrid.getLocalSize(); + auto localSize = perBGrid.getLocalSize().data(); #pragma omp parallel for collapse(3) for (int x = 0; x < localSize[0]; ++x) { diff --git a/projects/Riemann1/Riemann1.cpp b/projects/Riemann1/Riemann1.cpp index 86a36e904..b6372f2a5 100644 --- a/projects/Riemann1/Riemann1.cpp +++ b/projects/Riemann1/Riemann1.cpp @@ -127,7 +127,7 @@ namespace projects { setBackgroundFieldToZero(BgBGrid); if(!P::isRestart) { - auto localSize = perBGrid.getLocalSize(); + auto localSize = perBGrid.getLocalSize().data(); #pragma omp parallel for collapse(3) for (int x = 0; x < localSize[0]; ++x) { diff --git a/projects/Shock/Shock.cpp b/projects/Shock/Shock.cpp index 5038cc341..2e88b4673 100644 --- a/projects/Shock/Shock.cpp +++ b/projects/Shock/Shock.cpp @@ -152,7 +152,7 @@ namespace projects { setBackgroundFieldToZero(BgBGrid); if(!P::isRestart) { - auto localSize = perBGrid.getLocalSize(); + auto localSize = perBGrid.getLocalSize().data(); #pragma omp parallel for collapse(3) for (int x = 0; x < localSize[0]; ++x) { diff --git a/projects/Shocktest/Shocktest.cpp b/projects/Shocktest/Shocktest.cpp index d8199fb42..06c8df952 100644 --- a/projects/Shocktest/Shocktest.cpp +++ b/projects/Shocktest/Shocktest.cpp @@ -199,7 +199,7 @@ namespace projects { setBackgroundFieldToZero(BgBGrid); if(!P::isRestart) { - auto localSize = perBGrid.getLocalSize(); + auto localSize = perBGrid.getLocalSize().data(); #pragma omp parallel for collapse(3) for (int x = 0; x < localSize[0]; ++x) { diff --git a/projects/testAmr/testAmr.cpp b/projects/testAmr/testAmr.cpp index afa7d161d..4dc5442d0 100644 --- a/projects/testAmr/testAmr.cpp +++ b/projects/testAmr/testAmr.cpp @@ -231,7 +231,7 @@ namespace projects { setBackgroundField(bgField, BgBGrid); if(!P::isRestart) { - auto localSize = perBGrid.getLocalSize(); + auto localSize = perBGrid.getLocalSize().data(); #pragma omp parallel for collapse(3) for (int x = 0; x < localSize[0]; ++x) { diff --git a/projects/testHall/testHall.cpp b/projects/testHall/testHall.cpp index 02a193633..d8e89a639 100644 --- a/projects/testHall/testHall.cpp +++ b/projects/testHall/testHall.cpp @@ -138,7 +138,7 @@ namespace projects { setBackgroundFieldToZero(BgBGrid); if(!P::isRestart) { - auto localSize = perBGrid.getLocalSize(); + 
auto localSize = perBGrid.getLocalSize().data(); #pragma omp parallel for collapse(3) for (int x = 0; x < localSize[0]; ++x) { diff --git a/projects/test_fp/test_fp.cpp b/projects/test_fp/test_fp.cpp index 96140c473..e6ef60828 100644 --- a/projects/test_fp/test_fp.cpp +++ b/projects/test_fp/test_fp.cpp @@ -113,7 +113,7 @@ namespace projects { setBackgroundFieldToZero(BgBGrid); if(!P::isRestart) { - auto localSize = perBGrid.getLocalSize(); + auto localSize = perBGrid.getLocalSize().data(); creal dx = perBGrid.DX * 3.5; creal dy = perBGrid.DY * 3.5; diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp index da8074064..64a6f75c1 100644 --- a/sysboundary/sysboundary.cpp +++ b/sysboundary/sysboundary.cpp @@ -376,7 +376,7 @@ bool SysBoundary::classifyCells(dccrg::Dccrg & technicalGrid) { bool success = true; vector cells = mpiGrid.get_cells(); - auto localSize = technicalGrid.getLocalSize(); + auto localSize = technicalGrid.getLocalSize().data(); /*set all cells to default value, not_sysboundary*/ for(uint i=0; i Date: Tue, 10 Sep 2019 12:34:15 +0300 Subject: [PATCH 559/602] Also fix puhti testpackage build. --- vlasovsolver/vec.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/vlasovsolver/vec.h b/vlasovsolver/vec.h index 2b0789a33..ae8fa80a7 100644 --- a/vlasovsolver/vec.h +++ b/vlasovsolver/vec.h @@ -59,7 +59,11 @@ VEC8F_AGNER //user Agner's AVX2 optimized datatypes, double precision accuracy #include "vectorclass.h" typedef Vec4d Vec; +#if VECTORCLASS_H >= 20000 typedef Vec4q Veci; +#else +typedef Vec4i Veci; +#endif typedef Vec4db Vecb; typedef double Realv; #define to_realv(v) to_double(v) From 1ed734803b209711bc1a2f4806717a76757b5a5a Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Wed, 11 Sep 2019 10:51:42 +0300 Subject: [PATCH 560/602] AVX-512 on puhti (a little bit faster) --- MAKE/Makefile.puhti_intel | 14 +++++++------- testpackage/small_test_puhti.sh | 11 ++++++----- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/MAKE/Makefile.puhti_intel b/MAKE/Makefile.puhti_intel index f97da7bc2..4e9708d07 100644 --- a/MAKE/Makefile.puhti_intel +++ b/MAKE/Makefile.puhti_intel @@ -10,10 +10,10 @@ LNK = mpic++ ifeq ($(DISTRIBUTION_FP_PRECISION),SPF) #Single-precision - VECTORCLASS = VEC8F_AGNER + VECTORCLASS = VEC16F_AGNER else #Double-precision - VECTORCLASS = VEC4D_AGNER + VECTORCLASS = VEC8D_AGNER endif #======== PAPI ========== @@ -24,7 +24,7 @@ endif #======== Allocator ========= #Use jemalloc instead of system malloc to reduce memory fragmentation? 
https://github.com/jemalloc/jemalloc
 #Configure jemalloc with --with-jemalloc-prefix=je_ when installing it
-#CXXFLAGS += -DUSE_JEMALLOC -DJEMALLOC_NO_DEMANGLE
+CXXFLAGS += -DUSE_JEMALLOC -DJEMALLOC_NO_DEMANGLE
 
 
 #======= Compiler and compilation flags =========
@@ -39,18 +39,18 @@ endif
 # errors that come up when using
 # mpi.h in c++ on Cray
 
-CXXFLAGS = 
+CXXFLAGS = -DMAX_VECTOR_SIZE=512
 
 FLAGS = 
 
 #GNU flags:
 CC_BRAND = intel
 CC_BRAND_VERSION = 19.0.4
-CXXFLAGS += -traceback -g -O3 -qopenmp -funroll-loops -std=c++17 -W -Wall -Wno-unused -fabi-version=0 -march=native -mavx2 -mfma
-testpackage: CXXFLAGS = -g -traceback -O2 -qopenmp -funroll-loops -std=c++17 -fabi-version=0 -mavx2 -mfma
+CXXFLAGS += -traceback -g -O3 -qopenmp -std=c++17 -W -Wall -Wno-unused -xHost -ipo -qopt-zmm-usage=high
+testpackage: CXXFLAGS = -g -traceback -O2 -qopenmp -std=c++17 -W -Wno-unused -xHost -ipo
 
 MATHFLAGS = -ffast-math
-LDFLAGS = -qopenmp -lifcore
+LDFLAGS = -qopenmp -lifcore -ipo
 LIB_MPI = 
 
 # BOOST_VERSION = current trilinos version
diff --git a/testpackage/small_test_puhti.sh b/testpackage/small_test_puhti.sh
index 9d8cc8ba3..9111b9dc7 100755
--- a/testpackage/small_test_puhti.sh
+++ b/testpackage/small_test_puhti.sh
@@ -3,11 +3,12 @@
 #SBATCH --job-name=testpackage
 #SBATCH --account=project_2000203
 #SBATCH --partition=small
-#SBATCH --ntasks=2
-#SBATH --mem-per-cpu=4000
+#SBATCH --ntasks=8
+#SBATCH --nodes=1
+#SBATCH --mem-per-cpu=4000
 
 ht=1 #hyper threads per physical core, can only be 1
-t=3 #threads per process
+t=5 #threads per process
 
 #Compute and set stuff, do not change
 if [ -z $SLURM_NNODES ]
 then
 nodes=1
 else
 nodes=$SLURM_NNODES
 fi
 
 #sisu has 2 x 12 cores
-cores_per_node=24
+cores_per_node=40
 #Change PBS parameters above + the ones here
 total_units=$(echo $nodes $cores_per_node $ht | gawk '{print $1*$2*$3}')
 units_per_node=$(echo $cores_per_node $ht | gawk '{print $1*$2}')
@@ -54,7 +55,7 @@ create_verification_files=1
 #folder for all reference data
 reference_dir="/scratch/project_2000203/testpackage_data"
 #compare agains which revision
-reference_revision="c36241b84ce8179f7491ebf2a94c377d7279e8c9__DACC_SEMILAG_PQM__DTRANS_SEMILAG_PPM__DDP__DDPF__DVEC4D_AGNER"
+reference_revision="current"

From 32e462b73299c8ee13ebe15b9b16f81f22f0555b Mon Sep 17 00:00:00 2001
From: Urs Ganse 
Date: Wed, 11 Sep 2019 15:14:59 +0300
Subject: [PATCH 561/602] Re-include jemalloc, make testpackage build with AVX-512, too

---
 MAKE/Makefile.puhti_intel | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/MAKE/Makefile.puhti_intel b/MAKE/Makefile.puhti_intel
index 4e9708d07..5decdd453 100644
--- a/MAKE/Makefile.puhti_intel
+++ b/MAKE/Makefile.puhti_intel
@@ -39,7 +39,7 @@ CXXFLAGS += -DUSE_JEMALLOC -DJEMALLOC_NO_DEMANGLE
 # errors that come up when using
 # mpi.h in c++ on Cray
 
-CXXFLAGS = -DMAX_VECTOR_SIZE=512
+CXXFLAGS += -DMAX_VECTOR_SIZE=512
 
 FLAGS = 
 
@@ -47,7 +47,7 @@ FLAGS = 
 CC_BRAND = intel
 CC_BRAND_VERSION = 19.0.4
 CXXFLAGS += -traceback -g -O3 -qopenmp -std=c++17 -W -Wall -Wno-unused -xHost -ipo -qopt-zmm-usage=high
-testpackage: CXXFLAGS = -g -traceback -O2 -qopenmp -std=c++17 -W -Wno-unused -xHost -ipo
+testpackage: CXXFLAGS += -g -traceback -O2 -qopenmp -std=c++17 -W -Wno-unused -xHost -ipo
 
 MATHFLAGS = -ffast-math
 LDFLAGS = -qopenmp -lifcore -ipo
@@ -70,8 +70,8 @@ LIB_BOOST = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_V
 INC_ZOLTAN = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/zoltan/3.8/include
 LIB_ZOLTAN = 
-L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/zoltan/3.8/lib -lzoltan -#INC_JEMALLOC = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/include -#LIB_JEMALLOC = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib -ljemalloc -Wl,-rpath=$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib +INC_JEMALLOC = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/include +LIB_JEMALLOC = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib -ljemalloc -Wl,-rpath=$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib INC_VLSV = -I$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/vlsv LIB_VLSV = -L$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/vlsv -lvlsv From 4562fb03bec7008de288bffa9054373da901b7fc Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Tue, 15 Oct 2019 10:52:50 +0300 Subject: [PATCH 562/602] Makefile for building with the PGI compiler on puhti --- MAKE/Makefile.puhti_pgi | 95 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) create mode 100644 MAKE/Makefile.puhti_pgi diff --git a/MAKE/Makefile.puhti_pgi b/MAKE/Makefile.puhti_pgi new file mode 100644 index 000000000..048e460f4 --- /dev/null +++ b/MAKE/Makefile.puhti_pgi @@ -0,0 +1,95 @@ +CMP = mpic++ +LNK = mpic++ + +#======== Vectorization ========== +#Set vector backend type for vlasov solvers, sets precision and length. +#Options: +# AVX: VEC4D_AGNER, VEC4F_AGNER, VEC8F_AGNER +# AVX512: VEC8D_AGNER, VEC16F_AGNER +# Fallback: VEC4D_FALLBACK, VEC4F_FALLBACK, VEC8F_FALLBACK + +ifeq ($(DISTRIBUTION_FP_PRECISION),SPF) +#Single-precision + VECTORCLASS = VEC4F_AGNER +else +#Double-precision + VECTORCLASS = VEC2D_AGNER +endif + +#======== PAPI ========== +#Add PAPI_MEM define to use papi to report memory consumption? +#CXXFLAGS += -DPAPI_MEM + + +#======== Allocator ========= +#Use jemalloc instead of system malloc to reduce memory fragmentation? https://github.com/jemalloc/jemalloc +#Configure jemalloc with --with-jemalloc-prefix=je_ when installing it +#CXXFLAGS += -DUSE_JEMALLOC -DJEMALLOC_NO_DEMANGLE + + +#======= Compiler and compilation flags ========= +# NOTES on compiler flags: +# CXXFLAGS is for compiler flags, they are always used +# MATHFLAGS are for special math etc. 
flags, these are only applied on solver functions +# LDFLAGS flags for linker + +#-DNO_WRITE_AT_ALL: Define to disable write at all to +# avoid memleak (much slower IO) +#-DMPICH_IGNORE_CXX_SEEK: Ignores some multiple definition +# errors that come up when using +# mpi.h in c++ on Cray + +CXXFLAGS += -DMAX_VECTOR_SIZE=256 + +FLAGS = + +#GNU flags: +CC_BRAND = pgi +CC_BRAND_VERSION = 19.7 +CXXFLAGS += -traceback -g -O3 -acc -D__GCC_ATOMIC_TEST_AND_SET_TRUEVAL=1 +testpackage: CXXFLAGS += -g -traceback -O2 -qopenmp -std=c++17 -W -Wno-unused -xHost -ipo + +MATHFLAGS = +LDFLAGS = -O3 -acc -g +LIB_MPI = + +# BOOST_VERSION = current trilinos version +# ZOLTAN_VERSION = current trilinos verson +# +#======== Libraries =========== + +MPT_VERSION = 4.0.2 +JEMALLOC_VERSION = 5.2.1 +LIBRARY_PREFIX = /projappl/project_2000203/libraries + + +#compiled libraries +INC_BOOST = -I$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/boost/include +LIB_BOOST = -L$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/boost/lib -lboost_program_options -Wl,-rpath=$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/boost/lib + +INC_ZOLTAN = -I$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/zoltan/3.8/include +LIB_ZOLTAN = -L$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/zoltan/3.8/lib -lzoltan + +INC_JEMALLOC = -I$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/include +LIB_JEMALLOC = -L$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib -ljemalloc -Wl,-rpath=$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib + +INC_VLSV = -I$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/vlsv +LIB_VLSV = -L$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/vlsv -lvlsv + +LIB_PROFILE = -L$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/lib -lphiprof -lgfortran -Wl,-rpath=$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/lib +INC_PROFILE = -I$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/include + +#LIB_PAPI = -L$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/papi/lib -lpapi -Wl,-rpath=$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/papi/lib +#INC_PAPI = -I$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/papi/include + + +#header libraries + +INC_FSGRID = -I$(LIBRARY_PREFIX)/fsgrid +INC_EIGEN = -I$(LIBRARY_PREFIX)/eigen/3.3.7/ +INC_DCCRG = -I$(LIBRARY_PREFIX)/dccrg/ +INC_VECTORCLASS = -I$(LIBRARY_PREFIX)/vectorclass-old + + + + From d1f3699733c76f44db2947bb83c372182d644813 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Tue, 15 Oct 2019 14:51:47 +0300 Subject: [PATCH 563/602] Update pgi compiler rpath statements --- MAKE/Makefile.puhti_pgi | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/MAKE/Makefile.puhti_pgi b/MAKE/Makefile.puhti_pgi index 048e460f4..1475a8e94 100644 --- a/MAKE/Makefile.puhti_pgi +++ b/MAKE/Makefile.puhti_pgi @@ -65,21 +65,21 @@ LIBRARY_PREFIX = /projappl/project_2000203/libraries #compiled libraries INC_BOOST = -I$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/boost/include -LIB_BOOST = -L$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/boost/lib 
-lboost_program_options -Wl,-rpath=$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/boost/lib +LIB_BOOST = -L$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/boost/lib -lboost_program_options -Wl,-rpath=$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/boost/lib INC_ZOLTAN = -I$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/zoltan/3.8/include LIB_ZOLTAN = -L$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/zoltan/3.8/lib -lzoltan INC_JEMALLOC = -I$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/include -LIB_JEMALLOC = -L$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib -ljemalloc -Wl,-rpath=$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib +LIB_JEMALLOC = -L$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib -ljemalloc -Wl,-rpath=$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib INC_VLSV = -I$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/vlsv LIB_VLSV = -L$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/vlsv -lvlsv -LIB_PROFILE = -L$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/lib -lphiprof -lgfortran -Wl,-rpath=$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/lib +LIB_PROFILE = -L$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/lib -lphiprof -lgfortran -Wl,-rpath=$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/lib INC_PROFILE = -I$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/include -#LIB_PAPI = -L$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/papi/lib -lpapi -Wl,-rpath=$(LIBRARY_PREFIX)/hpcx-mpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/papi/lib +#LIB_PAPI = -L$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/papi/lib -lpapi -Wl,-rpath=$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/papi/lib #INC_PAPI = -I$(LIBRARY_PREFIX)/openmpi/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/papi/include From 1d596cdb16c3d4c8bddac225db14194bac65a537 Mon Sep 17 00:00:00 2001 From: Urs Ganse Date: Wed, 16 Oct 2019 09:55:50 +0300 Subject: [PATCH 564/602] Fixes to fallback vectorclass to allow PGI openacc --- MAKE/Makefile.puhti_pgi | 10 ++--- vlasovsolver/vectorclass_fallback.h | 60 ++++++++++++++++++++++++++++- 2 files changed, 63 insertions(+), 7 deletions(-) diff --git a/MAKE/Makefile.puhti_pgi b/MAKE/Makefile.puhti_pgi index 1475a8e94..6048ef26d 100644 --- a/MAKE/Makefile.puhti_pgi +++ b/MAKE/Makefile.puhti_pgi @@ -10,10 +10,10 @@ LNK = mpic++ ifeq ($(DISTRIBUTION_FP_PRECISION),SPF) #Single-precision - VECTORCLASS = VEC4F_AGNER + VECTORCLASS = VEC4F_FALLBACK else #Double-precision - VECTORCLASS = VEC2D_AGNER + VECTORCLASS = VEC4D_FALLBACK endif #======== PAPI ========== @@ -39,15 +39,13 @@ endif # errors that come up when using # mpi.h in c++ on Cray -CXXFLAGS += -DMAX_VECTOR_SIZE=256 - FLAGS = #GNU flags: CC_BRAND = pgi CC_BRAND_VERSION = 19.7 -CXXFLAGS += -traceback -g -O3 -acc -D__GCC_ATOMIC_TEST_AND_SET_TRUEVAL=1 -testpackage: CXXFLAGS += -g -traceback -O2 -qopenmp -std=c++17 -W -Wno-unused -xHost -ipo +CXXFLAGS += -g 
-O3 -acc -D__GCC_ATOMIC_TEST_AND_SET_TRUEVAL=1 -Minfo=accel
+testpackage: CXXFLAGS += -g -O2 -acc -D__GCC_ATOMIC_TEST_AND_SET_TRUEVAL=1 -Minfo=accel
 
 MATHFLAGS = 
 LDFLAGS = -O3 -acc -g
diff --git a/vlasovsolver/vectorclass_fallback.h b/vlasovsolver/vectorclass_fallback.h
index b9c5c6be2..0df5db517 100644
--- a/vlasovsolver/vectorclass_fallback.h
+++ b/vlasovsolver/vectorclass_fallback.h
@@ -28,7 +28,8 @@
 */
 
-
+// Prefetching does nothing in the fallback vectorclass.
+#define _mm_prefetch(...)
 
 template
@@ -258,6 +259,17 @@ static inline Vec4Simple operator / (const Vec4Simple &l, const S &r)
 );
 }
 
+template
+static inline Vec4Simple operator / (const S &r, const Vec4Simple &l )
+{
+ return Vec4Simple(
+ r/l.val[0],
+ r/l.val[1],
+ r/l.val[2],
+ r/l.val[3]
+ );
+}
+
 template
 static inline Vec4Simple & operator += (Vec4Simple &l, const Vec4Simple &r){
 l=l+r;
@@ -317,6 +329,38 @@ static inline Vec4Simple operator == (const Vec4Simple &l, const Vec4Si
 );
 }
 
+template
+static inline Vec4Simple operator == (const Vec4Simple &l, const S& r)
+{
+ return Vec4Simple(
+ l.val[0] == r,
+ l.val[1] == r,
+ l.val[2] == r,
+ l.val[3] == r
+ );
+}
+
+template
+static inline Vec4Simple operator != (const Vec4Simple &l, const S& r)
+{
+ return Vec4Simple(
+ l.val[0] != r,
+ l.val[1] != r,
+ l.val[2] != r,
+ l.val[3] != r
+ );
+}
+
+template
+static inline Vec4Simple operator ! (const Vec4Simple &l)
+{
+ return Vec4Simple(
+ !l.val[0],
+ !l.val[1],
+ !l.val[2],
+ !l.val[3]
+ );
+}
 
 template
@@ -967,6 +1011,20 @@ static inline Vec8Simple operator == (const Vec8Simple &l, const Vec8Si
 );
 }
 
+template
+static inline Vec8Simple operator == (const Vec8Simple &l, const S &r)
+{
+ return Vec8Simple(
+ l.val[0] == r,
+ l.val[1] == r,
+ l.val[2] == r,
+ l.val[3] == r,
+ l.val[4] == r,
+ l.val[5] == r,
+ l.val[6] == r,
+ l.val[7] == r
+ );
+}
 
 template

From f8d7c39b2671c89c692a9bca7db083ef173808d7 Mon Sep 17 00:00:00 2001
From: Markus Battarbee 
Date: Mon, 21 Oct 2019 09:59:12 +0300
Subject: [PATCH 565/602] typo in number density datareducer metadata

---
 datareduction/datareducer.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp
index a315a0520..93f23e84e 100644
--- a/datareduction/datareducer.cpp
+++ b/datareduction/datareducer.cpp
@@ -252,7 +252,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti
 species::Species& species=getObjectWrapper().particleSpecies[i];
 const std::string& pop = species.name;
 outputReducer->addOperator(new DRO::DataReductionOperatorPopulations(pop + "/vg_rho", i, offsetof(spatial_cell::Population, RHO), 1));
-outputReducer->addMetadata(outputReducer->size()-1,"1/m^3","$\\mathrm{m}^{-3}$","$\\n_\\mathrm{"+pop+"}$","1.0");
+outputReducer->addMetadata(outputReducer->size()-1,"1/m^3","$\\mathrm{m}^{-3}$","$n_\\mathrm{"+pop+"}$","1.0");
 }
 continue;
 }

From 088e93b7440c87e2f8eee5f6baeda72cdbce16ba Mon Sep 17 00:00:00 2001
From: Yann Pfau-Kempf 
Date: Thu, 24 Oct 2019 14:09:48 +0300
Subject: [PATCH 566/602] Fix to vlsvextract because of erroneous loop over meshes. 
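Newer .vlsv files can report more meshes than just the spatial grid, so looping
over everything getMeshNames() returned ran the velocity-distribution extraction
once per reported mesh instead of once per requested cell. The loop is replaced
by pinning the extraction to the spatial mesh, the only mesh that velocity-block
data hangs off. A minimal sketch of the corrected call site, with the names
taken from the diff below (this is an excerpt of the change, not a standalone
program):

    // Excerpt-style sketch; convertVelocityBlocks2 and its arguments are as in the diff.
    const string meshName = "SpatialGrid"; // the only mesh carrying velocity blocks
    if (convertVelocityBlocks2(vlsvReader, outputFilePath, meshName, cellStruct,
                               cellID, mainOptions.rotateVectors,
                               mainOptions.plasmaFrame) == false) {
       velGridExtracted = false;
    }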
--- tools/vlsvextract.cpp | 47 +++++++++++++++++-------------------------- 1 file changed, 19 insertions(+), 28 deletions(-) diff --git a/tools/vlsvextract.cpp b/tools/vlsvextract.cpp index 292925497..f3283ec0f 100644 --- a/tools/vlsvextract.cpp +++ b/tools/vlsvextract.cpp @@ -49,7 +49,7 @@ using namespace vlsv; namespace po = boost::program_options; // If set to true, vlsvextract writes some debugging info to stderr -static bool runDebug = false; +static bool runDebug = true; bool NodeComp::operator()(const NodeCrd& a, const NodeCrd& b) const { double EPS = 0.5e-3 * (fabs(a.z) + fabs(b.z)); @@ -1017,7 +1017,7 @@ bool convertVelocityBlocks2( bool success = true; if (popNames.size() > 0) { for (set::iterator it=popNames.begin(); it!=popNames.end(); ++it) { - if (runDebug == true) cerr << "Population '" << *it << "'" << endl; + if (runDebug == true) cerr << "Population '" << *it << "' meshName '" << meshName << "'" << endl; if (vlsvReader.setCellsWithBlocks(meshName,*it) == false) {success = false; continue;} if (convertVelocityBlocks2(vlsvReader,fname,meshName,cellStruct,cellID,rotate,plasmaFrame,out,*it) == false) success = false; } @@ -1738,17 +1738,10 @@ void extractDistribution( const string & fileName, const UserOptions & mainOptio T vlsvReader; // Open VLSV file and read mesh names: vlsvReader.open(fileName); - list meshNames; + const string meshName = "SpatialGrid"; const string tagName = "MESH"; const string attributeName = "name"; - // Get spatial mesh names - if (vlsvReader.getMeshNames(meshNames) == false) { - cout << "\t file '" << fileName << "' not compatible" << endl; - vlsvReader.close(); - return; - } - //Sets cell variables (for cell geometry) -- used in getCellIdFromCoords function CellStructure cellStruct; setSpatialCellVariables( vlsvReader, cellStruct ); @@ -1892,25 +1885,23 @@ void extractDistribution( const string & fileName, const UserOptions & mainOptio // Extract velocity grid from VLSV file, if possible, and write as vlsv file: bool velGridExtracted = true; - for (list::const_iterator it2 = meshNames.begin(); it2 != meshNames.end(); ++it2) { - //slice disabled by default, enable for specific testing. TODO: add command line interface for enabling it - //convertSlicedVelocityMesh(vlsvReader,outputSliceName,*it2,cellStruct); - if (convertVelocityBlocks2(vlsvReader, outputFilePath, *it2, cellStruct, cellID, mainOptions.rotateVectors, mainOptions.plasmaFrame ) == false) { - velGridExtracted = false; + //slice disabled by default, enable for specific testing. TODO: add command line interface for enabling it + //convertSlicedVelocityMesh(vlsvReader,outputSliceName,*it2,cellStruct); + if (convertVelocityBlocks2(vlsvReader, outputFilePath, meshName, cellStruct, cellID, mainOptions.rotateVectors, mainOptions.plasmaFrame ) == false) { + velGridExtracted = false; + } else { + //Display message for the user: + if( mainOptions.getCellIdFromLine ) { + //Extracting multiple cell ids: + //Display how mant extracted and how many more to go: + int moreToGo = cellIdList.size() - extractNum; + //Display message + cout << "Extracted num. " << extractNum << ", " << moreToGo << " more to go" << endl; + //Move to the next extraction number + ++extractNum; } else { - //Display message for the user: - if( mainOptions.getCellIdFromLine ) { - //Extracting multiple cell ids: - //Display how mant extracted and how many more to go: - int moreToGo = cellIdList.size() - extractNum; - //Display message - cout << "Extracted num. 
" << extractNum << ", " << moreToGo << " more to go" << endl; - //Move to the next extraction number - ++extractNum; - } else { - //Single cell id: - cout << "\t extracted from '" << fileName << "'" << endl; - } + //Single cell id: + cout << "\t extracted from '" << fileName << "'" << endl; } } From 541c34c3dcddd11e7629f5110dba897c4aff4c9a Mon Sep 17 00:00:00 2001 From: Yann Pfau-Kempf Date: Thu, 24 Oct 2019 14:11:43 +0300 Subject: [PATCH 567/602] Reverted debug flag. --- tools/vlsvextract.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/vlsvextract.cpp b/tools/vlsvextract.cpp index f3283ec0f..44e9983ed 100644 --- a/tools/vlsvextract.cpp +++ b/tools/vlsvextract.cpp @@ -49,7 +49,7 @@ using namespace vlsv; namespace po = boost::program_options; // If set to true, vlsvextract writes some debugging info to stderr -static bool runDebug = true; +static bool runDebug = false; bool NodeComp::operator()(const NodeCrd& a, const NodeCrd& b) const { double EPS = 0.5e-3 * (fabs(a.z) + fabs(b.z)); From cfd77965f3a43d9f3a9d671a84345b2d27864cb4 Mon Sep 17 00:00:00 2001 From: Yann Pfau-Kempf Date: Wed, 13 Nov 2019 11:25:29 +0200 Subject: [PATCH 568/602] Write volume B field into restarts for post-processing. --- iowrite.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/iowrite.cpp b/iowrite.cpp index 31bc5c05b..4f3e2b81f 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -1321,6 +1321,10 @@ bool writeRestart(dccrg::Dccrg& mpiGrid, restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("max_v_dt",CellParams::MAXVDT,1)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("max_r_dt",CellParams::MAXRDT,1)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("max_fields_dt",CellParams::MAXFDT,1)); + restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("vg_b_background_vol",CellParams::BGBXVOL,3)); + restartReducer.addMetadata(restartReducer.size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,vg,bg}$","1.0"); + restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("vg_b_perturbed_vol",CellParams::PERBXVOL,3)); + restartReducer.addMetadata(restartReducer.size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,vg,per}$","1.0"); restartReducer.addOperator(new DRO::DataReductionOperatorBVOLDerivatives("Bvolume_derivatives",0,bvolderivatives::N_BVOL_DERIVATIVES)); restartReducer.addOperator(new DRO::MPIrank); restartReducer.addOperator(new DRO::BoundaryType); From 11fd39c5acffc2f6af7ceb0597f222c3bc0b93c6 Mon Sep 17 00:00:00 2001 From: Yann Pfau-Kempf Date: Wed, 13 Nov 2019 11:31:39 +0200 Subject: [PATCH 569/602] Replace the restart volume B with a single DRO. 
--- iowrite.cpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/iowrite.cpp b/iowrite.cpp index 4f3e2b81f..933c35139 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -1321,10 +1321,8 @@ bool writeRestart(dccrg::Dccrg& mpiGrid, restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("max_v_dt",CellParams::MAXVDT,1)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("max_r_dt",CellParams::MAXRDT,1)); restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("max_fields_dt",CellParams::MAXFDT,1)); - restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("vg_b_background_vol",CellParams::BGBXVOL,3)); - restartReducer.addMetadata(restartReducer.size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,vg,bg}$","1.0"); - restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("vg_b_perturbed_vol",CellParams::PERBXVOL,3)); - restartReducer.addMetadata(restartReducer.size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,vg,per}$","1.0"); + restartReducer.addOperator(new DRO::VariableBVol); + restartReducer.addMetadata(restartReducer.size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,vg}$","1.0"); restartReducer.addOperator(new DRO::DataReductionOperatorBVOLDerivatives("Bvolume_derivatives",0,bvolderivatives::N_BVOL_DERIVATIVES)); restartReducer.addOperator(new DRO::MPIrank); restartReducer.addOperator(new DRO::BoundaryType); From 3efdc74b2154470ae4ee6a23b2a418b4047b3f0b Mon Sep 17 00:00:00 2001 From: Yann Pfau-Kempf Date: Wed, 13 Nov 2019 11:50:17 +0200 Subject: [PATCH 570/602] Removed Bvolume_derivatives from restarts as it's not used by anything. --- iowrite.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/iowrite.cpp b/iowrite.cpp index 933c35139..07663b7cc 100644 --- a/iowrite.cpp +++ b/iowrite.cpp @@ -1323,7 +1323,6 @@ bool writeRestart(dccrg::Dccrg& mpiGrid, restartReducer.addOperator(new DRO::DataReductionOperatorCellParams("max_fields_dt",CellParams::MAXFDT,1)); restartReducer.addOperator(new DRO::VariableBVol); restartReducer.addMetadata(restartReducer.size()-1,"T","$\\mathrm{T}$","$B_\\mathrm{vol,vg}$","1.0"); - restartReducer.addOperator(new DRO::DataReductionOperatorBVOLDerivatives("Bvolume_derivatives",0,bvolderivatives::N_BVOL_DERIVATIVES)); restartReducer.addOperator(new DRO::MPIrank); restartReducer.addOperator(new DRO::BoundaryType); restartReducer.addOperator(new DRO::BoundaryLayer); From bc66627940642c4b5634a9bcc5783fee4c9cb436 Mon Sep 17 00:00:00 2001 From: Yann Pfau-Kempf Date: Wed, 13 Nov 2019 12:21:08 +0200 Subject: [PATCH 571/602] Reinstated Bvol. No gridGlue yet, no DRO yet. Compiles. 
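The volume-averaged electric field is reconstructed from the staggered
field-solver grid: each E component lives on cell edges, so the cell average of
EX is the mean of the four x-directed edges bounding the cell, and EY and EZ
follow the same rule with the offsets applied along the other two axes. A
self-contained sketch of that four-point rule; getEdgeEx is a hypothetical
accessor standing in for EGrid.get(...)->at(fsgrids::efield::EX):

    // Average the four x edges at (j,k), (j+1,k), (j,k+1), (j+1,k+1).
    // getEdgeEx is a hypothetical stand-in for the fsgrid accessor.
    double volumeAveragedEx(int i, int j, int k, double (*getEdgeEx)(int, int, int)) {
       return 0.25 * ( getEdgeEx(i, j,     k    )
                     + getEdgeEx(i, j + 1, k    )
                     + getEdgeEx(i, j,     k + 1)
                     + getEdgeEx(i, j + 1, k + 1) );
    }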
--- common.h | 6 +++ fieldsolver/ldz_volume.cpp | 89 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 95 insertions(+) diff --git a/common.h b/common.h index 17555ce7c..f9cde544e 100644 --- a/common.h +++ b/common.h @@ -171,6 +171,9 @@ namespace CellParams { P_11_V, /*!< P_xx component after propagation in velocity space */ P_22_V, /*!< P_yy component after propagation in velocity space */ P_33_V, /*!< P_zz component after propagation in velocity space */ + EXVOL, /*!< Volume electric field averaged over spatial cell, x-component.*/ + EYVOL, /*!< Volume electric field averaged over spatial cell, y-component.*/ + EZVOL, /*!< Volume electric field averaged over spatial cell, z-component.*/ MAXVDT, /*!< maximum timestep allowed in velocity space for this cell, * this is the max allowed timestep over all particle species.*/ MAXRDT, /*!< maximum timestep allowed in ordinary space for this cell, @@ -353,6 +356,9 @@ namespace fsgrids { dPERBYVOLdz, dPERBZVOLdx, dPERBZVOLdy, + EXVOL, /*!< volume-averaged electric field x component */ + EYVOL, /*!< volume-averaged electric field y component */ + EZVOL, /*!< volume-averaged electric field z component */ N_VOL }; diff --git a/fieldsolver/ldz_volume.cpp b/fieldsolver/ldz_volume.cpp index f385e17bb..e406d03b0 100644 --- a/fieldsolver/ldz_volume.cpp +++ b/fieldsolver/ldz_volume.cpp @@ -67,6 +67,95 @@ void calculateVolumeAveragedFields( volGrid0->at(fsgrids::volfields::PERBXVOL) = perturbedCoefficients[Rec::a_0]; volGrid0->at(fsgrids::volfields::PERBYVOL) = perturbedCoefficients[Rec::b_0]; volGrid0->at(fsgrids::volfields::PERBZVOL) = perturbedCoefficients[Rec::c_0]; + + // Calculate volume average of E (FIXME NEEDS IMPROVEMENT): + std::array * EGrid_i1j1k1 = EGrid.get(i,j,k); + if ( technicalGrid.get(i,j,k)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || + (technicalGrid.get(i,j,k)->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && technicalGrid.get(i,j,k)->sysBoundaryLayer == 1) + ) { + #ifdef DEBUG_FSOLVER + bool ok = true; + if (technicalGrid.get(i ,j+1,k ) == NULL) ok = false; + if (technicalGrid.get(i ,j ,k+1) == NULL) ok = false; + if (technicalGrid.get(i ,j+1,k+1) == NULL) ok = false; + if (ok == false) { + stringstream ss; + ss << "ERROR, got NULL neighbor in " << __FILE__ << ":" << __LINE__ << endl; + cerr << ss.str(); exit(1); + } + #endif + + std::array * EGrid_i1j2k1 = EGrid.get(i ,j+1,k ); + std::array * EGrid_i1j1k2 = EGrid.get(i ,j ,k+1); + std::array * EGrid_i1j2k2 = EGrid.get(i ,j+1,k+1); + + CHECK_FLOAT(EGrid_i1j1k1->at(fsgrids::efield::EX)) + CHECK_FLOAT(EGrid_i1j2k1->at(fsgrids::efield::EX)) + CHECK_FLOAT(EGrid_i1j1k2->at(fsgrids::efield::EX)) + CHECK_FLOAT(EGrid_i1j2k2->at(fsgrids::efield::EX)) + volGrid0->at(fsgrids::volfields::EXVOL) = FOURTH*(EGrid_i1j1k1->at(fsgrids::efield::EX) + EGrid_i1j2k1->at(fsgrids::efield::EX) + EGrid_i1j1k2->at(fsgrids::efield::EX) + EGrid_i1j2k2->at(fsgrids::efield::EX)); + CHECK_FLOAT(volGrid0->at(fsgrids::volfields::EXVOL)) + } else { + volGrid0->at(fsgrids::volfields::EXVOL) = 0.0; + } + + if ( technicalGrid.get(i,j,k)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || + (technicalGrid.get(i,j,k)->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && technicalGrid.get(i,j,k)->sysBoundaryLayer == 1) + ) { + #ifdef DEBUG_FSOLVER + bool ok = true; + if (technicalGrid.get(i+1,j ,k ) == NULL) ok = false; + if (technicalGrid.get(i ,j ,k+1) == NULL) ok = false; + if (technicalGrid.get(i+1,j ,k+1) == NULL) ok = false; + if (ok == false) { + stringstream ss; + ss << "ERROR, got NULL 
neighbor in " << __FILE__ << ":" << __LINE__ << endl; + cerr << ss.str(); exit(1); + } + #endif + + std::array * EGrid_i2j1k1 = EGrid.get(i+1,j ,k ); + std::array * EGrid_i1j1k2 = EGrid.get(i ,j ,k+1); + std::array * EGrid_i2j1k2 = EGrid.get(i+1,j ,k+1); + + CHECK_FLOAT(EGrid_i1j1k1->at(fsgrids::efield::EY)) + CHECK_FLOAT(EGrid_i2j1k1->at(fsgrids::efield::EY)) + CHECK_FLOAT(EGrid_i1j1k2->at(fsgrids::efield::EY)) + CHECK_FLOAT(EGrid_i2j1k2->at(fsgrids::efield::EY)) + volGrid0->at(fsgrids::volfields::EYVOL) = FOURTH*(EGrid_i1j1k1->at(fsgrids::efield::EY) + EGrid_i2j1k1->at(fsgrids::efield::EY) + EGrid_i1j1k2->at(fsgrids::efield::EY) + EGrid_i2j1k2->at(fsgrids::efield::EY)); + CHECK_FLOAT(volGrid0->at(fsgrids::volfields::EYVOL)) + } else { + volGrid0->at(fsgrids::volfields::EYVOL) = 0.0; + } + + if ( technicalGrid.get(i,j,k)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || + (technicalGrid.get(i,j,k)->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && technicalGrid.get(i,j,k)->sysBoundaryLayer == 1) + ) { + #ifdef DEBUG_FSOLVER + bool ok = true; + if (technicalGrid.get(i+1,j ,k ) == NULL) ok = false; + if (technicalGrid.get(i ,j+1,k ) == NULL) ok = false; + if (technicalGrid.get(i+1,j+1,k ) == NULL) ok = false; + if (ok == false) { + stringstream ss; + ss << "ERROR, got NULL neighbor in " << __FILE__ << ":" << __LINE__ << endl; + cerr << ss.str(); exit(1); + } + #endif + + std::array * EGrid_i2j1k1 = EGrid.get(i+1,j ,k ); + std::array * EGrid_i1j2k1 = EGrid.get(i ,j+1,k ); + std::array * EGrid_i2j2k1 = EGrid.get(i+1,j+1,k ); + + CHECK_FLOAT(EGrid_i1j1k1->at(fsgrids::efield::EZ)) + CHECK_FLOAT(EGrid_i2j1k1->at(fsgrids::efield::EZ)) + CHECK_FLOAT(EGrid_i1j2k1->at(fsgrids::efield::EZ)) + CHECK_FLOAT(EGrid_i2j2k1->at(fsgrids::efield::EZ)) + volGrid0->at(fsgrids::volfields::EZVOL) = FOURTH*(EGrid_i1j1k1->at(fsgrids::efield::EZ) + EGrid_i2j1k1->at(fsgrids::efield::EZ) + EGrid_i1j2k1->at(fsgrids::efield::EZ) + EGrid_i2j2k1->at(fsgrids::efield::EZ)); + CHECK_FLOAT(volGrid0->at(fsgrids::volfields::EZVOL)) + } else { + volGrid0->at(fsgrids::volfields::EZVOL) = 0.0; + } } } } From dc120a3b75d501ed9ac9ea652a9f1b2f9bfc1f19 Mon Sep 17 00:00:00 2001 From: Yann Pfau-Kempf Date: Wed, 13 Nov 2019 12:21:45 +0200 Subject: [PATCH 572/602] Revert "removed volumetric e-fields" This reverts commit 4bcc82d3ca438e527cb0d10a90ab758c1721cd5f. 
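This restores the vg_e_vol and fg_e_vol data reducers. The fg_e_vol reducer
hands vlsv one flat buffer holding three Reals per local fsgrid cell. A
self-contained sketch of the flattening it performs, with the x index running
fastest and z slowest; field is a hypothetical accessor standing in for
volGrid.get(x,y,z)->at(fsgrids::volfields::EXVOL + c):

    #include <array>
    #include <vector>

    // Sketch only: field(x,y,z,c) is a hypothetical stand-in for the fsgrid lookup.
    std::vector<double> flattenVectorField(const std::array<int, 3>& gridSize,
                                           double (*field)(int, int, int, int)) {
       std::vector<double> retval(static_cast<size_t>(gridSize[0]) * gridSize[1] * gridSize[2] * 3);
       for (int z = 0; z < gridSize[2]; z++) {
          for (int y = 0; y < gridSize[1]; y++) {
             for (int x = 0; x < gridSize[0]; x++) {
                const size_t base = 3 * (static_cast<size_t>(gridSize[1]) * gridSize[0] * z
                                       + static_cast<size_t>(gridSize[0]) * y + x);
                for (int c = 0; c < 3; c++) {
                   retval[base + c] = field(x, y, z, c); // EXVOL, EYVOL, EZVOL
                }
             }
          }
       }
       return retval;
    }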
--- datareduction/datareducer.cpp | 38 +++++++++++++++++++++++++++++++++++ parameters.cpp | 2 ++ 2 files changed, 40 insertions(+) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 93f23e84e..e3f131992 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -574,6 +574,44 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti } continue; } + if(lowercase == "vole" || lowercase == "vg_vole" || lowercase == "evol" || lowercase == "vg_e_vol" || lowercase == "e_vol") { + // Volume-averaged E field + outputReducer->addOperator(new DRO::DataReductionOperatorCellParams("vg_e_vol",CellParams::EXVOL,3)); + outputReducer->addMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{vol,vg}$","1.0"); + continue; + } + if(lowercase == "fg_vole" || lowercase == "fg_e_vol" || lowercase == "fg_evol") { + outputReducer->addOperator(new DRO::DataReductionOperatorFsGrid("fg_e_vol",[]( + FsGrid< std::array, 2>& perBGrid, + FsGrid< std::array, 2>& EGrid, + FsGrid< std::array, 2>& EHallGrid, + FsGrid< std::array, 2>& EGradPeGrid, + FsGrid< std::array, 2>& momentsGrid, + FsGrid< std::array, 2>& dPerBGrid, + FsGrid< std::array, 2>& dMomentsGrid, + FsGrid< std::array, 2>& BgBGrid, + FsGrid< std::array, 2>& volGrid, + FsGrid< fsgrids::technical, 2>& technicalGrid)->std::vector { + + std::array& gridSize = technicalGrid.getLocalSize(); + std::vector retval(gridSize[0]*gridSize[1]*gridSize[2]*3); + + // Iterate through fsgrid cells and extract EVOL + for(int z=0; zaddMetadata(outputReducer->size()-1,"V/m","$\\mathrm{V}\\,\\mathrm{m}^{-1}$","$E_\\mathrm{vol,fg}$","1.0"); + continue; + } if(lowercase == "halle" || lowercase == "fg_halle" || lowercase == "fg_e_hall") { for(int index=0; index Date: Wed, 13 Nov 2019 12:30:20 +0200 Subject: [PATCH 573/602] gridGlue for volume electric fields. Compiles. 
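The dccrg side sees fsgrid data only through getFieldsFromFsGrid, which sums
each communicated field over all fsgrid cells covered by a dccrg cell and then
divides by the number of contributors; this patch widens that channel from 18
to 21 fields so EXVOL..EZVOL travel the same way. A self-contained sketch of
the accumulate-then-normalize contract, with the field count and the zero
fallback taken from the diff below:

    // Sketch of the communication payload used in getFieldsFromFsGrid.
    struct Average {
       static const int fieldsToCommunicate = 21;
       double sums[fieldsToCommunicate] = {0.0};
       int cells = 0;
    };

    // Volume average of one field; cells == 0 falls back to 0 as in the diff's else branch.
    double mean(const Average& a, int field) {
       return (a.cells > 0) ? a.sums[field] / a.cells : 0.0;
    }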
--- fieldsolver/gridGlue.cpp | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 2412db6cb..33b26eeaf 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -191,7 +191,7 @@ void getFieldsFromFsGrid( ) { // TODO: solver only needs bgb + PERB, we could combine them - const int fieldsToCommunicate = 18; + const int fieldsToCommunicate = 21; struct Average { Real sums[fieldsToCommunicate]; int cells; @@ -285,7 +285,9 @@ void getFieldsFromFsGrid( sendBuffer[ii].sums[15] += egradpecell->at(fsgrids::egradpe::EXGRADPE); sendBuffer[ii].sums[16] += egradpecell->at(fsgrids::egradpe::EYGRADPE); sendBuffer[ii].sums[17] += egradpecell->at(fsgrids::egradpe::EZGRADPE); - + sendBuffer[ii].sums[18] += volcell->at(fsgrids::volfields::EXVOL); + sendBuffer[ii].sums[19] += volcell->at(fsgrids::volfields::EYVOL); + sendBuffer[ii].sums[20] += volcell->at(fsgrids::volfields::EZVOL); sendBuffer[ii].cells++; } ii++; @@ -319,7 +321,7 @@ void getFieldsFromFsGrid( //Store data in dccrg for (auto const &cellAggregate : aggregatedResult) { - auto cellParams = mpiGrid[cellAggregate.first]->get_cell_parameters(); + auto cellParams = mpiGrid[cellAggregate.first]->get_cell_parameters(); if ( cellAggregate.second.cells > 0) { cellParams[CellParams::PERBXVOL] = cellAggregate.second.sums[0] / cellAggregate.second.cells; cellParams[CellParams::PERBYVOL] = cellAggregate.second.sums[1] / cellAggregate.second.cells; @@ -332,10 +334,13 @@ void getFieldsFromFsGrid( mpiGrid[cellAggregate.first]->derivativesBVOL[bvolderivatives::dPERBZVOLdy] = cellAggregate.second.sums[11] / cellAggregate.second.cells; cellParams[CellParams::BGBXVOL] = cellAggregate.second.sums[12] / cellAggregate.second.cells; cellParams[CellParams::BGBYVOL] = cellAggregate.second.sums[13] / cellAggregate.second.cells; - cellParams[CellParams::BGBZVOL] = cellAggregate.second.sums[14] / cellAggregate.second.cells; + cellParams[CellParams::BGBZVOL] = cellAggregate.second.sums[14] / cellAggregate.second.cells; cellParams[CellParams::EXGRADPE] = cellAggregate.second.sums[15] / cellAggregate.second.cells; cellParams[CellParams::EYGRADPE] = cellAggregate.second.sums[16] / cellAggregate.second.cells; - cellParams[CellParams::EZGRADPE] = cellAggregate.second.sums[17] / cellAggregate.second.cells; + cellParams[CellParams::EZGRADPE] = cellAggregate.second.sums[17] / cellAggregate.second.cells; + cellParams[CellParams::EXVOL] = cellAggregate.second.sums[18] / cellAggregate.second.cells; + cellParams[CellParams::EYVOL] = cellAggregate.second.sums[19] / cellAggregate.second.cells; + cellParams[CellParams::EZVOL] = cellAggregate.second.sums[20] / cellAggregate.second.cells; } else{ // This could happpen if all fsgrid cells are do not compute @@ -354,6 +359,9 @@ void getFieldsFromFsGrid( cellParams[CellParams::EXGRADPE] = 0; cellParams[CellParams::EYGRADPE] = 0; cellParams[CellParams::EZGRADPE] = 0; + cellParams[CellParams::EXVOL] = 0; + cellParams[CellParams::EYVOL] = 0; + cellParams[CellParams::EZVOL] = 0; } } From cf1b5c43b1280cbe20031010a2e9ca33a094d857 Mon Sep 17 00:00:00 2001 From: Yann Pfau-Kempf Date: Wed, 13 Nov 2019 13:58:34 +0200 Subject: [PATCH 574/602] Consistency fix in parameters. 
--- parameters.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parameters.cpp b/parameters.cpp index eaf87ce07..ecb60e04c 100644 --- a/parameters.cpp +++ b/parameters.cpp @@ -261,7 +261,7 @@ bool Parameters::addParameters(){ // NOTE Do not remove the : before the list of variable names as this is parsed by tools/check_vlasiator_cfg.sh Readparameters::addComposing("variables.diagnostic", std::string()+"List of data reduction operators (DROs) to add to the diagnostic runtime output. Each variable to be added has to be on a new line diagnostic = XXX. Names are case insensitive. "+ "Available (20190320): "+ - "populations_blocks "+ + "populations_vg_blocks "+ "rhom populations_rho_loss_adjust"+ "loadbalance_weight"+ "maxdt_acceleration maxdt_translation populations_maxdt_acceleration populations_maxdt_translation "+ From f89c9aa58a4201914646625c7787b89774ff75c5 Mon Sep 17 00:00:00 2001 From: Yann Pfau-Kempf Date: Wed, 13 Nov 2019 15:32:58 +0200 Subject: [PATCH 575/602] Clarification in gridGlue, fix in vlasiator.cpp to get correct t=0 fields back. --- datareduction/datareducer.cpp | 6 +++--- vlasiator.cpp | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index e3f131992..536fd00bb 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -600,9 +600,9 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti for(int z=0; z Date: Wed, 13 Nov 2019 15:00:56 +0100 Subject: [PATCH 576/602] Status of now. --- MAKE/Makefile.hornet_gcc | 74 +++++++++++++++----------- vlasovsolver/cpu_acc_intersections.cpp | 2 +- vlasovsolver/cpu_acc_intersections.hpp | 2 +- vlasovsolver/cpu_acc_semilag.cpp | 4 +- vlasovsolver/cpu_acc_transform.hpp | 4 +- 5 files changed, 50 insertions(+), 36 deletions(-) diff --git a/MAKE/Makefile.hornet_gcc b/MAKE/Makefile.hornet_gcc index 38ed88f9a..d2526af33 100644 --- a/MAKE/Makefile.hornet_gcc +++ b/MAKE/Makefile.hornet_gcc @@ -1,15 +1,31 @@ CMP = CC -LNK = CC -#-Wl,--allow-multiple-definition +LNK = CC -not_parallel_tools: CMP = g++ -not_parallel_tools: LNK = g++ +#======== Vectorization ========== +#Set vector backend type for vlasov solvers, sets precision and length. +#Options: +# AVX: VEC4D_AGNER, VEC4F_AGNER, VEC8F_AGNER +# AVX512: VEC8D_AGNER, VEC16F_AGNER +# Fallback: VEC4D_FALLBACK, VEC4F_FALLBACK, VEC8F_FALLBACK + +ifeq ($(DISTRIBUTION_FP_PRECISION),SPF) +#Single-precision + VECTORCLASS = VEC16F_AGNER +else +#Double-precision + VECTORCLASS = VEC8D_AGNER +endif + +#======== PAPI ========== +#Add PAPI_MEM define to use papi to report memory consumption? +CXXFLAGS += -DPAPI_MEM + + +#======== Allocator ========= +#Use jemalloc instead of system malloc to reduce memory fragmentation? 
https://github.com/jemalloc/jemalloc +#Configure jemalloc with --with-jemalloc-prefix=je_ when installing it +CXXFLAGS += -DUSE_JEMALLOC -DJEMALLOC_NO_DEMANGLE -#-DNO_WRITE_AT_ALL: Define to disable write at all to -# avoid memleak (much slower IO) -#-DMPICH_IGNORE_CXX_SEEK: Ignores some multiple definition -# errors that come up when using -# mpi.h in c++ on Cray CXXFLAGS = -DMPICH_IGNORE_CXX_SEEK @@ -22,35 +38,35 @@ FLAGS = #GNU flags: CC_BRAND = gcc -CC_BRAND_VERSION = 5.2.0 -CXXFLAGS += -O3 -fopenmp -funroll-loops -std=c++0x -W -Wall -Wno-unused -fabi-version=0 -mavx2 -#CXXFLAGS += -O3 -fopenmp -funroll-loops -std=c++0x -W -Wall -Wno-unused -fabi-version=0 -mavx +CC_BRAND_VERSION = 8.3.0 +CXXFLAGS += -g -O3 -fopenmp -funroll-loops -std=c++17 -W -Wall -Wno-unused -fabi-version=0 -mavx2 +#CXXFLAGS += -O3 -fopenmp -funroll-loops -std=c++17 -W -Wall -Wno-unused -fabi-version=0 -mavx not_parallel_tools: CXXFLAGS += -march=native -mno-avx2 -mavx -testpackage: CXXFLAGS = -O2 -fopenmp -funroll-loops -std=c++0x -fabi-version=0 +testpackage: CXXFLAGS = -g -O2 -fopenmp -funroll-loops -std=c++17 -fabi-version=0 not_parallel_tools: CC_BRAND_VERSION = 4.9.2-noavx2 MATHFLAGS = -ffast-math LDFLAGS = LIB_MPI = -lgomp -BOOST_VERSION = 1.56.0 -MPT_VERSION = 7.1.3 -ZOLTAN_VERSION = 3.8 -SILO_VERSION = 4.9.1 -JEMALLOC_VERSION = 4.0.4 -LIBRARY_PREFIX = /zhome/academic/HLRS/pri/iprurgan/vlasiator/libraries +BOOST_VERSION = +MPT_VERSION = 7.7.6 +ZOLTAN_VERSION = +SILO_VERSION = +JEMALLOC_VERSION = 5.2.1 +LIBRARY_PREFIX = /zhome/academic/HLRS/pri/ipryakem/libraries #compiled libraries -INC_BOOST = -I$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/boost/$(BOOST_VERSION)/include -LIB_BOOST = -L$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/boost/$(BOOST_VERSION)/lib -lboost_program_options +INC_BOOST = -I$(CRAY_TRILINOS_PREFIX_DIR)/include/boost +LIB_BOOST = -L$(CRAY_TRILINOS_PREFIX_DIR)/lib -lboost_program_options -INC_ZOLTAN = -I$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/zoltan/$(ZOLTAN_VERSION)/include -LIB_ZOLTAN = -L$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/zoltan/$(ZOLTAN_VERSION)/lib -lzoltan +INC_ZOLTAN = -I$(CRAY_TRILINOS_PREFIX_DIR)/include +LIB_ZOLTAN = -L$(CRAY_TRILINOS_PREFIX_DIR)/lib -lzoltan -INC_SILO = -I$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/silo/$(SILO_VERSION)/include -LIB_SILO = -L$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/silo/$(SILO_VERSION)/lib -lsilo +#INC_SILO = -I$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/silo/$(SILO_VERSION)/include +#LIB_SILO = -L$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/silo/$(SILO_VERSION)/lib -lsilo INC_JEMALLOC = -I$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/include LIB_JEMALLOC = -L$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/jemalloc/$(JEMALLOC_VERSION)/lib -ljemalloc @@ -58,17 +74,15 @@ LIB_JEMALLOC = -L$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_ INC_VLSV = -I$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/vlsv LIB_VLSV = -L$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/vlsv -lvlsv - LIB_PROFILE = -L$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/lib -lphiprof -#LIB_PROFILE = 
$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/lib/libphiprof.a INC_PROFILE = -I$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_VERSION)/phiprof/include #header libraries -INC_EIGEN = -I$(LIBRARY_PREFIX)/eigen/ +INC_EIGEN = -I$(LIBRARY_PREFIX)/Eigen/ INC_DCCRG = -I$(LIBRARY_PREFIX)/dccrg/ -INC_FSGRID = -I/zhome/academic/HLRS/pri/ipryakem/fsgrid/ -INC_VECTORCLASS = -I$(LIBRARY_PREFIX)/vectorclass +INC_FSGRID = -I$(LIBRARY_PREFIX)/fsgrid/ +INC_VECTORCLASS = -I$(LIBRARY_PREFIX)/vectorclass-version1 diff --git a/vlasovsolver/cpu_acc_intersections.cpp b/vlasovsolver/cpu_acc_intersections.cpp index b0ae43385..fa41ff856 100644 --- a/vlasovsolver/cpu_acc_intersections.cpp +++ b/vlasovsolver/cpu_acc_intersections.cpp @@ -29,7 +29,7 @@ //#include //#include -#include +#include //Eigen #include "../common.h" #include "../spatial_cell.hpp" diff --git a/vlasovsolver/cpu_acc_intersections.hpp b/vlasovsolver/cpu_acc_intersections.hpp index 19d3cbf5f..2e2c5ce18 100644 --- a/vlasovsolver/cpu_acc_intersections.hpp +++ b/vlasovsolver/cpu_acc_intersections.hpp @@ -22,7 +22,7 @@ #ifndef CPU_ACC_INTERSECTIONS_H #define CPU_ACC_INTERSECTIONS_H -#include +#include // Eigen #include "../definitions.h" #include "../spatial_cell.hpp" diff --git a/vlasovsolver/cpu_acc_semilag.cpp b/vlasovsolver/cpu_acc_semilag.cpp index ba0bdaa8f..524497178 100644 --- a/vlasovsolver/cpu_acc_semilag.cpp +++ b/vlasovsolver/cpu_acc_semilag.cpp @@ -24,8 +24,8 @@ #include #include -#include -#include +#include // Eigen +#include // Eigen #include "cpu_acc_semilag.hpp" #include "cpu_acc_transform.hpp" diff --git a/vlasovsolver/cpu_acc_transform.hpp b/vlasovsolver/cpu_acc_transform.hpp index ba2d3614f..e6f92d87e 100644 --- a/vlasovsolver/cpu_acc_transform.hpp +++ b/vlasovsolver/cpu_acc_transform.hpp @@ -23,8 +23,8 @@ #ifndef CPU_ACC_TRANSFORM_H #define CPU_ACC_TRANSFORM_H -#include -#include +#include // Eigen +#include // Eigen #include "../common.h" #include "../spatial_cell.hpp" From df360bd569fda17609b916acabc333f1b17c5141 Mon Sep 17 00:00:00 2001 From: ipryakem Date: Thu, 14 Nov 2019 10:51:00 +0100 Subject: [PATCH 577/602] Specify port in transferPraceData if needed. --- tools/transferPraceData.sh | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tools/transferPraceData.sh b/tools/transferPraceData.sh index b7908dcb6..b18d02da8 100755 --- a/tools/transferPraceData.sh +++ b/tools/transferPraceData.sh @@ -341,7 +341,7 @@ transferPraceData userserver path transfer_file local_storage_path Please run grid_proxy_init first when using the gridFTP backend. user Username, option not used for gridftp or local-dd transfers (put arbitrary name) - server One of: Hermit (gridftp), Hazelhen-r (rsync), Hazelhen-ds (dd|ssh), Abel (gridftp), Sisu-g (gridftp) Sisu-r (rsync) Sisu-ds (dd|ssh) localhost-dd (local-dd) + server One of: Hermit (gridftp), Hazelhen-r (rsync), Hazelhen-ds (dd|ssh), Abel (gridftp), Sisu-g (gridftp) Sisu-r (rsync) Sisu-ds (dd|ssh) localhost-dd (local-dd) localhost-rp (rsync with special port) path is a path on remote machine (e.g. /univ_1/ws1/ws/iprsalft-paper1-runs-0/2D/ecliptic/AAE)" transfer_file is a file in the path on the remote machine created using ls -la *myfiles_to_transfer* > transfer_list.txt" local_storage_path is the folder where the files are ultimately copied after transfer, e.g., a tape drive. During transfer they go to the current folder. "." is also allowed. 
@@ -388,8 +388,13 @@ elif [ $machine == "localhost-dd" ] then server=localhost method=ddssh +elif [ $machine == "localhost-rp" ] +then + server=localhost + method=rsync + export RSYNC_RSH="ssh -p 1234" else - echo "Allowed server values are Hermit, Hazelhen-r, Hazelhen-ds, Abel, Sisu-g, Sisu-r, Sisu-ds, localhost-dd" + echo "Allowed server values are Hermit, Hazelhen-r, Hazelhen-ds, Abel, Sisu-g, Sisu-r, Sisu-ds, localhost-dd, localhost-rp" exit 1 fi From 504cd35ff7713d7d9298ab3bcd4312e90de591c3 Mon Sep 17 00:00:00 2001 From: ipryakem Date: Thu, 14 Nov 2019 10:59:28 +0100 Subject: [PATCH 578/602] Changed port number. --- tools/transferPraceData.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/transferPraceData.sh b/tools/transferPraceData.sh index b18d02da8..3a3549f61 100755 --- a/tools/transferPraceData.sh +++ b/tools/transferPraceData.sh @@ -392,7 +392,7 @@ elif [ $machine == "localhost-rp" ] then server=localhost method=rsync - export RSYNC_RSH="ssh -p 1234" + export RSYNC_RSH="ssh -p 1235" else echo "Allowed server values are Hermit, Hazelhen-r, Hazelhen-ds, Abel, Sisu-g, Sisu-r, Sisu-ds, localhost-dd, localhost-rp" exit 1 From b004e90f3cdbe86b779f5445ecefd1fb06dc53cd Mon Sep 17 00:00:00 2001 From: Yann Pfau-Kempf Date: Thu, 14 Nov 2019 14:41:28 +0200 Subject: [PATCH 579/602] Proper reordering of field initialisation. --- vlasiator.cpp | 52 ++++++++++++++++++++++++++------------------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 8d94e3ff8..0d1bb3cc6 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -473,36 +473,38 @@ int main(int argn,char* args[]) { // Free up memory: readparameters.finalize(); + // Run the field solver once with zero dt. This will initialize + // Fieldsolver dt limits, and also calculate volumetric B-fields. + propagateFields( + perBGrid, + perBDt2Grid, + EGrid, + EDt2Grid, + EHallGrid, + EGradPeGrid, + momentsGrid, + momentsDt2Grid, + dPerBGrid, + dMomentsGrid, + BgBGrid, + volGrid, + technicalGrid, + sysBoundaries, 0.0, 1.0 + ); + phiprof::start("getFieldsFromFsGrid"); + volGrid.updateGhostCells(); + getFieldsFromFsGrid(volGrid, BgBGrid, EGradPeGrid, technicalGrid, mpiGrid, cells); + phiprof::stop("getFieldsFromFsGrid"); + if (P::isRestart == false) { - // Run Vlasov solver once with zero dt to initialize - //per-cell dt limits. In restarts, we read the dt from file. phiprof::start("compute-dt"); - - if(P::propagateField) { - propagateFields( - perBGrid, - perBDt2Grid, - EGrid, - EDt2Grid, - EHallGrid, - EGradPeGrid, - momentsGrid, - momentsDt2Grid, - dPerBGrid, - dMomentsGrid, - BgBGrid, - volGrid, - technicalGrid, - sysBoundaries, 0.0, 1.0 - ); - } - + // Run Vlasov solver once with zero dt to initialize + // per-cell dt limits. In restarts, we read the dt from file. calculateSpatialTranslation(mpiGrid,0.0); - calculateAcceleration(mpiGrid,0.0); - + calculateAcceleration(mpiGrid,0.0); phiprof::stop("compute-dt"); } - + // Save restart data if (P::writeInitialState) { phiprof::start("write-initial-state"); From 8a237940fb3498bbd352f8a27f0fb441d043b48a Mon Sep 17 00:00:00 2001 From: Yann Pfau-Kempf Date: Thu, 14 Nov 2019 14:45:50 +0200 Subject: [PATCH 580/602] Removed extraneous +. 
--- datareduction/datareducer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/datareduction/datareducer.cpp b/datareduction/datareducer.cpp index 536fd00bb..092e5e1a7 100644 --- a/datareduction/datareducer.cpp +++ b/datareduction/datareducer.cpp @@ -634,7 +634,7 @@ void initializeDataReducers(DataReducer * outputReducer, DataReducer * diagnosti for(int z=0; z Date: Thu, 14 Nov 2019 15:51:58 +0200 Subject: [PATCH 581/602] Removed extra gridGlue call. --- vlasiator.cpp | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/vlasiator.cpp b/vlasiator.cpp index 0d1bb3cc6..44e6cb2eb 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -491,6 +491,7 @@ int main(int argn,char* args[]) { technicalGrid, sysBoundaries, 0.0, 1.0 ); + phiprof::start("getFieldsFromFsGrid"); volGrid.updateGhostCells(); getFieldsFromFsGrid(volGrid, BgBGrid, EGradPeGrid, technicalGrid, mpiGrid, cells); @@ -548,11 +549,6 @@ int main(int argn,char* args[]) { phiprof::stop("write-initial-state"); } - phiprof::start("getFieldsFromFsGrid"); - volGrid.updateGhostCells(); - getFieldsFromFsGrid(volGrid, BgBGrid, EGradPeGrid, technicalGrid, mpiGrid, cells); - phiprof::stop("getFieldsFromFsGrid"); - if (P::isRestart == false) { //compute new dt phiprof::start("compute-dt"); From 07b5a4726715250cd265ec9d253e52619d2b3afd Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Fri, 15 Nov 2019 15:18:05 +0200 Subject: [PATCH 582/602] reinstated mid-refinement load balances --- projects/Magnetosphere/Magnetosphere.cpp | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/projects/Magnetosphere/Magnetosphere.cpp b/projects/Magnetosphere/Magnetosphere.cpp index e5f751ac0..3f116f563 100644 --- a/projects/Magnetosphere/Magnetosphere.cpp +++ b/projects/Magnetosphere/Magnetosphere.cpp @@ -638,7 +638,7 @@ namespace projects { std::cout << "Rank " << myRank << " refined " << refinedCells.size() << " cells. " << std::endl; } #endif - //mpiGrid.balance_load(); + mpiGrid.balance_load(); } if (P::amrMaxSpatialRefLevel > 1) { @@ -674,7 +674,7 @@ namespace projects { } #endif - //mpiGrid.balance_load(); + mpiGrid.balance_load(); } if (P::amrMaxSpatialRefLevel > 2) { @@ -725,7 +725,7 @@ namespace projects { } #endif - //mpiGrid.balance_load(); + mpiGrid.balance_load(); } if (P::amrMaxSpatialRefLevel > 3) { @@ -760,13 +760,7 @@ namespace projects { } #endif - //mpiGrid.balance_load(); - } - - - // Do load balance only once at end - if (P::amrMaxSpatialRefLevel > 0) { - mpiGrid.balance_load(); + mpiGrid.balance_load(); } return true; From 52c8d3dfd90f0e3128b98a82fbcafa3effd9996f Mon Sep 17 00:00:00 2001 From: Markus Battarbee Date: Fri, 15 Nov 2019 15:44:18 +0200 Subject: [PATCH 583/602] mini-app fixes --- mini-apps/acceleration-vlasiator/map_test.cpp | 19 ++++++++++--------- mini-apps/amr_reconstruction/map_test.cpp | 10 ++++++---- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/mini-apps/acceleration-vlasiator/map_test.cpp b/mini-apps/acceleration-vlasiator/map_test.cpp index c71268f44..234f2e33a 100644 --- a/mini-apps/acceleration-vlasiator/map_test.cpp +++ b/mini-apps/acceleration-vlasiator/map_test.cpp @@ -5,9 +5,10 @@ #include "vlasovsolver/cpu_1d_ppm.hpp" #include "vlasovsolver/cpu_1d_plm.hpp" -#define cell_pop_threshold 1.e-15 -// In vlasiator, called via spatial_cell->getVelocityBlockMinValue(popID) - +const int fluxlimiterscalingfactor=1.e-15; +// Used for better calculation of flux limiters at extreme values. 
+// In vlasiator, the value of spatial_cell->getVelocityBlockMinValue(popID) +// is used here. /*print all values in the vector valued values array. In this array there are blocks_per_dim blocks with a width of WID*/ @@ -60,15 +61,15 @@ void propagate(Vec values[], uint blocks_per_dim, Real v_min, Real dv, #ifdef ACC_SEMILAG_PLM Vec a[2]; - compute_plm_coeff(values, (k_block + 1) * WID + k_cell , a, cell_pop_threshold); + compute_plm_coeff(values, (k_block + 1) * WID + k_cell , a, fluxlimiterscalingfactor); #endif #ifdef ACC_SEMILAG_PPM Vec a[3]; - compute_ppm_coeff(values, h6, (k_block + 1) * WID + k_cell , a, cell_pop_threshold); + compute_ppm_coeff(values, h6, (k_block + 1) * WID + k_cell , a, fluxlimiterscalingfactor); #endif #ifdef ACC_SEMILAG_PQM Vec a[5]; - compute_pqm_coeff(values, h8, (k_block + 1) * WID + k_cell , a, cell_pop_threshold); + compute_pqm_coeff(values, h8, (k_block + 1) * WID + k_cell , a, fluxlimiterscalingfactor); #endif @@ -199,15 +200,15 @@ void print_reconstruction(int step, Vec values[], uint blocks_per_dim, Real v_m for (uint k_cell=0; k_cell #include -#define cell_pop_threshold 1.e-15 -// In vlasiator, called via spatial_cell->getVelocityBlockMinValue(popID) +const int fluxlimiterscalingfactor=1.e-15; +// Used for better calculation of flux limiters at extreme values. +// In vlasiator, the value of spatial_cell->getVelocityBlockMinValue(popID) +// is used here. /*print all values in the vector valued values array. In this array there are blocks_per_dim blocks with a width of WID*/ @@ -76,7 +78,7 @@ void propagate(Vec dr[], Vec values[], Real z_translation, uint blocks_per_dim ) // Compute polynomial coefficients Vec a[3]; //compute_ppm_coeff_nonuniform(dr, values, h4, gid + target_scell_index, a); - compute_ppm_coeff_nonuniform(dr, values, h4, gid, a, cell_pop_threshold); + compute_ppm_coeff_nonuniform(dr, values, h4, gid, a, fluxlimiterscalingfactor); // Compute integral const Vec ngbr_target_density = @@ -119,7 +121,7 @@ void print_reconstruction(int step, Vec dr[], Vec values[], uint blocks_per_dim #ifdef ACC_SEMILAG_PPM Vec a[3]; //compute_ppm_coeff( values, h4, (k_block + 1) * WID + k_cell, a); - compute_ppm_coeff_nonuniform(dr, values, h4, (k_block + 1) * WID + k_cell, a, cell_pop_threshold); + compute_ppm_coeff_nonuniform(dr, values, h4, (k_block + 1) * WID + k_cell, a, fluxlimiterscalingfactor); #endif int iend = k_block * WID + k_cell; From a06cdd547249cbc1445d81ee7b0980ccb045d998 Mon Sep 17 00:00:00 2001 From: ipryakem Date: Tue, 19 Nov 2019 08:31:49 +0100 Subject: [PATCH 584/602] Added return to two non-void functions. 
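
In C++, letting control flow off the end of a value-returning function is undefined behaviour even when the caller discards the result, so both call sites were latent miscompilation risks. A minimal sketch of the repaired pattern, with a hypothetical type name mirroring the fsgrid reduction operator below:

   // Sketch: a compound-assignment operator must return a reference to *this;
   // without the return statement the behaviour is undefined, not merely void.
   struct Sums {
      double sums[2];
      Sums& operator+=(const Sums& rhs) {
         for (int i = 0; i < 2; i++) {
            sums[i] += rhs.sums[i];
         }
         return *this; // this line was missing
      }
   };

The ionosphere fix is the same idea: the default branch of the component switch now returns 0.0 instead of running off the end.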
--- fieldsolver/gridGlue.cpp | 1 + sysboundary/ionosphere.cpp | 1 + 2 files changed, 2 insertions(+) diff --git a/fieldsolver/gridGlue.cpp b/fieldsolver/gridGlue.cpp index 2412db6cb..7b32f22c9 100644 --- a/fieldsolver/gridGlue.cpp +++ b/fieldsolver/gridGlue.cpp @@ -206,6 +206,7 @@ void getFieldsFromFsGrid( for(int i = 0; i < fieldsToCommunicate; i++){ this->sums[i] += rhs.sums[i]; } + return *this; } }; diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index 9e39a6690..f9e7ae2c6 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -620,6 +620,7 @@ namespace SBC { return bGrid->get(i,j,k-1)->at(fsgrids::bfield::PERBX + component); default: cerr << "ERROR: ionosphere boundary tried to copy nonsensical magnetic field component " << component << endl; + return 0.0; break; } From 82da9d219a5a33eacdefd6846b9b537d614d428c Mon Sep 17 00:00:00 2001 From: ipryakem Date: Tue, 19 Nov 2019 08:52:00 +0100 Subject: [PATCH 585/602] Magnetic field solving on all relevant faces, only boundary behaviour in boundary code. This changes the behaviour of the SetByUser boundaries! --- MAKE/Makefile.hornet_gcc | 2 +- fieldsolver/ldz_magnetic_field.cpp | 50 +++++++++++++++++++++++++++--- sysboundary/ionosphere.cpp | 48 ++++++---------------------- sysboundary/outflow.cpp | 44 +------------------------- 4 files changed, 57 insertions(+), 87 deletions(-) diff --git a/MAKE/Makefile.hornet_gcc b/MAKE/Makefile.hornet_gcc index d2526af33..17b7d67fd 100644 --- a/MAKE/Makefile.hornet_gcc +++ b/MAKE/Makefile.hornet_gcc @@ -39,7 +39,7 @@ FLAGS = #GNU flags: CC_BRAND = gcc CC_BRAND_VERSION = 8.3.0 -CXXFLAGS += -g -O3 -fopenmp -funroll-loops -std=c++17 -W -Wall -Wno-unused -fabi-version=0 -mavx2 +CXXFLAGS += -g -O3 -fopenmp -funroll-loops -std=c++17 -W -Wall -Wno-unused -fabi-version=0 -mavx2 -Wall -Wpedantic #CXXFLAGS += -O3 -fopenmp -funroll-loops -std=c++17 -W -Wall -Wno-unused -fabi-version=0 -mavx not_parallel_tools: CXXFLAGS += -march=native -mno-avx2 -mavx testpackage: CXXFLAGS = -g -O2 -fopenmp -funroll-loops -std=c++17 -fabi-version=0 diff --git a/fieldsolver/ldz_magnetic_field.cpp b/fieldsolver/ldz_magnetic_field.cpp index bd5eaa44a..efdfa0789 100644 --- a/fieldsolver/ldz_magnetic_field.cpp +++ b/fieldsolver/ldz_magnetic_field.cpp @@ -191,8 +191,17 @@ void propagateSysBoundaryMagneticField( bGrid = perBDt2Grid.get(i,j,k); } cuint sysBoundaryFlag = technicalGrid.get(i,j,k)->sysBoundaryFlag; + cuint sysBoundaryLayer = technicalGrid.get(i,j,k)->sysBoundaryLayer; + for (uint component = 0; component < 3; component++) { - bGrid->at(fsgrids::bfield::PERBX + component) = sysBoundaries.getSysBoundary(sysBoundaryFlag)->fieldSolverBoundaryCondMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, dt, RKCase, component); + cint neigh_i=i + ((component==0)?-1:0); + cint neigh_j=j + ((component==1)?-1:0); + cint neigh_k=k + ((component==2)?-1:0); + cuint neighborSysBoundaryFlag = technicalGrid.get(neigh_i, neigh_j, neigh_k)->sysBoundaryFlag; + + if (neighborSysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { // Complement to propagateMagneticFieldSimple main loop + bGrid->at(fsgrids::bfield::PERBX + component) = sysBoundaries.getSysBoundary(sysBoundaryFlag)->fieldSolverBoundaryCondMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, dt, RKCase, component); + } } } @@ -231,17 +240,48 @@ void propagateMagneticFieldSimple( timer=phiprof::initializeTimer("Compute cells"); phiprof::start(timer); - #pragma omp parallel for collapse(3) + #pragma 
omp parallel for collapse(3) schedule(dynamic,1) for (int k=0; kfsGridRank=technicalGrid.getRank(); - if(technicalGrid.get(i,j,k)->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) continue; - // Propagate B on all local cells: - propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase); + if(technicalGrid.get(i,j,k)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + // Propagate B on all local cells on all faces: + propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, true, true, true); + } else { + // Easy case: in case we are neighboured by a non-sysboundary cell, we still solve the + // fields normally here. + cuint sysBoundaryLayer = technicalGrid.get(i,j,k)->sysBoundaryLayer; + if(sysBoundaryLayer == 1) { + for (uint component = 0; component < 3; component++) { + cint neigh_i=i + ((component==0)?-1:0); + cint neigh_j=j + ((component==1)?-1:0); + cint neigh_k=k + ((component==2)?-1:0); + cuint neighborSysBoundaryFlag = technicalGrid.get(neigh_i, neigh_j, neigh_k)->sysBoundaryFlag; + + if (neighborSysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { // Complement to propagateSysBoundaryMagneticField + switch(component) { + case 0: + propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, true, false, false); + break; + case 1: + propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, false, true, false); + break; + case 2: + propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, false, false, true); + break; + default: + cerr << "ERROR: ionosphere boundary tried to propagate nonsensical magnetic field component " << component << endl; + break; + } + } + } + } + } } } } diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index f9e7ae2c6..58e469e40 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -541,12 +541,6 @@ namespace SBC { cuint& RKCase, cuint& component ) { - std::vector< std::array > closestCells = getAllClosestNonsysboundaryCells(technicalGrid, i,j,k); - if (closestCells.size() == 1 && closestCells[0][0] == std::numeric_limits::min() ) { - std::cerr << __FILE__ << ":" << __LINE__ << ":" << "No closest cells found!" << std::endl; - abort(); - } - FsGrid< std::array, 2> * bGrid; if(RKCase == RK_ORDER1 || RKCase == RK_ORDER2_STEP2) { @@ -555,37 +549,16 @@ namespace SBC { bGrid = &perBDt2Grid; } - // Easy case: in case we are neighboured by a non-sysboundary cell, we still solve the - // fields normally here. 
- cuint sysBoundaryLayer = technicalGrid.get(i,j,k)->sysBoundaryLayer; - if(sysBoundaryLayer == 1) { - cint neigh_i=i + ((component==0)?-1:0); - cint neigh_j=j + ((component==1)?-1:0); - cint neigh_k=k + ((component==2)?-1:0); - cuint neighborSysBoundaryFlag = technicalGrid.get(neigh_i, neigh_j, neigh_k)->sysBoundaryFlag; - - if (neighborSysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { - switch(component) { - case 0: - propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, true, false, false); - break; - case 1: - propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, false, true, false); - break; - case 2: - propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, false, false, true); - break; - default: - cerr << "ERROR: ionosphere boundary tried to propagate nonsensical magnetic field component " << component << endl; - break; - } - return bGrid->get(i,j,k)->at(fsgrids::bfield::PERBX + component); - } - } // Otherwise: // Sum perturbed B component over all nearest NOT_SYSBOUNDARY neighbours /**** + std::vector< std::array > closestCells = getAllClosestNonsysboundaryCells(technicalGrid, i,j,k); + if (closestCells.size() == 1 && closestCells[0][0] == std::numeric_limits::min() ) { + std::cerr << __FILE__ << ":" << __LINE__ << ":" << "No closest cells found!" << std::endl; + abort(); + } + std::array averageB = {{ 0.0 }}; for (uint it = 0; it < closestCells.size(); it++) { #ifdef DEBUG_IONOSPHERE @@ -613,15 +586,14 @@ namespace SBC { // Copy each face B-field from the cell on the other side of it switch(component) { case 0: - return bGrid->get(i-1,j,k)->at(fsgrids::bfield::PERBX + component); + return bGrid->get(i-1,j,k)->at(fsgrids::bfield::PERBX + component); case 1: - return bGrid->get(i,j-1,k)->at(fsgrids::bfield::PERBX + component); + return bGrid->get(i,j-1,k)->at(fsgrids::bfield::PERBX + component); case 2: - return bGrid->get(i,j,k-1)->at(fsgrids::bfield::PERBX + component); + return bGrid->get(i,j,k-1)->at(fsgrids::bfield::PERBX + component); default: - cerr << "ERROR: ionosphere boundary tried to copy nonsensical magnetic field component " << component << endl; + cerr << "ERROR: ionosphere boundary tried to copy nonsensical magnetic field component " << component << endl; return 0.0; - break; } diff --git a/sysboundary/outflow.cpp b/sysboundary/outflow.cpp index f50a8e197..f801791bf 100644 --- a/sysboundary/outflow.cpp +++ b/sysboundary/outflow.cpp @@ -345,50 +345,8 @@ namespace SBC { cuint& RKCase, cuint& component ) { - Real fieldValue = -1.0; + Real fieldValue; - creal dx =Parameters::dx_ini; - creal dy =Parameters::dy_ini; - creal dz =Parameters::dz_ini; - const std::array globalIndices = technicalGrid.getGlobalIndices(i,j,k); - creal x = (convert(globalIndices[0])+0.5)*technicalGrid.DX + Parameters::xmin; - creal y = (convert(globalIndices[1])+0.5)*technicalGrid.DY + Parameters::ymin; - creal z = (convert(globalIndices[2])+0.5)*technicalGrid.DZ + Parameters::zmin; - - bool isThisCellOnAFace[6]; - determineFace(&isThisCellOnAFace[0], x, y, z, dx, dy, dz, true); - - cuint sysBoundaryLayer = technicalGrid.get(i,j,k)->sysBoundaryLayer; - if(sysBoundaryLayer == 1) { - cint neigh_i=i + ((component==0)?-1:0); - cint neigh_j=j + ((component==1)?-1:0); - cint neigh_k=k + ((component==2)?-1:0); - cuint neighborSysBoundaryFlag = technicalGrid.get(neigh_i, neigh_j, neigh_k)->sysBoundaryFlag; - - if (neighborSysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { - switch(component) { - case 0: - 
propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, true, false, false); - break; - case 1: - propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, false, true, false); - break; - case 2: - propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, false, false, true); - break; - default: - cerr << "ERROR: outflow boundary tried to propagate nonsensical magnetic field component " << component << endl; - break; - } - if(RKCase == RK_ORDER1 || RKCase == RK_ORDER2_STEP2) { - fieldValue = perBGrid.get(i,j,k)->at(fsgrids::bfield::PERBX + component); - } else { - fieldValue = perBDt2Grid.get(i,j,k)->at(fsgrids::bfield::PERBX + component); - } - return fieldValue; - } - } - if(RKCase == RK_ORDER1 || RKCase == RK_ORDER2_STEP2) { fieldValue = fieldBoundaryCopyFromExistingFaceNbrMagneticField(perBGrid, technicalGrid, i, j, k, component); } else { From 0dc66f31b657d3b17f8818b30387a951db1a60fb Mon Sep 17 00:00:00 2001 From: ipryakem Date: Tue, 19 Nov 2019 14:01:07 +0100 Subject: [PATCH 586/602] Implemented averaging of vdf and moments over all close neighbours. --- sysboundary/ionosphere.cpp | 7 +++-- sysboundary/sysboundarycondition.cpp | 40 ++++++++++++++++++++++++++-- sysboundary/sysboundarycondition.h | 10 +++++++ 3 files changed, 51 insertions(+), 6 deletions(-) diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index 58e469e40..91bda83b1 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -680,10 +680,9 @@ namespace SBC { const CellID& cellID, const uint popID ) { -// phiprof::start("vlasovBoundaryCondition (Ionosphere)"); -// const SpatialCell * cell = mpiGrid[cellID]; -// this->vlasovBoundaryCopyFromAllClosestNbrs(mpiGrid, cellID); -// phiprof::stop("vlasovBoundaryCondition (Ionosphere)"); + phiprof::start("vlasovBoundaryCondition (Ionosphere)"); + this->vlasovBoundaryCopyFromAllCloseNbrs(mpiGrid, cellID, popID); + phiprof::stop("vlasovBoundaryCondition (Ionosphere)"); } /** diff --git a/sysboundary/sysboundarycondition.cpp b/sysboundary/sysboundarycondition.cpp index 9f31e8e70..6f479cfed 100644 --- a/sysboundary/sysboundarycondition.cpp +++ b/sysboundary/sysboundarycondition.cpp @@ -249,7 +249,24 @@ namespace SBC { cerr << __FILE__ << ":" << __LINE__ << ": No closest cell found!" << endl; abort(); } - averageCellData(mpiGrid, closestCells, mpiGrid[cellID],popID); + averageCellData(mpiGrid, closestCells, mpiGrid[cellID], popID); + } + + /*! Function used to average and copy the distribution and moments from all the close sysboundarytype::NOT_SYSBOUNDARY cells. + * \param mpiGrid Grid + * \param cellID The cell's ID. + */ + void SysBoundaryCondition::vlasovBoundaryCopyFromAllCloseNbrs( + const dccrg::Dccrg& mpiGrid, + const CellID& cellID,const uint popID + ) { + const std::vector closeCells = getAllCloseNonsysboundaryCells(cellID); + + if(closeCells[0] == INVALID_CELLID) { + cerr << __FILE__ << ":" << __LINE__ << ": No close cell found!" << endl; + abort(); + } + averageCellData(mpiGrid, closeCells, mpiGrid[cellID], popID); } /*! Function used to copy the distribution from (one of) the closest sysboundarytype::NOT_SYSBOUNDARY cell but limiting to values no higher than where it can flow into. Moments are recomputed. 
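The neighbourhood rule added further below can be stated compactly; a standalone sketch of the selection, assuming the offsets i,j,k run over the same search stencil as in getAllClosestNonsysboundaryCells:

   #include <cstdlib>

   // Layer-1 boundary cells collect close neighbours from the 3x3x3 stencil of
   // offsets, layer-2 cells from every NOT_SYSBOUNDARY cell in the search range.
   bool isCloseNeighbour(int sysBoundaryLayer, int i, int j, int k) {
      if (sysBoundaryLayer == 1) {
         return std::abs(i) < 2 && std::abs(j) < 2 && std::abs(k) < 2;
      }
      return sysBoundaryLayer == 2;
   }
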
@@ -419,7 +436,6 @@ namespace SBC { for (size_t i=0; iparameters[CellParams::RHOM_DT2] += factor*incomingCell->parameters[CellParams::RHOM_DT2]; to->parameters[CellParams::VX_DT2] += factor*incomingCell->parameters[CellParams::VX_DT2]; @@ -630,6 +646,8 @@ namespace SBC { const CellID cellId = *it; std::vector & closestCells = allClosestNonsysboundaryCells[cellId]; closestCells.clear(); + std::vector & closeCells = allCloseNonsysboundaryCells[cellId]; + closeCells.clear(); std::array & flowtoCells = allFlowtoCells[cellId]; flowtoCells.fill(NULL); uint dist = numeric_limits::max(); @@ -650,6 +668,12 @@ namespace SBC { if(d2 < 4 && i != 0 && j != 0 && k != 0) { flowtoCells.at(i + 3*j + 9*k + 13) = mpiGrid[cell]; } + if(mpiGrid[cellId]->sysBoundaryLayer == 1 && abs(i) < 2 && abs(j) < 2 && abs(k) < 2) { + closeCells.push_back(cell); + } + if(mpiGrid[cellId]->sysBoundaryLayer == 2) { + closeCells.push_back(cell); + } } } } @@ -668,6 +692,7 @@ namespace SBC { } } if(closestCells.size() == 0) closestCells.push_back(INVALID_CELLID); + if(closeCells.size() == 0) closeCells.push_back(INVALID_CELLID); } return true; } @@ -757,6 +782,17 @@ namespace SBC { return closestCells; } + /*! Get the cellIDs of all the close cells of type NOT_SYSBOUNDARY. + * \param cellID ID of the cell to start look from. + * \return The vector of cell indices of those cells + */ + std::vector & SysBoundaryCondition::getAllCloseNonsysboundaryCells( + const CellID& cellID + ) { + std::vector & closeCells = allCloseNonsysboundaryCells.at(cellID); + return closeCells; + } + /*! Get the cellIDs of all flowto cells (cells into which the velocity distribution can flow and which is of type NOT_SYSBOUNDARY). * \param cellID ID of the cell to start look from. * \return The vector of cell indices of those cells diff --git a/sysboundary/sysboundarycondition.h b/sysboundary/sysboundarycondition.h index e8528af71..180343583 100644 --- a/sysboundary/sysboundarycondition.h +++ b/sysboundary/sysboundarycondition.h @@ -222,6 +222,11 @@ namespace SBC { const CellID& cellID, const uint popID ); + void vlasovBoundaryCopyFromAllCloseNbrs( + const dccrg::Dccrg& mpiGrid, + const CellID& cellID, + const uint popID + ); void vlasovBoundaryReflect( const dccrg::Dccrg& mpiGrid, const CellID& cellID, @@ -257,6 +262,9 @@ namespace SBC { std::vector & getAllClosestNonsysboundaryCells( const CellID& cellID ); + std::vector & getAllCloseNonsysboundaryCells( + const CellID& cellID + ); Real fieldBoundaryCopyFromExistingFaceNbrMagneticField( FsGrid< std::array, 2> & perBGrid, FsGrid< fsgrids::technical, 2> & technicalGrid, @@ -274,6 +282,8 @@ namespace SBC { bool isPeriodic[3]; /*! Map of closest nonsysboundarycells. Used in getAllClosestNonsysboundaryCells. */ std::unordered_map> allClosestNonsysboundaryCells; + /*! Map of close nonsysboundarycells. Used in getAllCloseNonsysboundaryCells. */ + std::unordered_map> allCloseNonsysboundaryCells; /*! Array of cells into which the distribution function can flow. Used in getAllFlowtoCells. Cells into which one cannot flow are set to INVALID_CELLID. */ std::unordered_map> allFlowtoCells; From f2fb1090488a433e64e75ebe3e6efbb5f8f3e957 Mon Sep 17 00:00:00 2001 From: ipryakem Date: Tue, 19 Nov 2019 14:54:06 +0100 Subject: [PATCH 587/602] Implemented ionosphere fluffiness. 0 is stiff (preserve state), 1 is only copying the average of the neighbours. Per population of course. 
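
Each copied quantity is relaxed linearly between the cell's own value and the average over the close neighbours. A one-line sketch of the blend (helper name hypothetical, Real standing in for Vlasiator's floating-point typedef):

   typedef double Real; // stand-in for Vlasiator's Real

   // fluffiness = 0 keeps the boundary cell's state, fluffiness = 1 replaces
   // it with the plain average of the close NOT_SYSBOUNDARY neighbours.
   Real blend(Real own, Real neighbourAverage, Real fluffiness) {
      return (1.0 - fluffiness) * own + fluffiness * neighbourAverage;
   }

In the implementation this shows up as a rescaling of the cell's own data by (1 - fluffiness) followed by accumulation of fluffiness / N times each of the N close neighbours.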
--- sysboundary/ionosphere.cpp | 7 +- sysboundary/ionosphere.h | 1 + sysboundary/sysboundarycondition.cpp | 162 ++++++++++++++------------- sysboundary/sysboundarycondition.h | 8 +- 4 files changed, 94 insertions(+), 84 deletions(-) diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index 91bda83b1..0c6af0c3e 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -67,6 +67,7 @@ namespace SBC { Readparameters::add(pop + "_ionosphere.VX0", "Bulk velocity of ionospheric distribution function in X direction (m/s)", 0.0); Readparameters::add(pop + "_ionosphere.VY0", "Bulk velocity of ionospheric distribution function in X direction (m/s)", 0.0); Readparameters::add(pop + "_ionosphere.VZ0", "Bulk velocity of ionospheric distribution function in X direction (m/s)", 0.0); + Readparameters::add(pop + "_ionosphere.fluffiness", "Weight of boundary (0) vs. average of NOT_SYSBOUNDARY neighbor's (1) moments and velocity distribution.", 0); } } @@ -127,6 +128,10 @@ namespace SBC { if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added for population " << pop << "!" << endl; exit(1); } + if(!Readparameters::get(pop + "_ionosphere.fluffiness", sP.fluffiness)) { + if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added for population " << pop << "!" << endl; + exit(1); + } if(!Readparameters::get(pop + "_Magnetosphere.T", sP.T)) { if(myRank == MASTER_RANK) cerr << __FILE__ << ":" << __LINE__ << " ERROR: This option has not been added for population " << pop << "!" << endl; exit(1); @@ -681,7 +686,7 @@ namespace SBC { const uint popID ) { phiprof::start("vlasovBoundaryCondition (Ionosphere)"); - this->vlasovBoundaryCopyFromAllCloseNbrs(mpiGrid, cellID, popID); + this->vlasovBoundaryFluffyCopyFromAllCloseNbrs(mpiGrid, cellID, popID, this->speciesParams[popID].fluffiness); phiprof::stop("vlasovBoundaryCondition (Ionosphere)"); } diff --git a/sysboundary/ionosphere.h b/sysboundary/ionosphere.h index 2922b4a52..e49ca050d 100644 --- a/sysboundary/ionosphere.h +++ b/sysboundary/ionosphere.h @@ -38,6 +38,7 @@ namespace SBC { Real rho; Real V0[3]; Real T; + Real fluffiness; uint nSpaceSamples; uint nVelocitySamples; }; diff --git a/sysboundary/sysboundarycondition.cpp b/sysboundary/sysboundarycondition.cpp index 6f479cfed..8cb497013 100644 --- a/sysboundary/sysboundarycondition.cpp +++ b/sysboundary/sysboundarycondition.cpp @@ -256,9 +256,9 @@ namespace SBC { * \param mpiGrid Grid * \param cellID The cell's ID. */ - void SysBoundaryCondition::vlasovBoundaryCopyFromAllCloseNbrs( + void SysBoundaryCondition::vlasovBoundaryFluffyCopyFromAllCloseNbrs( const dccrg::Dccrg& mpiGrid, - const CellID& cellID,const uint popID + const CellID& cellID,const uint popID,creal fluffiness ) { const std::vector closeCells = getAllCloseNonsysboundaryCells(cellID); @@ -266,7 +266,7 @@ namespace SBC { cerr << __FILE__ << ":" << __LINE__ << ": No close cell found!" << endl; abort(); } - averageCellData(mpiGrid, closeCells, mpiGrid[cellID], popID); + averageCellData(mpiGrid, closeCells, mpiGrid[cellID], popID, fluffiness); } /*! Function used to copy the distribution from (one of) the closest sysboundarytype::NOT_SYSBOUNDARY cell but limiting to values no higher than where it can flow into. Moments are recomputed. 
@@ -405,91 +405,93 @@ namespace SBC { const dccrg::Dccrg& mpiGrid, const std::vector cellList, SpatialCell *to, - const uint popID + const uint popID, + creal fluffiness /* default =0.0*/ ) { const size_t numberOfCells = cellList.size(); - if(numberOfCells == 1) { - copyCellData(mpiGrid[cellList[0]], to, true, false, popID); - } else { - creal factor = 1.0 / convert(numberOfCells); + creal factor = fluffiness / convert(numberOfCells); - if (popID == 0) { - to->parameters[CellParams::RHOM_DT2] = 0.0; - to->parameters[CellParams::VX_DT2] = 0.0; - to->parameters[CellParams::VY_DT2] = 0.0; - to->parameters[CellParams::VZ_DT2] = 0.0; - to->parameters[CellParams::RHOQ_DT2] = 0.0; - to->parameters[CellParams::P_11_DT2] = 0.0; - to->parameters[CellParams::P_22_DT2] = 0.0; - to->parameters[CellParams::P_33_DT2] = 0.0; - to->parameters[CellParams::RHOM] = 0.0; - to->parameters[CellParams::VX] = 0.0; - to->parameters[CellParams::VY] = 0.0; - to->parameters[CellParams::VZ] = 0.0; - to->parameters[CellParams::RHOQ] = 0.0; - to->parameters[CellParams::P_11] = 0.0; - to->parameters[CellParams::P_22] = 0.0; - to->parameters[CellParams::P_33] = 0.0; - } - to->clear(popID); - - for (size_t i=0; iparameters[CellParams::RHOM_DT2] *= 1.0 - fluffiness; + to->parameters[CellParams::VX_DT2] *= 1.0 - fluffiness; + to->parameters[CellParams::VY_DT2] *= 1.0 - fluffiness; + to->parameters[CellParams::VZ_DT2] *= 1.0 - fluffiness; + to->parameters[CellParams::RHOQ_DT2] *= 1.0 - fluffiness; + to->parameters[CellParams::P_11_DT2] *= 1.0 - fluffiness; + to->parameters[CellParams::P_22_DT2] *= 1.0 - fluffiness; + to->parameters[CellParams::P_33_DT2] *= 1.0 - fluffiness; + to->parameters[CellParams::RHOM] *= 1.0 - fluffiness; + to->parameters[CellParams::VX] *= 1.0 - fluffiness; + to->parameters[CellParams::VY] *= 1.0 - fluffiness; + to->parameters[CellParams::VZ] *= 1.0 - fluffiness; + to->parameters[CellParams::RHOQ] *= 1.0 - fluffiness; + to->parameters[CellParams::P_11] *= 1.0 - fluffiness; + to->parameters[CellParams::P_22] *= 1.0 - fluffiness; + to->parameters[CellParams::P_33] *= 1.0 - fluffiness; + } + + if (to->sysBoundaryLayer == 1) { + // Rescale own vspace + const Realf* toData = to->get_data(popID); + for (vmesh::LocalID toBlockLID=0; toBlockLIDget_number_of_velocity_blocks(popID); ++toBlockLID) { + // Pointer to target block data + Realf* toData = to->get_data(toBlockLID,popID); - if (popID == 0) { - to->parameters[CellParams::RHOM_DT2] += factor*incomingCell->parameters[CellParams::RHOM_DT2]; - to->parameters[CellParams::VX_DT2] += factor*incomingCell->parameters[CellParams::VX_DT2]; - to->parameters[CellParams::VY_DT2] += factor*incomingCell->parameters[CellParams::VY_DT2]; - to->parameters[CellParams::VZ_DT2] += factor*incomingCell->parameters[CellParams::VZ_DT2]; - to->parameters[CellParams::RHOQ_DT2] += factor*incomingCell->parameters[CellParams::RHOQ_DT2]; - to->parameters[CellParams::P_11_DT2] += factor*incomingCell->parameters[CellParams::P_11_DT2]; - to->parameters[CellParams::P_22_DT2] += factor*incomingCell->parameters[CellParams::P_22_DT2]; - to->parameters[CellParams::P_33_DT2] += factor*incomingCell->parameters[CellParams::P_33_DT2]; - to->parameters[CellParams::RHOM] += factor*incomingCell->parameters[CellParams::RHOM]; - to->parameters[CellParams::VX] += factor*incomingCell->parameters[CellParams::VX]; - to->parameters[CellParams::VY] += factor*incomingCell->parameters[CellParams::VY]; - to->parameters[CellParams::VZ] += factor*incomingCell->parameters[CellParams::VZ]; - 
to->parameters[CellParams::RHOQ] += factor*incomingCell->parameters[CellParams::RHOQ]; - to->parameters[CellParams::P_11] += factor*incomingCell->parameters[CellParams::P_11]; - to->parameters[CellParams::P_22] += factor*incomingCell->parameters[CellParams::P_22]; - to->parameters[CellParams::P_33] += factor*incomingCell->parameters[CellParams::P_33]; + // Add values from source cells + for (uint kc=0; kcparameters[CellParams::RHOM_DT2] += factor*incomingCell->parameters[CellParams::RHOM_DT2]; + to->parameters[CellParams::VX_DT2] += factor*incomingCell->parameters[CellParams::VX_DT2]; + to->parameters[CellParams::VY_DT2] += factor*incomingCell->parameters[CellParams::VY_DT2]; + to->parameters[CellParams::VZ_DT2] += factor*incomingCell->parameters[CellParams::VZ_DT2]; + to->parameters[CellParams::RHOQ_DT2] += factor*incomingCell->parameters[CellParams::RHOQ_DT2]; + to->parameters[CellParams::P_11_DT2] += factor*incomingCell->parameters[CellParams::P_11_DT2]; + to->parameters[CellParams::P_22_DT2] += factor*incomingCell->parameters[CellParams::P_22_DT2]; + to->parameters[CellParams::P_33_DT2] += factor*incomingCell->parameters[CellParams::P_33_DT2]; + to->parameters[CellParams::RHOM] += factor*incomingCell->parameters[CellParams::RHOM]; + to->parameters[CellParams::VX] += factor*incomingCell->parameters[CellParams::VX]; + to->parameters[CellParams::VY] += factor*incomingCell->parameters[CellParams::VY]; + to->parameters[CellParams::VZ] += factor*incomingCell->parameters[CellParams::VZ]; + to->parameters[CellParams::RHOQ] += factor*incomingCell->parameters[CellParams::RHOQ]; + to->parameters[CellParams::P_11] += factor*incomingCell->parameters[CellParams::P_11]; + to->parameters[CellParams::P_22] += factor*incomingCell->parameters[CellParams::P_22]; + to->parameters[CellParams::P_33] += factor*incomingCell->parameters[CellParams::P_33]; + } - // Do this only for the first layer, the other layers do not need this. - if (to->sysBoundaryLayer != 1) continue; + // Do this only for the first layer, the other layers do not need this. + if (to->sysBoundaryLayer != 1) continue; - const Real* blockParameters = incomingCell->get_block_parameters(popID); - const Realf* fromData = incomingCell->get_data(popID); - for (vmesh::LocalID incBlockLID=0; incBlockLIDget_number_of_velocity_blocks(popID); ++incBlockLID) { - // Check where cells are - creal vxBlock = blockParameters[BlockParams::VXCRD]; - creal vyBlock = blockParameters[BlockParams::VYCRD]; - creal vzBlock = blockParameters[BlockParams::VZCRD]; - creal dvxCell = blockParameters[BlockParams::DVX]; - creal dvyCell = blockParameters[BlockParams::DVY]; - creal dvzCell = blockParameters[BlockParams::DVZ]; - - // Global ID of the block containing incoming data - vmesh::GlobalID incBlockGID = incomingCell->get_velocity_block_global_id(incBlockLID,popID); - - // Get local ID of the target block. If the block doesn't exist, create it. 
- vmesh::GlobalID toBlockLID = to->get_velocity_block_local_id(incBlockGID,popID); - if (toBlockLID == SpatialCell::invalid_local_id()) { - to->add_velocity_block(incBlockGID,popID); - toBlockLID = to->get_velocity_block_local_id(incBlockGID,popID); - } - - // Pointer to target block data - Realf* toData = to->get_data(toBlockLID,popID); + const Realf* fromData = incomingCell->get_data(popID); + for (vmesh::LocalID incBlockLID=0; incBlockLIDget_number_of_velocity_blocks(popID); ++incBlockLID) { + // Global ID of the block containing incoming data + vmesh::GlobalID incBlockGID = incomingCell->get_velocity_block_global_id(incBlockLID,popID); + + // Get local ID of the target block. If the block doesn't exist, create it. + vmesh::GlobalID toBlockLID = to->get_velocity_block_local_id(incBlockGID,popID); + if (toBlockLID == SpatialCell::invalid_local_id()) { + to->add_velocity_block(incBlockGID,popID); + toBlockLID = to->get_velocity_block_local_id(incBlockGID,popID); + } + + // Pointer to target block data + Realf* toData = to->get_data(toBlockLID,popID); - // Add values from source cells - for (uint kc=0; kc& mpiGrid, std::vector cellList, SpatialCell *to, - const uint popID + const uint popID, + creal fluffiness = 0 ); std::array & getFlowtoCells( const CellID& cellID @@ -222,10 +223,11 @@ namespace SBC { const CellID& cellID, const uint popID ); - void vlasovBoundaryCopyFromAllCloseNbrs( + void vlasovBoundaryFluffyCopyFromAllCloseNbrs( const dccrg::Dccrg& mpiGrid, const CellID& cellID, - const uint popID + const uint popID, + creal fluffiness ); void vlasovBoundaryReflect( const dccrg::Dccrg& mpiGrid, From 5836b20dfa4965d20db5b18c34ae563ed1041fc2 Mon Sep 17 00:00:00 2001 From: ipryakem Date: Wed, 20 Nov 2019 09:03:50 +0100 Subject: [PATCH 588/602] Debug a06cdd, layer 2 and above don't need to check for neighbour boundary flag. 
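
Only first-layer boundary cells can face a NOT_SYSBOUNDARY cell across a component's lower face, so only they need the per-component neighbour test; deeper layers always take the boundary condition. A condensed sketch of the intended branch, where applyBoundaryB and flagAt are hypothetical shorthands for the calls in the hunk below:

   for (int component = 0; component < 3; component++) {
      if (sysBoundaryLayer != 1) {
         applyBoundaryB(component);    // layers 2+ never face a NOT_SYSBOUNDARY cell
      } else {
         // neighbour across this component's lower face
         int ni = i + ((component == 0) ? -1 : 0);
         int nj = j + ((component == 1) ? -1 : 0);
         int nk = k + ((component == 2) ? -1 : 0);
         if (flagAt(ni, nj, nk) != NOT_SYSBOUNDARY) {
            applyBoundaryB(component); // complement to the main solver loop
         }
      }
   }

Note that the hunk below still tests sysBoundaryFlag where the layer is meant; patch 590 corrects that.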
--- fieldsolver/ldz_magnetic_field.cpp | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/fieldsolver/ldz_magnetic_field.cpp b/fieldsolver/ldz_magnetic_field.cpp index efdfa0789..526748ca8 100644 --- a/fieldsolver/ldz_magnetic_field.cpp +++ b/fieldsolver/ldz_magnetic_field.cpp @@ -194,13 +194,17 @@ void propagateSysBoundaryMagneticField( cuint sysBoundaryLayer = technicalGrid.get(i,j,k)->sysBoundaryLayer; for (uint component = 0; component < 3; component++) { - cint neigh_i=i + ((component==0)?-1:0); - cint neigh_j=j + ((component==1)?-1:0); - cint neigh_k=k + ((component==2)?-1:0); - cuint neighborSysBoundaryFlag = technicalGrid.get(neigh_i, neigh_j, neigh_k)->sysBoundaryFlag; - - if (neighborSysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { // Complement to propagateMagneticFieldSimple main loop + if (sysBoundaryFlag != 1) { bGrid->at(fsgrids::bfield::PERBX + component) = sysBoundaries.getSysBoundary(sysBoundaryFlag)->fieldSolverBoundaryCondMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, dt, RKCase, component); + } else { + cint neigh_i=i + ((component==0)?-1:0); + cint neigh_j=j + ((component==1)?-1:0); + cint neigh_k=k + ((component==2)?-1:0); + uint neighborSysBoundaryFlag = technicalGrid.get(neigh_i, neigh_j, neigh_k)->sysBoundaryFlag; + + if (neighborSysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { // Complement to propagateMagneticFieldSimple main loop + bGrid->at(fsgrids::bfield::PERBX + component) = sysBoundaries.getSysBoundary(sysBoundaryFlag)->fieldSolverBoundaryCondMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, dt, RKCase, component); + } } } } From d845d53a056a1353f94922586dee0e2e2239344d Mon Sep 17 00:00:00 2001 From: ipryakem Date: Wed, 20 Nov 2019 09:49:55 +0100 Subject: [PATCH 589/602] Fixed ionosphere copying of layer 2 B. --- sysboundary/ionosphere.cpp | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index 0c6af0c3e..b9d181a6e 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -588,20 +588,31 @@ namespace SBC { return (averageB[0]+averageB[1]+averageB[2])*normalDirection[component]; ***/ - // Copy each face B-field from the cell on the other side of it + // Copy each face B-field from the cell in the simulation cell direction + // NOTE This might misbehave if OUTFLOW crosses IONOSPHERE, which was supported prior to 201911 but was never really used. 
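+ // If the neighbour on the face's negative side is in boundary layer 1 its
+ // field has been filled from the simulation domain and can be copied; if that
+ // neighbour is deeper (layer 2), the value is mirrored from the positive side.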
switch(component) { case 0: - return bGrid->get(i-1,j,k)->at(fsgrids::bfield::PERBX + component); + if (technicalGrid.get(i-1,j,k)->sysBoundaryLayer==1) { + return bGrid->get(i-1,j,k)->at(fsgrids::bfield::PERBX + component); + } else { // Is sysboundarylayer 2, read from opposite direction + return bGrid->get(i+1,j,k)->at(fsgrids::bfield::PERBX + component); + } case 1: - return bGrid->get(i,j-1,k)->at(fsgrids::bfield::PERBX + component); + if (technicalGrid.get(i,j-1,k)->sysBoundaryLayer==1) { + return bGrid->get(i,j-1,k)->at(fsgrids::bfield::PERBX + component); + } else { // Is sysboundarylayer 2, read from opposite direction + return bGrid->get(i,j+1,k)->at(fsgrids::bfield::PERBX + component); + } case 2: - return bGrid->get(i,j,k-1)->at(fsgrids::bfield::PERBX + component); + if (technicalGrid.get(i,j,k-1)->sysBoundaryLayer==1) { + return bGrid->get(i,j,k-1)->at(fsgrids::bfield::PERBX + component); + } else { // Is sysboundarylayer 2, read from opposite direction + return bGrid->get(i,j,k+1)->at(fsgrids::bfield::PERBX + component); + } default: cerr << "ERROR: ionosphere boundary tried to copy nonsensical magnetic field component " << component << endl; return 0.0; } - - } void Ionosphere::fieldSolverBoundaryCondElectricField( From 2f05d034f33228cf2236cd8050255da49a1cedeb Mon Sep 17 00:00:00 2001 From: ipryakem Date: Wed, 20 Nov 2019 11:03:40 +0100 Subject: [PATCH 590/602] One flag -> layer bug and streamlining of the code suggested by Markus. --- fieldsolver/ldz_magnetic_field.cpp | 29 ++++++----------------------- 1 file changed, 6 insertions(+), 23 deletions(-) diff --git a/fieldsolver/ldz_magnetic_field.cpp b/fieldsolver/ldz_magnetic_field.cpp index 526748ca8..9d0be6873 100644 --- a/fieldsolver/ldz_magnetic_field.cpp +++ b/fieldsolver/ldz_magnetic_field.cpp @@ -194,7 +194,7 @@ void propagateSysBoundaryMagneticField( cuint sysBoundaryLayer = technicalGrid.get(i,j,k)->sysBoundaryLayer; for (uint component = 0; component < 3; component++) { - if (sysBoundaryFlag != 1) { + if (sysBoundaryLayer != 1) { bGrid->at(fsgrids::bfield::PERBX + component) = sysBoundaries.getSysBoundary(sysBoundaryFlag)->fieldSolverBoundaryCondMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, dt, RKCase, component); } else { cint neigh_i=i + ((component==0)?-1:0); @@ -261,28 +261,11 @@ void propagateMagneticFieldSimple( // fields normally here. 
cuint sysBoundaryLayer = technicalGrid.get(i,j,k)->sysBoundaryLayer; if(sysBoundaryLayer == 1) { - for (uint component = 0; component < 3; component++) { - cint neigh_i=i + ((component==0)?-1:0); - cint neigh_j=j + ((component==1)?-1:0); - cint neigh_k=k + ((component==2)?-1:0); - cuint neighborSysBoundaryFlag = technicalGrid.get(neigh_i, neigh_j, neigh_k)->sysBoundaryFlag; - - if (neighborSysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { // Complement to propagateSysBoundaryMagneticField - switch(component) { - case 0: - propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, true, false, false); - break; - case 1: - propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, false, true, false); - break; - case 2: - propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, false, false, true); - break; - default: - cerr << "ERROR: ionosphere boundary tried to propagate nonsensical magnetic field component " << component << endl; - break; - } - } + bool prop_x = (technicalGrid.get(i-1,j,k)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY); + bool prop_y = (technicalGrid.get(i,j-1,k)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY); + bool prop_z = (technicalGrid.get(i,j,k-1)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY); + if (prop_x || prop_y || prop_z) { + propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, prop_x, prop_y, prop_z); } } } From d3b7c99af249ed39c6594d8ff9352db65358a4ef Mon Sep 17 00:00:00 2001 From: ipryakem Date: Wed, 20 Nov 2019 13:50:55 +0100 Subject: [PATCH 591/602] Eihen/Makefile modifications for HazelHen. --- MAKE/Makefile.hornet_gcc | 2 +- vlasovsolver/cpu_acc_intersections.cpp | 2 +- vlasovsolver/cpu_acc_intersections.hpp | 2 +- vlasovsolver/cpu_acc_semilag.cpp | 4 ++-- vlasovsolver/cpu_acc_transform.hpp | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/MAKE/Makefile.hornet_gcc b/MAKE/Makefile.hornet_gcc index 17b7d67fd..4a4418332 100644 --- a/MAKE/Makefile.hornet_gcc +++ b/MAKE/Makefile.hornet_gcc @@ -79,7 +79,7 @@ INC_PROFILE = -I$(LIBRARY_PREFIX)/mpich2/$(MPT_VERSION)/$(CC_BRAND)/$(CC_BRAND_V #header libraries -INC_EIGEN = -I$(LIBRARY_PREFIX)/Eigen/ +INC_EIGEN = -I$(LIBRARY_PREFIX)/ INC_DCCRG = -I$(LIBRARY_PREFIX)/dccrg/ INC_FSGRID = -I$(LIBRARY_PREFIX)/fsgrid/ INC_VECTORCLASS = -I$(LIBRARY_PREFIX)/vectorclass-version1 diff --git a/vlasovsolver/cpu_acc_intersections.cpp b/vlasovsolver/cpu_acc_intersections.cpp index fa41ff856..b0ae43385 100644 --- a/vlasovsolver/cpu_acc_intersections.cpp +++ b/vlasovsolver/cpu_acc_intersections.cpp @@ -29,7 +29,7 @@ //#include //#include -#include //Eigen +#include #include "../common.h" #include "../spatial_cell.hpp" diff --git a/vlasovsolver/cpu_acc_intersections.hpp b/vlasovsolver/cpu_acc_intersections.hpp index 2e2c5ce18..19d3cbf5f 100644 --- a/vlasovsolver/cpu_acc_intersections.hpp +++ b/vlasovsolver/cpu_acc_intersections.hpp @@ -22,7 +22,7 @@ #ifndef CPU_ACC_INTERSECTIONS_H #define CPU_ACC_INTERSECTIONS_H -#include // Eigen +#include #include "../definitions.h" #include "../spatial_cell.hpp" diff --git a/vlasovsolver/cpu_acc_semilag.cpp b/vlasovsolver/cpu_acc_semilag.cpp index 524497178..ba0bdaa8f 100644 --- a/vlasovsolver/cpu_acc_semilag.cpp +++ b/vlasovsolver/cpu_acc_semilag.cpp @@ -24,8 +24,8 @@ #include #include -#include // Eigen -#include // Eigen +#include +#include #include "cpu_acc_semilag.hpp" #include "cpu_acc_transform.hpp" diff --git 
a/vlasovsolver/cpu_acc_transform.hpp b/vlasovsolver/cpu_acc_transform.hpp index e6f92d87e..ba2d3614f 100644 --- a/vlasovsolver/cpu_acc_transform.hpp +++ b/vlasovsolver/cpu_acc_transform.hpp @@ -23,8 +23,8 @@ #ifndef CPU_ACC_TRANSFORM_H #define CPU_ACC_TRANSFORM_H -#include // Eigen -#include // Eigen +#include +#include #include "../common.h" #include "../spatial_cell.hpp" From ef3955c76b8c9a5d76f2c7b52f49ee9cab71c8c4 Mon Sep 17 00:00:00 2001 From: ipryakem Date: Wed, 20 Nov 2019 14:47:35 +0100 Subject: [PATCH 592/602] Rehaul of moment computation at system boundaries. Aim: boundaries set vdf and moments _R and _V, so that they can be used to compute moments and _DT2 moments. --- grid.cpp | 2 +- sysboundary/donotcompute.h | 3 +- sysboundary/ionosphere.cpp | 31 +++++--- sysboundary/ionosphere.h | 3 +- sysboundary/outflow.cpp | 9 ++- sysboundary/outflow.h | 3 +- sysboundary/setbyuser.cpp | 6 +- sysboundary/setbyuser.h | 3 +- sysboundary/setmaxwellian.cpp | 24 ++++-- sysboundary/sysboundary.cpp | 18 ++++- sysboundary/sysboundary.h | 2 +- sysboundary/sysboundarycondition.cpp | 111 +++++++++------------------ sysboundary/sysboundarycondition.h | 10 ++- vlasiator.cpp | 34 ++++---- vlasovsolver/cpu_moments.cpp | 24 +++++- vlasovsolver/vlasovmover.cpp | 34 ++++---- 16 files changed, 169 insertions(+), 148 deletions(-) diff --git a/grid.cpp b/grid.cpp index 962a37178..0bd98831a 100644 --- a/grid.cpp +++ b/grid.cpp @@ -282,7 +282,7 @@ void initializeGrids( if (P::isRestart == false) { // Apply boundary conditions so that we get correct initial moments - sysBoundaries.applySysBoundaryVlasovConditions(mpiGrid,Parameters::t); + sysBoundaries.applySysBoundaryVlasovConditions(mpiGrid,Parameters::t, true); // It doesn't matter here whether we put _R or _V moments //compute moments, and set them in RHO* and RHO_*_DT2. If restart, they are already read in phiprof::start("Init moments"); diff --git a/sysboundary/donotcompute.h b/sysboundary/donotcompute.h index c72a0b870..376298e36 100644 --- a/sysboundary/donotcompute.h +++ b/sysboundary/donotcompute.h @@ -112,7 +112,8 @@ namespace SBC { virtual void vlasovBoundaryCondition( const dccrg::Dccrg& mpiGrid, const CellID& cellID, - const uint popID + const uint popID, + const bool calculate_V_moments ) { std::cerr << "ERROR: DoNotCompute::vlasovBoundaryCondition called!" << std::endl;} }; } diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index b9d181a6e..0ce5e0f45 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -694,7 +694,8 @@ namespace SBC { void Ionosphere::vlasovBoundaryCondition( const dccrg::Dccrg& mpiGrid, const CellID& cellID, - const uint popID + const uint popID, + const bool calculate_V_moments ) { phiprof::start("vlasovBoundaryCondition (Ionosphere)"); this->vlasovBoundaryFluffyCopyFromAllCloseNbrs(mpiGrid, cellID, popID, this->speciesParams[popID].fluffiness); @@ -783,14 +784,22 @@ namespace SBC { calculateCellMoments(&templateCell,true,true); // WARNING Time-independence assumed here. 
Normal moments computed in setProjectCell - templateCell.parameters[CellParams::RHOM_DT2] = templateCell.parameters[CellParams::RHOM]; - templateCell.parameters[CellParams::VX_DT2] = templateCell.parameters[CellParams::VX]; - templateCell.parameters[CellParams::VY_DT2] = templateCell.parameters[CellParams::VY]; - templateCell.parameters[CellParams::VZ_DT2] = templateCell.parameters[CellParams::VZ]; - templateCell.parameters[CellParams::RHOQ_DT2] = templateCell.parameters[CellParams::RHOQ]; - templateCell.parameters[CellParams::P_11_DT2] = templateCell.parameters[CellParams::P_11]; - templateCell.parameters[CellParams::P_22_DT2] = templateCell.parameters[CellParams::P_22]; - templateCell.parameters[CellParams::P_33_DT2] = templateCell.parameters[CellParams::P_33]; + templateCell.parameters[CellParams::RHOM_R] = templateCell.parameters[CellParams::RHOM]; + templateCell.parameters[CellParams::VX_R] = templateCell.parameters[CellParams::VX]; + templateCell.parameters[CellParams::VY_R] = templateCell.parameters[CellParams::VY]; + templateCell.parameters[CellParams::VZ_R] = templateCell.parameters[CellParams::VZ]; + templateCell.parameters[CellParams::RHOQ_R] = templateCell.parameters[CellParams::RHOQ]; + templateCell.parameters[CellParams::P_11_R] = templateCell.parameters[CellParams::P_11]; + templateCell.parameters[CellParams::P_22_R] = templateCell.parameters[CellParams::P_22]; + templateCell.parameters[CellParams::P_33_R] = templateCell.parameters[CellParams::P_33]; + templateCell.parameters[CellParams::RHOM_V] = templateCell.parameters[CellParams::RHOM]; + templateCell.parameters[CellParams::VX_V] = templateCell.parameters[CellParams::VX]; + templateCell.parameters[CellParams::VY_V] = templateCell.parameters[CellParams::VY]; + templateCell.parameters[CellParams::VZ_V] = templateCell.parameters[CellParams::VZ]; + templateCell.parameters[CellParams::RHOQ_V] = templateCell.parameters[CellParams::RHOQ]; + templateCell.parameters[CellParams::P_11_V] = templateCell.parameters[CellParams::P_11]; + templateCell.parameters[CellParams::P_22_V] = templateCell.parameters[CellParams::P_22]; + templateCell.parameters[CellParams::P_33_V] = templateCell.parameters[CellParams::P_33]; } Real Ionosphere::shiftedMaxwellianDistribution( @@ -862,8 +871,8 @@ namespace SBC { } void Ionosphere::setCellFromTemplate(SpatialCell* cell,const uint popID) { - //Copy, and allow to change blocks - copyCellData(&templateCell,cell,true,false,popID); + copyCellData(&templateCell,cell,false,popID,true); // copy also vdf, _V + copyCellData(&templateCell,cell,true,popID,false); // don't copy vdf again but copy _R now } std::string Ionosphere::getName() const {return "Ionosphere";} diff --git a/sysboundary/ionosphere.h b/sysboundary/ionosphere.h index e49ca050d..4f7435b16 100644 --- a/sysboundary/ionosphere.h +++ b/sysboundary/ionosphere.h @@ -124,7 +124,8 @@ namespace SBC { virtual void vlasovBoundaryCondition( const dccrg::Dccrg& mpiGrid, const CellID& cellID, - const uint popID + const uint popID, + const bool calculate_V_moments ); virtual std::string getName() const; diff --git a/sysboundary/outflow.cpp b/sysboundary/outflow.cpp index f801791bf..a0d2a4dc2 100644 --- a/sysboundary/outflow.cpp +++ b/sysboundary/outflow.cpp @@ -437,7 +437,8 @@ namespace SBC { void Outflow::vlasovBoundaryCondition( const dccrg::Dccrg& mpiGrid, const CellID& cellID, - const uint popID + const uint popID, + const bool calculate_V_moments ) { // phiprof::start("vlasovBoundaryCondition (Outflow)"); @@ -461,16 +462,16 @@ namespace SBC { break; case 
vlasovscheme::COPY: if (cell->sysBoundaryLayer == 1) { - vlasovBoundaryCopyFromTheClosestNbr(mpiGrid,cellID,false,popID); + vlasovBoundaryCopyFromTheClosestNbr(mpiGrid,cellID,false,popID,calculate_V_moments); } else { - vlasovBoundaryCopyFromTheClosestNbr(mpiGrid,cellID,true,popID); + vlasovBoundaryCopyFromTheClosestNbr(mpiGrid,cellID,true,popID,calculate_V_moments); } break; case vlasovscheme::LIMIT: if (cell->sysBoundaryLayer == 1) { vlasovBoundaryCopyFromTheClosestNbrAndLimit(mpiGrid,cellID,popID); } else { - vlasovBoundaryCopyFromTheClosestNbr(mpiGrid,cellID,true,popID); + vlasovBoundaryCopyFromTheClosestNbr(mpiGrid,cellID,true,popID,calculate_V_moments); } break; default: diff --git a/sysboundary/outflow.h b/sysboundary/outflow.h index 2c6bb1249..eb8985010 100644 --- a/sysboundary/outflow.h +++ b/sysboundary/outflow.h @@ -123,7 +123,8 @@ namespace SBC { virtual void vlasovBoundaryCondition( const dccrg::Dccrg& mpiGrid, const CellID& cellID, - const uint popID + const uint popID, + const bool calculate_V_moments ); virtual void getFaces(bool* faces); diff --git a/sysboundary/setbyuser.cpp b/sysboundary/setbyuser.cpp index b97bfa268..8b2a392a5 100644 --- a/sysboundary/setbyuser.cpp +++ b/sysboundary/setbyuser.cpp @@ -267,7 +267,8 @@ namespace SBC { void SetByUser::vlasovBoundaryCondition( const dccrg::Dccrg& mpiGrid, const CellID& cellID, - const uint popID + const uint popID, + const bool calculate_V_moments ) { // No need to do anything in this function, as the propagators do not touch the distribution function } @@ -339,7 +340,8 @@ namespace SBC { for(uint i=0; i<6; i++) { if(facesToProcess[i] && isThisCellOnAFace[i]) { - copyCellData(&templateCells[i], cell,true,false,popID); + copyCellData(&templateCells[i], cell,false,popID,true); // copy also vdf, _V + copyCellData(&templateCells[i], cell,true,popID,false); // don't copy vdf again but copy _R now break; // This effectively sets the precedence of faces through the order of faces. } } diff --git a/sysboundary/setbyuser.h b/sysboundary/setbyuser.h index 2bd54c84f..b10962cf5 100644 --- a/sysboundary/setbyuser.h +++ b/sysboundary/setbyuser.h @@ -128,7 +128,8 @@ namespace SBC { virtual void vlasovBoundaryCondition( const dccrg::Dccrg& mpiGrid, const CellID& cellID, - const uint popID + const uint popID, + const bool calculate_V_moments ); virtual void getFaces(bool* faces); diff --git a/sysboundary/setmaxwellian.cpp b/sysboundary/setmaxwellian.cpp index c218970f3..e1719fc3a 100644 --- a/sysboundary/setmaxwellian.cpp +++ b/sysboundary/setmaxwellian.cpp @@ -313,14 +313,22 @@ namespace SBC { if(!this->isThisDynamic) { // WARNING Time-independence assumed here. 
- templateCell.parameters[CellParams::RHOM_DT2] = templateCell.parameters[CellParams::RHOM]; - templateCell.parameters[CellParams::VX_DT2] = templateCell.parameters[CellParams::VX]; - templateCell.parameters[CellParams::VY_DT2] = templateCell.parameters[CellParams::VY]; - templateCell.parameters[CellParams::VZ_DT2] = templateCell.parameters[CellParams::VZ]; - templateCell.parameters[CellParams::RHOQ_DT2] = templateCell.parameters[CellParams::RHOQ]; - templateCell.parameters[CellParams::P_11_DT2] = templateCell.parameters[CellParams::P_11]; - templateCell.parameters[CellParams::P_22_DT2] = templateCell.parameters[CellParams::P_22]; - templateCell.parameters[CellParams::P_33_DT2] = templateCell.parameters[CellParams::P_33]; + templateCell.parameters[CellParams::RHOM_R] = templateCell.parameters[CellParams::RHOM]; + templateCell.parameters[CellParams::VX_R] = templateCell.parameters[CellParams::VX]; + templateCell.parameters[CellParams::VY_R] = templateCell.parameters[CellParams::VY]; + templateCell.parameters[CellParams::VZ_R] = templateCell.parameters[CellParams::VZ]; + templateCell.parameters[CellParams::RHOQ_R] = templateCell.parameters[CellParams::RHOQ]; + templateCell.parameters[CellParams::P_11_R] = templateCell.parameters[CellParams::P_11]; + templateCell.parameters[CellParams::P_22_R] = templateCell.parameters[CellParams::P_22]; + templateCell.parameters[CellParams::P_33_R] = templateCell.parameters[CellParams::P_33]; + templateCell.parameters[CellParams::RHOM_V] = templateCell.parameters[CellParams::RHOM]; + templateCell.parameters[CellParams::VX_V] = templateCell.parameters[CellParams::VX]; + templateCell.parameters[CellParams::VY_V] = templateCell.parameters[CellParams::VY]; + templateCell.parameters[CellParams::VZ_V] = templateCell.parameters[CellParams::VZ]; + templateCell.parameters[CellParams::RHOQ_V] = templateCell.parameters[CellParams::RHOQ]; + templateCell.parameters[CellParams::P_11_V] = templateCell.parameters[CellParams::P_11]; + templateCell.parameters[CellParams::P_22_V] = templateCell.parameters[CellParams::P_22]; + templateCell.parameters[CellParams::P_33_V] = templateCell.parameters[CellParams::P_33]; } else { cerr << "ERROR: this is not dynamic in time, please code it!" 
<< endl; abort(); diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp index 64a6f75c1..90bdc65c1 100644 --- a/sysboundary/sysboundary.cpp +++ b/sysboundary/sysboundary.cpp @@ -29,6 +29,7 @@ #include "../grid.h" #include "../object_wrapper.h" +#include "../vlasovsolver/cpu_moments.h" #include "sysboundary.h" #include "donotcompute.h" @@ -574,7 +575,8 @@ bool SysBoundary::applyInitialState( */ void SysBoundary::applySysBoundaryVlasovConditions( dccrg::Dccrg& mpiGrid, - creal& t + creal& t, + const bool calculate_V_moments // if true, compute into _V, false into _R moments so that the interpolated ones can be done for layer 1 boundaries ) { if(sysBoundaries.size()==0) { return; //no system boundaries @@ -609,7 +611,12 @@ void SysBoundary::applySysBoundaryVlasovConditions( #pragma omp parallel for for (uint i=0; isysBoundaryFlag; - this->getSysBoundary(sysBoundaryType)->vlasovBoundaryCondition(mpiGrid,localCells[i],popID); + this->getSysBoundary(sysBoundaryType)->vlasovBoundaryCondition(mpiGrid,localCells[i],popID,calculate_V_moments); + } + if (calculate_V_moments) { + calculateMoments_V(mpiGrid, localCells, true); + } else { + calculateMoments_R(mpiGrid, localCells, true); } phiprof::stop(timer); @@ -626,7 +633,12 @@ void SysBoundary::applySysBoundaryVlasovConditions( #pragma omp parallel for for (uint i=0; isysBoundaryFlag; - this->getSysBoundary(sysBoundaryType)->vlasovBoundaryCondition(mpiGrid, boundaryCells[i],popID); + this->getSysBoundary(sysBoundaryType)->vlasovBoundaryCondition(mpiGrid, boundaryCells[i],popID,calculate_V_moments); + } + if (calculate_V_moments) { + calculateMoments_V(mpiGrid, boundaryCells, true); + } else { + calculateMoments_R(mpiGrid, boundaryCells, true); } phiprof::stop(timer); diff --git a/sysboundary/sysboundary.h b/sysboundary/sysboundary.h index ca94eb953..97bf1d8b4 100644 --- a/sysboundary/sysboundary.h +++ b/sysboundary/sysboundary.h @@ -75,7 +75,7 @@ class SysBoundary { FsGrid< std::array, 2> & perBGrid, Project& project ); - void applySysBoundaryVlasovConditions(dccrg::Dccrg& mpiGrid, creal& t); + void applySysBoundaryVlasovConditions(dccrg::Dccrg& mpiGrid, creal& t, const bool calculate_V_moments); unsigned int size() const; SBC::SysBoundaryCondition* getSysBoundary(cuint sysBoundaryType) const; bool isDynamic() const; diff --git a/sysboundary/sysboundarycondition.cpp b/sysboundary/sysboundarycondition.cpp index 8cb497013..a50bd802b 100644 --- a/sysboundary/sysboundarycondition.cpp +++ b/sysboundary/sysboundarycondition.cpp @@ -223,7 +223,8 @@ namespace SBC { const dccrg::Dccrg& mpiGrid, const CellID& cellID, const bool& copyMomentsOnly, - const uint popID + const uint popID, + const bool calculate_V_moments ) { const CellID closestCell = getTheClosestNonsysboundaryCell(cellID); @@ -231,8 +232,8 @@ namespace SBC { cerr << __FILE__ << ":" << __LINE__ << ": No closest cell found!" << endl; abort(); } - //Do not allow block adjustment, the block structure when calling vlasovBoundaryCondition should be static - copyCellData(mpiGrid[closestCell],mpiGrid[cellID],false, copyMomentsOnly, popID); + + copyCellData(mpiGrid[closestCell],mpiGrid[cellID], copyMomentsOnly, popID, calculate_V_moments); } /*! Function used to average and copy the distribution and moments from all the closest sysboundarytype::NOT_SYSBOUNDARY cells. @@ -340,7 +341,6 @@ namespace SBC { } } } - calculateCellMoments(to,true,true); } /*! Function used to copy the distribution and moments from one cell to another. In layer 2, copy only the moments. 
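Throughout the rewritten copy below, the caller's calculate_V_moments flag selects which moment slot is written, so that the _DT2 moments of layer-1 cells can later be interpolated from the _R / _V pairs. A sketch of the selection with a hypothetical helper (CellParams as in the codebase):

   // Map the logical density moment to the slot a boundary update should write:
   // _V after acceleration, _R after translation.
   int rhomSlot(bool calculate_V_moments) {
      return calculate_V_moments ? CellParams::RHOM_V : CellParams::RHOM_R;
   }

The same two-way choice repeats for V, RHOQ and the pressure diagonal.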
@@ -351,45 +351,49 @@ namespace SBC { void SysBoundaryCondition::copyCellData( SpatialCell* from, SpatialCell* to, - bool allowBlockAdjustment, const bool& copyMomentsOnly, - const uint popID + const uint popID, + const bool calculate_V_moments ) { - // WARNING Time-independence assumed here. _R and _V not copied, - // as boundary conditions cells should not set/use them. if (popID == 0) { - to->parameters[CellParams::RHOM_DT2] = from->parameters[CellParams::RHOM_DT2]; - to->parameters[CellParams::VX_DT2] = from->parameters[CellParams::VX_DT2]; - to->parameters[CellParams::VY_DT2] = from->parameters[CellParams::VY_DT2]; - to->parameters[CellParams::VZ_DT2] = from->parameters[CellParams::VZ_DT2]; - to->parameters[CellParams::RHOQ_DT2] = from->parameters[CellParams::RHOQ_DT2]; - to->parameters[CellParams::P_11_DT2] = from->parameters[CellParams::P_11_DT2]; - to->parameters[CellParams::P_22_DT2] = from->parameters[CellParams::P_22_DT2]; - to->parameters[CellParams::P_33_DT2] = from->parameters[CellParams::P_33_DT2]; - to->parameters[CellParams::RHOM] = from->parameters[CellParams::RHOM]; - to->parameters[CellParams::VX] = from->parameters[CellParams::VX]; - to->parameters[CellParams::VY] = from->parameters[CellParams::VY]; - to->parameters[CellParams::VZ] = from->parameters[CellParams::VZ]; - to->parameters[CellParams::RHOQ] = from->parameters[CellParams::RHOQ]; - to->parameters[CellParams::P_11] = from->parameters[CellParams::P_11]; - to->parameters[CellParams::P_22] = from->parameters[CellParams::P_22]; - to->parameters[CellParams::P_33] = from->parameters[CellParams::P_33]; + if (calculate_V_moments) { + to->parameters[CellParams::RHOM_V] = from->parameters[CellParams::RHOM_V]; + to->parameters[CellParams::VX_V] = from->parameters[CellParams::VX_V]; + to->parameters[CellParams::VY_V] = from->parameters[CellParams::VY_V]; + to->parameters[CellParams::VZ_V] = from->parameters[CellParams::VZ_V]; + to->parameters[CellParams::RHOQ_V] = from->parameters[CellParams::RHOQ_V]; + to->parameters[CellParams::P_11_V] = from->parameters[CellParams::P_11_V]; + to->parameters[CellParams::P_22_V] = from->parameters[CellParams::P_22_V]; + to->parameters[CellParams::P_33_V] = from->parameters[CellParams::P_33_V]; + } else { + to->parameters[CellParams::RHOM_R] = from->parameters[CellParams::RHOM_R]; + to->parameters[CellParams::VX_R] = from->parameters[CellParams::VX_R]; + to->parameters[CellParams::VY_R] = from->parameters[CellParams::VY_R]; + to->parameters[CellParams::VZ_R] = from->parameters[CellParams::VZ_R]; + to->parameters[CellParams::RHOQ_R] = from->parameters[CellParams::RHOQ_R]; + to->parameters[CellParams::P_11_R] = from->parameters[CellParams::P_11_R]; + to->parameters[CellParams::P_22_R] = from->parameters[CellParams::P_22_R]; + to->parameters[CellParams::P_33_R] = from->parameters[CellParams::P_33_R]; + } } if(to->sysBoundaryLayer == 1 && !copyMomentsOnly) { // Do this only for the first layer, the other layers do not need this. Do only if copyMomentsOnly is false. 
to->set_population(from->get_population(popID), popID); } else { - to->get_population(popID).RHO = from->get_population(popID).RHO; - to->get_population(popID).RHO_R = from->get_population(popID).RHO_R; - to->get_population(popID).RHO_V = from->get_population(popID).RHO_V; + if (calculate_V_moments) { + to->get_population(popID).RHO_V = from->get_population(popID).RHO_V; + } else { + to->get_population(popID).RHO_R = from->get_population(popID).RHO_R; + } + for (uint i=0; i<3; i++) { - to->get_population(popID).V[i] = from->get_population(popID).V[i]; - to->get_population(popID).V_R[i] = from->get_population(popID).V_R[i]; - to->get_population(popID).V_V[i] = from->get_population(popID).V_V[i]; - to->get_population(popID).P[i] = from->get_population(popID).P[i]; - to->get_population(popID).P_R[i] = from->get_population(popID).P_R[i]; - to->get_population(popID).P_V[i] = from->get_population(popID).P_V[i]; - + if (calculate_V_moments) { + to->get_population(popID).V_V[i] = from->get_population(popID).V_V[i]; + to->get_population(popID).P_V[i] = from->get_population(popID).P_V[i]; + } else { + to->get_population(popID).V_R[i] = from->get_population(popID).V_R[i]; + to->get_population(popID).P_R[i] = from->get_population(popID).P_R[i]; + } } } } @@ -410,26 +414,6 @@ namespace SBC { ) { const size_t numberOfCells = cellList.size(); creal factor = fluffiness / convert(numberOfCells); - - // Rescale moments - if (popID == 0) { - to->parameters[CellParams::RHOM_DT2] *= 1.0 - fluffiness; - to->parameters[CellParams::VX_DT2] *= 1.0 - fluffiness; - to->parameters[CellParams::VY_DT2] *= 1.0 - fluffiness; - to->parameters[CellParams::VZ_DT2] *= 1.0 - fluffiness; - to->parameters[CellParams::RHOQ_DT2] *= 1.0 - fluffiness; - to->parameters[CellParams::P_11_DT2] *= 1.0 - fluffiness; - to->parameters[CellParams::P_22_DT2] *= 1.0 - fluffiness; - to->parameters[CellParams::P_33_DT2] *= 1.0 - fluffiness; - to->parameters[CellParams::RHOM] *= 1.0 - fluffiness; - to->parameters[CellParams::VX] *= 1.0 - fluffiness; - to->parameters[CellParams::VY] *= 1.0 - fluffiness; - to->parameters[CellParams::VZ] *= 1.0 - fluffiness; - to->parameters[CellParams::RHOQ] *= 1.0 - fluffiness; - to->parameters[CellParams::P_11] *= 1.0 - fluffiness; - to->parameters[CellParams::P_22] *= 1.0 - fluffiness; - to->parameters[CellParams::P_33] *= 1.0 - fluffiness; - } if (to->sysBoundaryLayer == 1) { // Rescale own vspace @@ -449,25 +433,6 @@ namespace SBC { for (size_t i=0; iparameters[CellParams::RHOM_DT2] += factor*incomingCell->parameters[CellParams::RHOM_DT2]; - to->parameters[CellParams::VX_DT2] += factor*incomingCell->parameters[CellParams::VX_DT2]; - to->parameters[CellParams::VY_DT2] += factor*incomingCell->parameters[CellParams::VY_DT2]; - to->parameters[CellParams::VZ_DT2] += factor*incomingCell->parameters[CellParams::VZ_DT2]; - to->parameters[CellParams::RHOQ_DT2] += factor*incomingCell->parameters[CellParams::RHOQ_DT2]; - to->parameters[CellParams::P_11_DT2] += factor*incomingCell->parameters[CellParams::P_11_DT2]; - to->parameters[CellParams::P_22_DT2] += factor*incomingCell->parameters[CellParams::P_22_DT2]; - to->parameters[CellParams::P_33_DT2] += factor*incomingCell->parameters[CellParams::P_33_DT2]; - to->parameters[CellParams::RHOM] += factor*incomingCell->parameters[CellParams::RHOM]; - to->parameters[CellParams::VX] += factor*incomingCell->parameters[CellParams::VX]; - to->parameters[CellParams::VY] += factor*incomingCell->parameters[CellParams::VY]; - to->parameters[CellParams::VZ] += 
factor*incomingCell->parameters[CellParams::VZ]; - to->parameters[CellParams::RHOQ] += factor*incomingCell->parameters[CellParams::RHOQ]; - to->parameters[CellParams::P_11] += factor*incomingCell->parameters[CellParams::P_11]; - to->parameters[CellParams::P_22] += factor*incomingCell->parameters[CellParams::P_22]; - to->parameters[CellParams::P_33] += factor*incomingCell->parameters[CellParams::P_33]; - } - // Do this only for the first layer, the other layers do not need this. if (to->sysBoundaryLayer != 1) continue; diff --git a/sysboundary/sysboundarycondition.h b/sysboundary/sysboundarycondition.h index b39f155f5..131c16bf0 100644 --- a/sysboundary/sysboundarycondition.h +++ b/sysboundary/sysboundarycondition.h @@ -148,7 +148,8 @@ namespace SBC { virtual void vlasovBoundaryCondition( const dccrg::Dccrg& mpiGrid, const CellID& cellID, - const uint popID + const uint popID, + const bool calculate_V_moments )=0; virtual void getFaces(bool* faces); @@ -175,9 +176,9 @@ namespace SBC { void copyCellData( SpatialCell *from, SpatialCell *to, - bool allowBlockAdjustment, const bool& copyMomentsOnly, - const uint popID + const uint popID, + const bool calculate_V_moments ); void averageCellData( const dccrg::Dccrg& mpiGrid, @@ -211,7 +212,8 @@ namespace SBC { const dccrg::Dccrg& mpiGrid, const CellID& cellID, const bool& copyMomentsOnly, - const uint popID + const uint popID, + const bool calculate_V_moments ); void vlasovBoundaryCopyFromTheClosestNbrAndLimit( const dccrg::Dccrg& mpiGrid, diff --git a/vlasiator.cpp b/vlasiator.cpp index 09469ecce..5ab0f6ad8 100644 --- a/vlasiator.cpp +++ b/vlasiator.cpp @@ -851,23 +851,23 @@ int main(int argn,char* args[]) { phiprof::start("Propagate"); //Propagate the state of simulation forward in time by dt: - if (P::propagateVlasovTranslation || P::propagateVlasovAcceleration ) { - phiprof::start("Update system boundaries (Vlasov pre-translation)"); - sysBoundaries.applySysBoundaryVlasovConditions(mpiGrid, P::t+0.5*P::dt); - phiprof::stop("Update system boundaries (Vlasov pre-translation)"); - addTimedBarrier("barrier-boundary-conditions"); - } phiprof::start("Spatial-space"); - if( P::propagateVlasovTranslation) { calculateSpatialTranslation(mpiGrid,P::dt); } else { calculateSpatialTranslation(mpiGrid,0.0); } - phiprof::stop("Spatial-space",computedCells,"Cells"); + // Apply boundary conditions + if (P::propagateVlasovTranslation || P::propagateVlasovAcceleration ) { + phiprof::start("Update system boundaries (Vlasov post-translation)"); + sysBoundaries.applySysBoundaryVlasovConditions(mpiGrid, P::t+0.5*P::dt, false); + phiprof::stop("Update system boundaries (Vlasov post-translation)"); + addTimedBarrier("barrier-boundary-conditions"); + } + phiprof::start("Compute interp moments"); calculateInterpolatedVelocityMoments( mpiGrid, @@ -882,14 +882,6 @@ int main(int argn,char* args[]) { ); phiprof::stop("Compute interp moments"); - // Apply boundary conditions - if (P::propagateVlasovTranslation || P::propagateVlasovAcceleration ) { - phiprof::start("Update system boundaries (Vlasov post-translation)"); - sysBoundaries.applySysBoundaryVlasovConditions(mpiGrid, P::t+0.5*P::dt); - phiprof::stop("Update system boundaries (Vlasov post-translation)"); - addTimedBarrier("barrier-boundary-conditions"); - } - // Propagate fields forward in time by dt. 
This needs to be done before the // moments for t + dt are computed (field uses t and t+0.5dt) if (P::propagateField) { @@ -939,10 +931,16 @@ int main(int argn,char* args[]) { //zero step to set up moments _v calculateAcceleration(mpiGrid, 0.0); } - phiprof::stop("Velocity-space",computedCells,"Cells"); addTimedBarrier("barrier-after-acceleration"); - + + if (P::propagateVlasovTranslation || P::propagateVlasovAcceleration ) { + phiprof::start("Update system boundaries (Vlasov post-acceleration)"); + sysBoundaries.applySysBoundaryVlasovConditions(mpiGrid, P::t+0.5*P::dt, true); + phiprof::stop("Update system boundaries (Vlasov post-acceleration)"); + addTimedBarrier("barrier-boundary-conditions"); + } + phiprof::start("Compute interp moments"); // *here we compute rho and rho_v for timestep t + dt, so next // timestep * // diff --git a/vlasovsolver/cpu_moments.cpp b/vlasovsolver/cpu_moments.cpp index eb5a1ed8f..b038c0b64 100644 --- a/vlasovsolver/cpu_moments.cpp +++ b/vlasovsolver/cpu_moments.cpp @@ -161,6 +161,10 @@ void calculateMoments_R( for (size_t c=0; csysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && cell->sysBoundaryLayer != 1) { + continue; + } + // Clear old moments to zero value if (popID == 0) { cell->parameters[CellParams::RHOM_R ] = 0.0; @@ -227,6 +231,9 @@ void calculateMoments_R( #pragma omp parallel for for (size_t c=0; csysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && cell->sysBoundaryLayer != 1) { + continue; + } cell->parameters[CellParams::VX_R] = divideIfNonZero(cell->parameters[CellParams::VX_R], cell->parameters[CellParams::RHOM_R]); cell->parameters[CellParams::VY_R] = divideIfNonZero(cell->parameters[CellParams::VY_R], cell->parameters[CellParams::RHOM_R]); cell->parameters[CellParams::VZ_R] = divideIfNonZero(cell->parameters[CellParams::VZ_R], cell->parameters[CellParams::RHOM_R]); @@ -242,7 +249,11 @@ void calculateMoments_R( #pragma omp parallel for for (size_t c=0; csysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && cell->sysBoundaryLayer != 1) { + continue; + } + vmesh::VelocityBlockContainer& blockContainer = cell->get_velocity_blocks(popID); if (blockContainer.size() == 0) continue; const Realf* data = blockContainer.getData(); @@ -299,6 +310,10 @@ void calculateMoments_V( for (size_t c=0; csysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && cell->sysBoundaryLayer != 1) { + continue; + } + // Clear old moments to zero value if (popID == 0) { cell->parameters[CellParams::RHOM_V ] = 0.0; @@ -347,6 +362,9 @@ void calculateMoments_V( #pragma omp parallel for for (size_t c=0; csysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && cell->sysBoundaryLayer != 1) { + continue; + } cell->parameters[CellParams::VX_V] = divideIfNonZero(cell->parameters[CellParams::VX_V], cell->parameters[CellParams::RHOM_V]); cell->parameters[CellParams::VY_V] = divideIfNonZero(cell->parameters[CellParams::VY_V], cell->parameters[CellParams::RHOM_V]); cell->parameters[CellParams::VZ_V] = divideIfNonZero(cell->parameters[CellParams::VZ_V], cell->parameters[CellParams::RHOM_V]); @@ -362,6 +380,10 @@ void calculateMoments_V( #pragma omp parallel for for (size_t c=0; csysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && cell->sysBoundaryLayer != 1) { + continue; + } vmesh::VelocityBlockContainer& blockContainer = cell->get_velocity_blocks(popID); if (blockContainer.size() == 0) continue; diff --git a/vlasovsolver/vlasovmover.cpp b/vlasovsolver/vlasovmover.cpp index 664c5ebd7..0bf4f10e1 100644 --- a/vlasovsolver/vlasovmover.cpp +++ b/vlasovsolver/vlasovmover.cpp @@ 
-443,28 +443,26 @@ void calculateInterpolatedVelocityMoments(
 ) {
    const vector<CellID>& cells = getLocalCells();
    
-   //Iterate through all local cells (excl. system boundary cells):
+   //Iterate through all local cells
    #pragma omp parallel for
    for (size_t c=0; c<cells.size(); ++c) {
       SpatialCell* SC = mpiGrid[cells[c]];
-      if (SC->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) {
-         SC->parameters[cp_rhom ] = 0.5* ( SC->parameters[CellParams::RHOM_R] + SC->parameters[CellParams::RHOM_V] );
-         SC->parameters[cp_vx] = 0.5* ( SC->parameters[CellParams::VX_R] + SC->parameters[CellParams::VX_V] );
-         SC->parameters[cp_vy] = 0.5* ( SC->parameters[CellParams::VY_R] + SC->parameters[CellParams::VY_V] );
-         SC->parameters[cp_vz] = 0.5* ( SC->parameters[CellParams::VZ_R] + SC->parameters[CellParams::VZ_V] );
-         SC->parameters[cp_rhoq ] = 0.5* ( SC->parameters[CellParams::RHOQ_R] + SC->parameters[CellParams::RHOQ_V] );
-         SC->parameters[cp_p11] = 0.5* ( SC->parameters[CellParams::P_11_R] + SC->parameters[CellParams::P_11_V] );
-         SC->parameters[cp_p22] = 0.5* ( SC->parameters[CellParams::P_22_R] + SC->parameters[CellParams::P_22_V] );
-         SC->parameters[cp_p33] = 0.5* ( SC->parameters[CellParams::P_33_R] + SC->parameters[CellParams::P_33_V] );
-
-         for (uint popID=0; popID<getObjectWrapper().particleSpecies.size(); ++popID) {
-            Population & pop = SC->get_population(popID);
-            pop.RHO = 0.5 * ( pop.RHO_R + pop.RHO_V );
-            for(int i=0; i<3; i++) {
-               pop.V[i] = 0.5 * ( pop.V_R[i] + pop.V_V[i] );
-               pop.P[i] = 0.5 * ( pop.P_R[i] + pop.P_V[i] );
-            }
+      SC->parameters[cp_rhom ] = 0.5* ( SC->parameters[CellParams::RHOM_R] + SC->parameters[CellParams::RHOM_V] );
+      SC->parameters[cp_vx] = 0.5* ( SC->parameters[CellParams::VX_R] + SC->parameters[CellParams::VX_V] );
+      SC->parameters[cp_vy] = 0.5* ( SC->parameters[CellParams::VY_R] + SC->parameters[CellParams::VY_V] );
+      SC->parameters[cp_vz] = 0.5* ( SC->parameters[CellParams::VZ_R] + SC->parameters[CellParams::VZ_V] );
+      SC->parameters[cp_rhoq ] = 0.5* ( SC->parameters[CellParams::RHOQ_R] + SC->parameters[CellParams::RHOQ_V] );
+      SC->parameters[cp_p11] = 0.5* ( SC->parameters[CellParams::P_11_R] + SC->parameters[CellParams::P_11_V] );
+      SC->parameters[cp_p22] = 0.5* ( SC->parameters[CellParams::P_22_R] + SC->parameters[CellParams::P_22_V] );
+      SC->parameters[cp_p33] = 0.5* ( SC->parameters[CellParams::P_33_R] + SC->parameters[CellParams::P_33_V] );
+
+      for (uint popID=0; popID<getObjectWrapper().particleSpecies.size(); ++popID) {
+         Population & pop = SC->get_population(popID);
+         pop.RHO = 0.5 * ( pop.RHO_R + pop.RHO_V );
+         for(int i=0; i<3; i++) {
+            pop.V[i] = 0.5 * ( pop.V_R[i] + pop.V_V[i] );
+            pop.P[i] = 0.5 * ( pop.P_R[i] + pop.P_V[i] );
          }
       }
    }

From a3c031ff52a9a8cfd1c9b3032896d636c659e3a7 Mon Sep 17 00:00:00 2001
From: ipryakem
Date: Wed, 20 Nov 2019 16:42:29 +0100
Subject: [PATCH 593/602] Attempt at fixing ionosphere layer 2 population moments, unsuccessful.
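The weighting moved into the _R/_V branches keeps the boundary cell's own
moments with weight (1 - fluffiness) and adds each of the N close donor cells
with weight fluffiness/N, i.e. x <- (1 - fluffiness)*x + (fluffiness/N)*sum_i(x_i).
A minimal self-contained sketch of that blend on one scalar (illustrative
names only, not repository code; factor plays the role of
fluffiness / numberOfCells in averageCellData, and at least one donor is
assumed):

    #include <vector>

    // Sketch: fluffy blend of a boundary cell's value with N donor values.
    double fluffyBlend(double own, const std::vector<double>& donors, double fluffiness) {
       double result = (1.0 - fluffiness) * own;          // rescaled own contribution
       const double factor = fluffiness / donors.size();  // per-donor weight
       for (double d : donors) {
          result += factor * d;                           // accumulate donor contributions
       }
       return result;
    }

With fluffiness = 0 the cell keeps its own moments untouched; with
fluffiness = 1 it takes the plain average of the donors.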
--- sysboundary/ionosphere.cpp | 2 +- sysboundary/sysboundarycondition.cpp | 134 ++++++++++++++++++++++----- sysboundary/sysboundarycondition.h | 5 +- 3 files changed, 114 insertions(+), 27 deletions(-) diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index 0ce5e0f45..0fbc7d8cf 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -698,7 +698,7 @@ namespace SBC { const bool calculate_V_moments ) { phiprof::start("vlasovBoundaryCondition (Ionosphere)"); - this->vlasovBoundaryFluffyCopyFromAllCloseNbrs(mpiGrid, cellID, popID, this->speciesParams[popID].fluffiness); + this->vlasovBoundaryFluffyCopyFromAllCloseNbrs(mpiGrid, cellID, popID, calculate_V_moments, this->speciesParams[popID].fluffiness); phiprof::stop("vlasovBoundaryCondition (Ionosphere)"); } diff --git a/sysboundary/sysboundarycondition.cpp b/sysboundary/sysboundarycondition.cpp index a50bd802b..326a6c9fb 100644 --- a/sysboundary/sysboundarycondition.cpp +++ b/sysboundary/sysboundarycondition.cpp @@ -242,7 +242,7 @@ namespace SBC { */ void SysBoundaryCondition::vlasovBoundaryCopyFromAllClosestNbrs( const dccrg::Dccrg& mpiGrid, - const CellID& cellID,const uint popID + const CellID& cellID,const uint popID, const bool calculate_V_moments ) { const std::vector closestCells = getAllClosestNonsysboundaryCells(cellID); @@ -250,7 +250,7 @@ namespace SBC { cerr << __FILE__ << ":" << __LINE__ << ": No closest cell found!" << endl; abort(); } - averageCellData(mpiGrid, closestCells, mpiGrid[cellID], popID); + averageCellData(mpiGrid, closestCells, mpiGrid[cellID], popID, calculate_V_moments); } /*! Function used to average and copy the distribution and moments from all the close sysboundarytype::NOT_SYSBOUNDARY cells. @@ -259,7 +259,7 @@ namespace SBC { */ void SysBoundaryCondition::vlasovBoundaryFluffyCopyFromAllCloseNbrs( const dccrg::Dccrg& mpiGrid, - const CellID& cellID,const uint popID,creal fluffiness + const CellID& cellID,const uint popID,const bool calculate_V_moments, creal fluffiness ) { const std::vector closeCells = getAllCloseNonsysboundaryCells(cellID); @@ -267,7 +267,7 @@ namespace SBC { cerr << __FILE__ << ":" << __LINE__ << ": No close cell found!" << endl; abort(); } - averageCellData(mpiGrid, closeCells, mpiGrid[cellID], popID, fluffiness); + averageCellData(mpiGrid, closeCells, mpiGrid[cellID], popID, calculate_V_moments, fluffiness); } /*! Function used to copy the distribution from (one of) the closest sysboundarytype::NOT_SYSBOUNDARY cell but limiting to values no higher than where it can flow into. Moments are recomputed. 
@@ -410,6 +410,7 @@ namespace SBC { const std::vector cellList, SpatialCell *to, const uint popID, + const bool calculate_V_moments, creal fluffiness /* default =0.0*/ ) { const size_t numberOfCells = cellList.size(); @@ -428,35 +429,118 @@ namespace SBC { } toData += SIZE_VELBLOCK; } // for-loop over velocity blocks + } else { + // Rescale moments + if (popID == 0) { + if (calculate_V_moments) { + to->parameters[CellParams::RHOM_V] *= 1.0 - fluffiness; + to->parameters[CellParams::VX_V] *= 1.0 - fluffiness; + to->parameters[CellParams::VY_V] *= 1.0 - fluffiness; + to->parameters[CellParams::VZ_V] *= 1.0 - fluffiness; + to->parameters[CellParams::RHOQ_V] *= 1.0 - fluffiness; + to->parameters[CellParams::P_11_V] *= 1.0 - fluffiness; + to->parameters[CellParams::P_22_V] *= 1.0 - fluffiness; + to->parameters[CellParams::P_33_V] *= 1.0 - fluffiness; + } else { + to->parameters[CellParams::RHOM_R] *= 1.0 - fluffiness; + to->parameters[CellParams::VX_R] *= 1.0 - fluffiness; + to->parameters[CellParams::VY_R] *= 1.0 - fluffiness; + to->parameters[CellParams::VZ_R] *= 1.0 - fluffiness; + to->parameters[CellParams::RHOQ_R] *= 1.0 - fluffiness; + to->parameters[CellParams::P_11_R] *= 1.0 - fluffiness; + to->parameters[CellParams::P_22_R] *= 1.0 - fluffiness; + to->parameters[CellParams::P_33_R] *= 1.0 - fluffiness; + } + } + + Population & pop = to->get_population(popID); + if(calculate_V_moments) { + pop.RHO_V *= 1.0 - fluffiness; + pop.V_V[0] *= 1.0 - fluffiness; + pop.V_V[1] *= 1.0 - fluffiness; + pop.V_V[2] *= 1.0 - fluffiness; + pop.P_V[0] *= 1.0 - fluffiness; + pop.P_V[1] *= 1.0 - fluffiness; + pop.P_V[2] *= 1.0 - fluffiness; + } else { + pop.RHO_R *= 1.0 - fluffiness; + pop.V_R[0] *= 1.0 - fluffiness; + pop.V_R[1] *= 1.0 - fluffiness; + pop.V_R[2] *= 1.0 - fluffiness; + pop.P_R[0] *= 1.0 - fluffiness; + pop.P_R[1] *= 1.0 - fluffiness; + pop.P_R[2] *= 1.0 - fluffiness; + } } for (size_t i=0; isysBoundaryLayer != 1) continue; - const Realf* fromData = incomingCell->get_data(popID); - for (vmesh::LocalID incBlockLID=0; incBlockLIDget_number_of_velocity_blocks(popID); ++incBlockLID) { - // Global ID of the block containing incoming data - vmesh::GlobalID incBlockGID = incomingCell->get_velocity_block_global_id(incBlockLID,popID); - - // Get local ID of the target block. If the block doesn't exist, create it. - vmesh::GlobalID toBlockLID = to->get_velocity_block_local_id(incBlockGID,popID); - if (toBlockLID == SpatialCell::invalid_local_id()) { - to->add_velocity_block(incBlockGID,popID); - toBlockLID = to->get_velocity_block_local_id(incBlockGID,popID); + if (to->sysBoundaryLayer == 1) { + const Realf* fromData = incomingCell->get_data(popID); + for (vmesh::LocalID incBlockLID=0; incBlockLIDget_number_of_velocity_blocks(popID); ++incBlockLID) { + // Global ID of the block containing incoming data + vmesh::GlobalID incBlockGID = incomingCell->get_velocity_block_global_id(incBlockLID,popID); + + // Get local ID of the target block. If the block doesn't exist, create it. 
+               vmesh::GlobalID toBlockLID = to->get_velocity_block_local_id(incBlockGID,popID);
+               if (toBlockLID == SpatialCell::invalid_local_id()) {
+                  to->add_velocity_block(incBlockGID,popID);
+                  toBlockLID = to->get_velocity_block_local_id(incBlockGID,popID);
+               }
+
+               // Pointer to target block data
+               Realf* toData = to->get_data(toBlockLID,popID);
+
+               // Add values from source cells
+               for (uint kc=0; kc<WID; ++kc) {
+                  for (uint jc=0; jc<WID; ++jc) {
+                     for (uint ic=0; ic<WID; ++ic) {
+                        toData[cellIndex(ic,jc,kc)] += factor*fromData[cellIndex(ic,jc,kc)];
+                     }
+                  }
+               }
+               fromData += SIZE_VELBLOCK;
+            } // for-loop over velocity blocks
+         } else {
+            if (popID == 0) {
+               if (calculate_V_moments) {
+                  to->parameters[CellParams::RHOM_V] += factor*incomingCell->parameters[CellParams::RHOM_V];
+                  to->parameters[CellParams::VX_V] += factor*incomingCell->parameters[CellParams::VX_V];
+                  to->parameters[CellParams::VY_V] += factor*incomingCell->parameters[CellParams::VY_V];
+                  to->parameters[CellParams::VZ_V] += factor*incomingCell->parameters[CellParams::VZ_V];
+                  to->parameters[CellParams::RHOQ_V] += factor*incomingCell->parameters[CellParams::RHOQ_V];
+                  to->parameters[CellParams::P_11_V] += factor*incomingCell->parameters[CellParams::P_11_V];
+                  to->parameters[CellParams::P_22_V] += factor*incomingCell->parameters[CellParams::P_22_V];
+                  to->parameters[CellParams::P_33_V] += factor*incomingCell->parameters[CellParams::P_33_V];
+               } else {
+                  to->parameters[CellParams::RHOM_R] += factor*incomingCell->parameters[CellParams::RHOM_R];
+                  to->parameters[CellParams::VX_R] += factor*incomingCell->parameters[CellParams::VX_R];
+                  to->parameters[CellParams::VY_R] += factor*incomingCell->parameters[CellParams::VY_R];
+                  to->parameters[CellParams::VZ_R] += factor*incomingCell->parameters[CellParams::VZ_R];
+                  to->parameters[CellParams::RHOQ_R] += factor*incomingCell->parameters[CellParams::RHOQ_R];
+                  to->parameters[CellParams::P_11_R] += factor*incomingCell->parameters[CellParams::P_11_R];
+                  to->parameters[CellParams::P_22_R] += factor*incomingCell->parameters[CellParams::P_22_R];
+                  to->parameters[CellParams::P_33_R] += factor*incomingCell->parameters[CellParams::P_33_R];
+               }
+            }
-         // Pointer to target block data
-         Realf* toData = to->get_data(toBlockLID,popID);
-
-         // Add values from source cells
-         for (uint kc=0; kc<WID; ++kc) {
-            for (uint jc=0; jc<WID; ++jc) {
-               for (uint ic=0; ic<WID; ++ic) {
-                  toData[cellIndex(ic,jc,kc)] += factor*fromData[cellIndex(ic,jc,kc)];
-               }
-            }
-         }
+
+            Population & pop = to->get_population(popID);
+            const Population & fromPop = incomingCell->get_population(popID);
+            if(calculate_V_moments) {
+               pop.RHO_V += factor*fromPop.RHO_V ;
+               pop.V_V[0] += factor*fromPop.V_V[0];
+               pop.V_V[1] += factor*fromPop.V_V[1];
+               pop.V_V[2] += factor*fromPop.V_V[2];
+               pop.P_V[0] += factor*fromPop.P_V[0];
+               pop.P_V[1] += factor*fromPop.P_V[1];
+               pop.P_V[2] += factor*fromPop.P_V[2];
+            } else {
+               pop.RHO_R += factor*fromPop.RHO_R ;
+               pop.V_R[0] += factor*fromPop.V_R[0];
+               pop.V_R[1] += factor*fromPop.V_R[1];
+               pop.V_R[2] += factor*fromPop.V_R[2];
+               pop.P_R[0] += factor*fromPop.P_R[0];
+               pop.P_R[1] += factor*fromPop.P_R[1];
+               pop.P_R[2] += factor*fromPop.P_R[2];
+            }
-         fromData += SIZE_VELBLOCK;
-      } // for-loop over velocity blocks
+         }
       }
    }
diff --git a/sysboundary/sysboundarycondition.h b/sysboundary/sysboundarycondition.h
index 131c16bf0..f7a2dc25f 100644
--- a/sysboundary/sysboundarycondition.h
+++ b/sysboundary/sysboundarycondition.h
@@ -185,6 +185,7 @@ namespace SBC {
          std::vector<SpatialCell*> cellList,
          SpatialCell *to,
          const uint popID,
+         const bool calculate_V_moments,
          creal fluffiness = 0
       );
       std::array<SpatialCell*,27> & getFlowtoCells(
@@ -223,12 +224,14 @@ namespace SBC {
       void vlasovBoundaryCopyFromAllClosestNbrs(
          const dccrg::Dccrg<SpatialCell,dccrg::Geometry>& mpiGrid,
          const CellID& cellID,
-         const uint popID
+         const uint popID,
+         const bool calculate_V_moments
       );
       void vlasovBoundaryFluffyCopyFromAllCloseNbrs(
          const dccrg::Dccrg<SpatialCell,dccrg::Geometry>& mpiGrid,
          const CellID& cellID,
          const uint popID,
+         const bool calculate_V_moments,
          creal fluffiness
       );
       void vlasovBoundaryReflect(

From
05c0ba1d3ee206a0e35bf92d80b5b0bf5c62df89 Mon Sep 17 00:00:00 2001 From: ipryakem Date: Thu, 21 Nov 2019 14:46:47 +0100 Subject: [PATCH 594/602] Improvement of the ionosphere B computations. --- fieldsolver/ldz_magnetic_field.cpp | 63 ++++++---- sysboundary/ionosphere.cpp | 178 ++++++++++++++++++++--------- 2 files changed, 166 insertions(+), 75 deletions(-) diff --git a/fieldsolver/ldz_magnetic_field.cpp b/fieldsolver/ldz_magnetic_field.cpp index 9d0be6873..9ea8ef1de 100644 --- a/fieldsolver/ldz_magnetic_field.cpp +++ b/fieldsolver/ldz_magnetic_field.cpp @@ -182,7 +182,8 @@ void propagateSysBoundaryMagneticField( cint k, SysBoundary& sysBoundaries, creal& dt, - cint& RKCase + cint& RKCase, + cuint component ) { std::array * bGrid; if (RKCase == RK_ORDER1 || RKCase == RK_ORDER2_STEP2) { @@ -190,23 +191,7 @@ void propagateSysBoundaryMagneticField( } else { bGrid = perBDt2Grid.get(i,j,k); } - cuint sysBoundaryFlag = technicalGrid.get(i,j,k)->sysBoundaryFlag; - cuint sysBoundaryLayer = technicalGrid.get(i,j,k)->sysBoundaryLayer; - - for (uint component = 0; component < 3; component++) { - if (sysBoundaryLayer != 1) { - bGrid->at(fsgrids::bfield::PERBX + component) = sysBoundaries.getSysBoundary(sysBoundaryFlag)->fieldSolverBoundaryCondMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, dt, RKCase, component); - } else { - cint neigh_i=i + ((component==0)?-1:0); - cint neigh_j=j + ((component==1)?-1:0); - cint neigh_k=k + ((component==2)?-1:0); - uint neighborSysBoundaryFlag = technicalGrid.get(neigh_i, neigh_j, neigh_k)->sysBoundaryFlag; - - if (neighborSysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { // Complement to propagateMagneticFieldSimple main loop - bGrid->at(fsgrids::bfield::PERBX + component) = sysBoundaries.getSysBoundary(sysBoundaryFlag)->fieldSolverBoundaryCondMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, dt, RKCase, component); - } - } - } + bGrid->at(fsgrids::bfield::PERBX + component) = sysBoundaries.getSysBoundary(technicalGrid.get(i,j,k)->sysBoundaryFlag)->fieldSolverBoundaryCondMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, dt, RKCase, component); } /*! \brief High-level magnetic field propagation function. 
@@ -294,13 +279,51 @@ void propagateMagneticFieldSimple( // Propagate B on system boundary/process inner cells timer=phiprof::initializeTimer("Compute system boundary cells"); phiprof::start(timer); + // L1 pass + #pragma omp parallel for collapse(3) + for (int k=0; ksysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && + technicalGrid.get(i,j,k)->sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { + + cuint sysBoundaryFlag = technicalGrid.get(i,j,k)->sysBoundaryFlag; + cuint sysBoundaryLayer = technicalGrid.get(i,j,k)->sysBoundaryLayer; + + // L1 pass + if (sysBoundaryLayer == 1) { + for (uint component = 0; component < 3; component++) { + cint neigh_i=i + ((component==0)?-1:0); + cint neigh_j=j + ((component==1)?-1:0); + cint neigh_k=k + ((component==2)?-1:0); + uint neighborSysBoundaryFlag = technicalGrid.get(neigh_i, neigh_j, neigh_k)->sysBoundaryFlag; + if (neighborSysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { // Complement to previous loop + propagateSysBoundaryMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, sysBoundaries, dt, RKCase, component); + } + } + } + } + } + } + } + + // L2 pass #pragma omp parallel for collapse(3) for (int k=0; ksysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && - technicalGrid.get(i,j,k)->sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { - propagateSysBoundaryMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, sysBoundaries, dt, RKCase); + technicalGrid.get(i,j,k)->sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { + + cuint sysBoundaryFlag = technicalGrid.get(i,j,k)->sysBoundaryFlag; + cuint sysBoundaryLayer = technicalGrid.get(i,j,k)->sysBoundaryLayer; + + // L2 pass + if (sysBoundaryLayer == 2) { + for (uint component = 0; component < 3; component++) { + propagateSysBoundaryMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, sysBoundaries, dt, RKCase, component); + } + } } } } diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index 0fbc7d8cf..9ee75ba5c 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -554,64 +554,132 @@ namespace SBC { bGrid = &perBDt2Grid; } - - // Otherwise: - // Sum perturbed B component over all nearest NOT_SYSBOUNDARY neighbours - /**** - std::vector< std::array > closestCells = getAllClosestNonsysboundaryCells(technicalGrid, i,j,k); - if (closestCells.size() == 1 && closestCells[0][0] == std::numeric_limits::min() ) { - std::cerr << __FILE__ << ":" << __LINE__ << ":" << "No closest cells found!" 
<< std::endl; - abort(); - } - - std::array averageB = {{ 0.0 }}; - for (uint it = 0; it < closestCells.size(); it++) { - #ifdef DEBUG_IONOSPHERE - if (technicalGrid.get(closestCells[it][0],closestCells[it][1],closestCells[it][2])->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { - stringstream ss; - ss << "ERROR, ionosphere cell (" << i << "," << j << "," << k << ") uses value from sysboundary nbr (" << closestCells[it][0] << "," << closestCells[it][1] << "," << closestCells[it][2] << " in " << __FILE__ << ":" << __LINE__ << endl; - cerr << ss.str(); - exit(1); + if (technicalGrid.get(i,j,k)->sysBoundaryLayer == 1) { + switch(component) { + case 0: + if ( technicalGrid.get(i-1,j,k)->sysBoundaryLayer==1 && technicalGrid.get(i-2,j,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY + && technicalGrid.get(i+1,j,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY + ) { + return 0.5 * (bGrid->get(i-1,j,k)->at(fsgrids::bfield::PERBX) + bGrid->get(i+1,j,k)->at(fsgrids::bfield::PERBX)); + } else if (technicalGrid.get(i-1,j,k)->sysBoundaryLayer==1 && technicalGrid.get(i-2,j,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + return bGrid->get(i-1,j,k)->at(fsgrids::bfield::PERBX); + } else if (technicalGrid.get(i+1,j,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + return bGrid->get(i+1,j,k)->at(fsgrids::bfield::PERBX); + } else { + Real retval = 0.0; + uint nCells = 0; + if (technicalGrid.get(i,j-1,k)->sysBoundaryLayer==1 && technicalGrid.get(i-1,j-1,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + retval += bGrid->get(i,j-1,k)->at(fsgrids::bfield::PERBX); + nCells++; + } + if (technicalGrid.get(i,j+1,k)->sysBoundaryLayer==1 && technicalGrid.get(i-1,j+1,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + retval += bGrid->get(i,j+1,k)->at(fsgrids::bfield::PERBX); + nCells++; + } + if (technicalGrid.get(i,j,k-1)->sysBoundaryLayer==1 && technicalGrid.get(i-1,j,k-1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + retval += bGrid->get(i,j,k-1)->at(fsgrids::bfield::PERBX); + nCells++; + } + if (technicalGrid.get(i,j,k+1)->sysBoundaryLayer==1 && technicalGrid.get(i-1,j,k+1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + retval += bGrid->get(i,j,k+1)->at(fsgrids::bfield::PERBX); + nCells++; + } + if (nCells == 0) { + cerr << __FILE__ << ":" << __LINE__ << ": ERROR: this should not have fallen through." 
<< endl; + return 0.0; + } + return retval / nCells; + } + case 1: + if ( technicalGrid.get(i,j-1,k)->sysBoundaryLayer==1 && technicalGrid.get(i,j-2,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY + && technicalGrid.get(i,j+1,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY + ) { + return 0.5 * (bGrid->get(i,j-1,k)->at(fsgrids::bfield::PERBY) + bGrid->get(i,j+1,k)->at(fsgrids::bfield::PERBY)); + } else if (technicalGrid.get(i,j-1,k)->sysBoundaryLayer==1 && technicalGrid.get(i,j-2,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + return bGrid->get(i,j-1,k)->at(fsgrids::bfield::PERBY); + } else if (technicalGrid.get(i,j+1,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + return bGrid->get(i,j+1,k)->at(fsgrids::bfield::PERBY); + } else { + Real retval = 0.0; + uint nCells = 0; + if (technicalGrid.get(i-1,j,k)->sysBoundaryLayer==1 && technicalGrid.get(i-1,j-1,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + retval += bGrid->get(i-1,j,k)->at(fsgrids::bfield::PERBY); + nCells++; + } + if (technicalGrid.get(i+1,j,k)->sysBoundaryLayer==1 && technicalGrid.get(i+1,j-1,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + retval += bGrid->get(i+1,j,k)->at(fsgrids::bfield::PERBY); + nCells++; + } + if (technicalGrid.get(i,j,k-1)->sysBoundaryLayer==1 && technicalGrid.get(i,j-1,k-1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + retval += bGrid->get(i,j,k-1)->at(fsgrids::bfield::PERBY); + nCells++; + } + if (technicalGrid.get(i,j,k+1)->sysBoundaryLayer==1 && technicalGrid.get(i,j-1,k+1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + retval += bGrid->get(i,j,k+1)->at(fsgrids::bfield::PERBY); + nCells++; + } + if (nCells == 0) { + cerr << __FILE__ << ":" << __LINE__ << ": ERROR: this should not have fallen through." 
<< endl; + return 0.0; + } + return retval / nCells; + } + case 2: + if ( technicalGrid.get(i,j,k-1)->sysBoundaryLayer==1 && technicalGrid.get(i,j,k-2)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY + && technicalGrid.get(i,j,k+1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY + ) { + return 0.5 * (bGrid->get(i,j,k-1)->at(fsgrids::bfield::PERBZ) + bGrid->get(i,j,k+1)->at(fsgrids::bfield::PERBZ)); + } else if (technicalGrid.get(i,j,k-1)->sysBoundaryLayer==1 && technicalGrid.get(i,j,k-2)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + return bGrid->get(i,j,k-1)->at(fsgrids::bfield::PERBZ); + } else if (technicalGrid.get(i,j,k+1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + return bGrid->get(i,j,k+1)->at(fsgrids::bfield::PERBZ); + } else { + Real retval = 0.0; + uint nCells = 0; + if (technicalGrid.get(i-1,j,k)->sysBoundaryLayer==1 && technicalGrid.get(i-1,j,k-1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + retval += bGrid->get(i-1,j,k)->at(fsgrids::bfield::PERBZ); + nCells++; + } + if (technicalGrid.get(i+1,j,k)->sysBoundaryLayer==1 && technicalGrid.get(i+1,j,k-1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + retval += bGrid->get(i+1,j,k)->at(fsgrids::bfield::PERBZ); + nCells++; + } + if (technicalGrid.get(i,j-1,k)->sysBoundaryLayer==1 && technicalGrid.get(i,j-1,k-1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + retval += bGrid->get(i,j-1,k)->at(fsgrids::bfield::PERBZ); + nCells++; + } + if (technicalGrid.get(i,j+1,k)->sysBoundaryLayer==1 && technicalGrid.get(i,j+1,k-1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + retval += bGrid->get(i,j+1,k)->at(fsgrids::bfield::PERBZ); + nCells++; + } + if (nCells == 0) { + cerr << __FILE__ << ":" << __LINE__ << ": ERROR: this should not have fallen through." << endl; + return 0.0; + } + return retval / nCells; + } + default: + cerr << "ERROR: ionosphere boundary tried to copy nonsensical magnetic field component " << component << endl; + return 0.0; } - #endif - averageB[0] += bGrid->get(closestCells[it][0], closestCells[it][1], closestCells[it][2])->at(fsgrids::bfield::PERBX); - averageB[1] += bGrid->get(closestCells[it][0], closestCells[it][1], closestCells[it][2])->at(fsgrids::bfield::PERBY); - averageB[2] += bGrid->get(closestCells[it][0], closestCells[it][1], closestCells[it][2])->at(fsgrids::bfield::PERBZ); - } - - // Average and project to normal direction - std::array normalDirection = fieldSolverGetNormalDirection(technicalGrid, i, j, k); - for(uint i=0; i<3; i++) { - averageB[i] *= normalDirection[i] / closestCells.size(); - } - // Return (B.n)*normalVector[component] - return (averageB[0]+averageB[1]+averageB[2])*normalDirection[component]; - ***/ - - // Copy each face B-field from the cell in the simulation cell direction - // NOTE This might misbehave if OUTFLOW crosses IONOSPHERE, which was supported prior to 201911 but was never really used. 
- switch(component) { - case 0: - if (technicalGrid.get(i-1,j,k)->sysBoundaryLayer==1) { - return bGrid->get(i-1,j,k)->at(fsgrids::bfield::PERBX + component); - } else { // Is sysboundarylayer 2, read from opposite direction - return bGrid->get(i+1,j,k)->at(fsgrids::bfield::PERBX + component); - } - case 1: - if (technicalGrid.get(i,j-1,k)->sysBoundaryLayer==1) { - return bGrid->get(i,j-1,k)->at(fsgrids::bfield::PERBX + component); - } else { // Is sysboundarylayer 2, read from opposite direction - return bGrid->get(i,j+1,k)->at(fsgrids::bfield::PERBX + component); - } - case 2: - if (technicalGrid.get(i,j,k-1)->sysBoundaryLayer==1) { - return bGrid->get(i,j,k-1)->at(fsgrids::bfield::PERBX + component); - } else { // Is sysboundarylayer 2, read from opposite direction - return bGrid->get(i,j,k+1)->at(fsgrids::bfield::PERBX + component); + } else { // L2 cells + Real retval = 0.0; + uint nCells = 0; + for (uint a=-1; a<2; a++) { + for (uint b=-1; b<2; b++) { + for (uint c=0; c<2; c++) { + if (technicalGrid.get(a,b,c)->sysBoundaryLayer == 1) { + retval += bGrid->get(a,b,c)->at(fsgrids::bfield::PERBX + component); + nCells++; + } + } } - default: - cerr << "ERROR: ionosphere boundary tried to copy nonsensical magnetic field component " << component << endl; + } + if (nCells == 0) { + cerr << __FILE__ << ":" << __LINE__ << ": ERROR: this should not have fallen through." << endl; return 0.0; + } + return retval / nCells; } } From 8d7cea17323b545f8a00a679f8f532692916edd3 Mon Sep 17 00:00:00 2001 From: ipryakem Date: Tue, 26 Nov 2019 10:35:26 +0100 Subject: [PATCH 595/602] Debugged offsets in ionosphere magnetic field boundary condition. --- sysboundary/ionosphere.cpp | 45 +++++++++++++++++++++++++++++++++++--- 1 file changed, 42 insertions(+), 3 deletions(-) diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index 9ee75ba5c..91da22f4e 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -584,6 +584,19 @@ namespace SBC { retval += bGrid->get(i,j,k+1)->at(fsgrids::bfield::PERBX); nCells++; } + if (nCells == 0) { + for (int a=i-1; asysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + retval += bGrid->get(a,b,c)->at(fsgrids::bfield::PERBX); + nCells++; + } + } + } + } + } if (nCells == 0) { cerr << __FILE__ << ":" << __LINE__ << ": ERROR: this should not have fallen through." << endl; return 0.0; @@ -618,6 +631,19 @@ namespace SBC { retval += bGrid->get(i,j,k+1)->at(fsgrids::bfield::PERBY); nCells++; } + if (nCells == 0) { + for (int a=i-1; asysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + retval += bGrid->get(a,b,c)->at(fsgrids::bfield::PERBY); + nCells++; + } + } + } + } + } if (nCells == 0) { cerr << __FILE__ << ":" << __LINE__ << ": ERROR: this should not have fallen through." << endl; return 0.0; @@ -652,6 +678,19 @@ namespace SBC { retval += bGrid->get(i,j+1,k)->at(fsgrids::bfield::PERBZ); nCells++; } + if (nCells == 0) { + for (int a=i-1; asysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + retval += bGrid->get(a,b,c)->at(fsgrids::bfield::PERBZ); + nCells++; + } + } + } + } + } if (nCells == 0) { cerr << __FILE__ << ":" << __LINE__ << ": ERROR: this should not have fallen through." 
<< endl; return 0.0; @@ -665,9 +704,9 @@ namespace SBC { } else { // L2 cells Real retval = 0.0; uint nCells = 0; - for (uint a=-1; a<2; a++) { - for (uint b=-1; b<2; b++) { - for (uint c=0; c<2; c++) { + for (int a=i-1; asysBoundaryLayer == 1) { retval += bGrid->get(a,b,c)->at(fsgrids::bfield::PERBX + component); nCells++; From 7625b1f4c458df92805528745326b2b1a6f19188 Mon Sep 17 00:00:00 2001 From: ipryakem Date: Tue, 26 Nov 2019 15:47:45 +0100 Subject: [PATCH 596/602] Bit mask based determination of which E and B components to solve. --- common.h | 10 ++ fieldsolver/ldz_electric_field.cpp | 143 ++++++++++++----------------- fieldsolver/ldz_magnetic_field.cpp | 65 ++++--------- sysboundary/sysboundary.cpp | 60 ++++++++++++ 4 files changed, 144 insertions(+), 134 deletions(-) diff --git a/common.h b/common.h index 17555ce7c..6ed9a4cc6 100644 --- a/common.h +++ b/common.h @@ -361,6 +361,7 @@ namespace fsgrids { int sysBoundaryLayer; /*!< System boundary layer index. */ Real maxFsDt; /*!< maximum timestep allowed in ordinary space by fieldsolver for this cell**/ int fsGridRank; /*!< Rank in the fsGrids cartesian coordinator */ + uint SOLVE; /*!< Bit mask to determine whether a given cell should solve E or B components. */ }; } @@ -380,6 +381,15 @@ namespace sysboundarytype { }; } +namespace compute { + const uint BX = (1 << 0); // 1 + const uint BY = (1 << 1); // 2 + const uint BZ = (1 << 2); // 4 + const uint EX = (1 << 3); // 8 + const uint EY = (1 << 4); // 16 + const uint EZ = (1 << 5); // 32 +} + /*! Steps in Runge-Kutta methods */ enum {RK_ORDER1, /*!< First order method, one step (and initialisation) */ RK_ORDER2_STEP1, /*!< Two-step second order method, first step */ diff --git a/fieldsolver/ldz_electric_field.cpp b/fieldsolver/ldz_electric_field.cpp index 5fec2dbb3..35a3d64d5 100644 --- a/fieldsolver/ldz_electric_field.cpp +++ b/fieldsolver/ldz_electric_field.cpp @@ -1531,95 +1531,66 @@ void calculateElectricField( if (cellSysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) return; - cuint cellSysBoundaryLayer = technicalGrid.get(i,j,k)->sysBoundaryLayer; - - if ((cellSysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) && (cellSysBoundaryLayer > 1)) { - // Sysboundary level 2+ cells + cuint mask = technicalGrid.get(i,j,k)->SOLVE; + + if ((mask & compute::EX) == compute::EX) { + calculateEdgeElectricFieldX( + perBGrid, + EGrid, + EHallGrid, + EGradPeGrid, + momentsGrid, + dPerBGrid, + dMomentsGrid, + BgBGrid, + technicalGrid, + i, + j, + k, + RKCase + ); + } else { sysBoundaries.getSysBoundary(cellSysBoundaryFlag)->fieldSolverBoundaryCondElectricField(EGrid, i, j, k, 0); + } + + if ((mask & compute::EY) == compute::EY) { + calculateEdgeElectricFieldY( + perBGrid, + EGrid, + EHallGrid, + EGradPeGrid, + momentsGrid, + dPerBGrid, + dMomentsGrid, + BgBGrid, + technicalGrid, + i, + j, + k, + RKCase + ); + } else { sysBoundaries.getSysBoundary(cellSysBoundaryFlag)->fieldSolverBoundaryCondElectricField(EGrid, i, j, k, 1); - sysBoundaries.getSysBoundary(cellSysBoundaryFlag)->fieldSolverBoundaryCondElectricField(EGrid, i, j, k, 2); + } + + if ((mask & compute::EZ) == compute::EZ) { + calculateEdgeElectricFieldZ( + perBGrid, + EGrid, + EHallGrid, + EGradPeGrid, + momentsGrid, + dPerBGrid, + dMomentsGrid, + BgBGrid, + technicalGrid, + i, + j, + k, + RKCase + ); } else { - // Regular cells - // OR level 1 cells whose Ex-component is adjacent to a regular cell - if((cellSysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) || - ((cellSysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) && - 
(technicalGrid.get(i ,j-1,k )->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || - technicalGrid.get(i ,j ,k-1)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || - technicalGrid.get(i ,j-1,k-1)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY)) - ) { - calculateEdgeElectricFieldX( - perBGrid, - EGrid, - EHallGrid, - EGradPeGrid, - momentsGrid, - dPerBGrid, - dMomentsGrid, - BgBGrid, - technicalGrid, - i, - j, - k, - RKCase - ); - } else { - // level 1 cells whose Ex-component is not adjacent to a regular cell - sysBoundaries.getSysBoundary(cellSysBoundaryFlag)->fieldSolverBoundaryCondElectricField(EGrid, i, j, k, 0); - } - // Regular cells - // OR level 1 cells whose Ey-component is adjacent to a regular cell - if((cellSysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) || - ((cellSysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) && - (technicalGrid.get(i-1,j ,k )->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || - technicalGrid.get(i ,j ,k-1)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || - technicalGrid.get(i-1,j ,k-1)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY)) - ) { - calculateEdgeElectricFieldY( - perBGrid, - EGrid, - EHallGrid, - EGradPeGrid, - momentsGrid, - dPerBGrid, - dMomentsGrid, - BgBGrid, - technicalGrid, - i, - j, - k, - RKCase - ); - } else { - // level 1 cells whose Ey-component is not adjacent to a regular cell - sysBoundaries.getSysBoundary(cellSysBoundaryFlag)->fieldSolverBoundaryCondElectricField(EGrid, i, j, k, 1); - } - // Regular cells - // OR level 1 cells whose Ey-component is adjacent to a regular cell - if((cellSysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) || - ((cellSysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) && - (technicalGrid.get(i-1,j ,k )->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || - technicalGrid.get(i ,j-1,k )->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY || - technicalGrid.get(i-1,j-1,k )->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY)) - ) { - calculateEdgeElectricFieldZ( - perBGrid, - EGrid, - EHallGrid, - EGradPeGrid, - momentsGrid, - dPerBGrid, - dMomentsGrid, - BgBGrid, - technicalGrid, - i, - j, - k, - RKCase - ); - } else { - // level 1 cells whose Ez-component is not adjacent to a regular cell - sysBoundaries.getSysBoundary(cellSysBoundaryFlag)->fieldSolverBoundaryCondElectricField(EGrid, i, j, k, 2); - } + sysBoundaries.getSysBoundary(cellSysBoundaryFlag)->fieldSolverBoundaryCondElectricField(EGrid, i, j, k, 2); } } diff --git a/fieldsolver/ldz_magnetic_field.cpp b/fieldsolver/ldz_magnetic_field.cpp index 9ea8ef1de..188029d51 100644 --- a/fieldsolver/ldz_magnetic_field.cpp +++ b/fieldsolver/ldz_magnetic_field.cpp @@ -233,27 +233,8 @@ void propagateMagneticFieldSimple( for (int k=0; kfsGridRank=technicalGrid.getRank(); - - if(technicalGrid.get(i,j,k)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { - // Propagate B on all local cells on all faces: - propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, true, true, true); - } else { - // Easy case: in case we are neighboured by a non-sysboundary cell, we still solve the - // fields normally here. 
- cuint sysBoundaryLayer = technicalGrid.get(i,j,k)->sysBoundaryLayer; - if(sysBoundaryLayer == 1) { - bool prop_x = (technicalGrid.get(i-1,j,k)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY); - bool prop_y = (technicalGrid.get(i,j-1,k)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY); - bool prop_z = (technicalGrid.get(i,j,k-1)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY); - if (prop_x || prop_y || prop_z) { - propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, prop_x, prop_y, prop_z); - } - } - } + cuint mask = technicalGrid.get(i,j,k)->SOLVE; + propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, ((mask & compute::BX) == compute::BX), ((mask & compute::BY) == compute::BY), ((mask & compute::BZ) == compute::BZ)); } } } @@ -284,23 +265,17 @@ void propagateMagneticFieldSimple( for (int k=0; ksysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && - technicalGrid.get(i,j,k)->sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { - - cuint sysBoundaryFlag = technicalGrid.get(i,j,k)->sysBoundaryFlag; - cuint sysBoundaryLayer = technicalGrid.get(i,j,k)->sysBoundaryLayer; - - // L1 pass - if (sysBoundaryLayer == 1) { - for (uint component = 0; component < 3; component++) { - cint neigh_i=i + ((component==0)?-1:0); - cint neigh_j=j + ((component==1)?-1:0); - cint neigh_k=k + ((component==2)?-1:0); - uint neighborSysBoundaryFlag = technicalGrid.get(neigh_i, neigh_j, neigh_k)->sysBoundaryFlag; - if (neighborSysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY) { // Complement to previous loop - propagateSysBoundaryMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, sysBoundaries, dt, RKCase, component); - } - } + cuint mask = technicalGrid.get(i,j,k)->SOLVE; + // L1 pass + if (technicalGrid.get(i,j,k)->sysBoundaryLayer == 1) { + if ((mask & compute::BX) != compute::BX) { + propagateSysBoundaryMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, sysBoundaries, dt, RKCase, 0); + } + if ((mask & compute::BY) != compute::BY) { + propagateSysBoundaryMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, sysBoundaries, dt, RKCase, 1); + } + if ((mask & compute::BZ) != compute::BZ) { + propagateSysBoundaryMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, sysBoundaries, dt, RKCase, 2); } } } @@ -313,16 +288,10 @@ void propagateMagneticFieldSimple( for (int j=0; jsysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && - technicalGrid.get(i,j,k)->sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { - - cuint sysBoundaryFlag = technicalGrid.get(i,j,k)->sysBoundaryFlag; - cuint sysBoundaryLayer = technicalGrid.get(i,j,k)->sysBoundaryLayer; - - // L2 pass - if (sysBoundaryLayer == 2) { - for (uint component = 0; component < 3; component++) { - propagateSysBoundaryMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, sysBoundaries, dt, RKCase, component); - } + technicalGrid.get(i,j,k)->sysBoundaryLayer == 2 + ) { + for (uint component = 0; component < 3; component++) { + propagateSysBoundaryMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, sysBoundaries, dt, RKCase, component); } } } diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp index 90bdc65c1..cdf13900e 100644 --- a/sysboundary/sysboundary.cpp +++ b/sysboundary/sysboundary.cpp @@ -390,6 +390,8 @@ bool SysBoundary::classifyCells(dccrg::DccrgsysBoundaryFlag = sysboundarytype::NOT_SYSBOUNDARY; 
technicalGrid.get(x,y,z)->sysBoundaryLayer = 0; technicalGrid.get(x,y,z)->maxFsDt = std::numeric_limits::max(); + // Set the fsgrid rank in the technical grid + technicalGrid.get(x,y,z)->fsGridRank=technicalGrid.getRank(); } } } @@ -523,6 +525,64 @@ bool SysBoundary::classifyCells(dccrg::Dccrg fsGridDimensions = {convert(P::xcells_ini) * pow(2,P::amrMaxSpatialRefLevel), + convert(P::ycells_ini) * pow(2,P::amrMaxSpatialRefLevel), + convert(P::zcells_ini) * pow(2,P::amrMaxSpatialRefLevel)}; + + // One pass to setup the bit mask to know which components the field solver should propagate. + #pragma omp parallel for collapse(3) + for (int x = 0; x < localSize[0]; ++x) { + for (int y = 0; y < localSize[1]; ++y) { + for (int z = 0; z < localSize[2]; ++z) { + technicalGrid.get(x,y,z)->SOLVE = 0; + + std::array globalIndices = technicalGrid.getGlobalIndices(x,y,z); + + if ( globalIndices[0] == 0 || globalIndices[0] == fsGridDimensions[0]-1 + || globalIndices[1] == 0 || globalIndices[1] == fsGridDimensions[1]-1 + || globalIndices[2] == 0 || globalIndices[2] == fsGridDimensions[2]-1 + ) { + continue; + } + if (technicalGrid.get(x,y,z)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + technicalGrid.get(x,y,z)->SOLVE = technicalGrid.get(x,y,z)->SOLVE | compute::BX; + technicalGrid.get(x,y,z)->SOLVE = technicalGrid.get(x,y,z)->SOLVE | compute::BY; + technicalGrid.get(x,y,z)->SOLVE = technicalGrid.get(x,y,z)->SOLVE | compute::BZ; + technicalGrid.get(x,y,z)->SOLVE = technicalGrid.get(x,y,z)->SOLVE | compute::EX; + technicalGrid.get(x,y,z)->SOLVE = technicalGrid.get(x,y,z)->SOLVE | compute::EY; + technicalGrid.get(x,y,z)->SOLVE = technicalGrid.get(x,y,z)->SOLVE | compute::EZ; + } else { + if (technicalGrid.get(x-1,y,z)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + technicalGrid.get(x,y,z)->SOLVE = technicalGrid.get(x,y,z)->SOLVE | compute::BX; + technicalGrid.get(x,y,z)->SOLVE = technicalGrid.get(x,y,z)->SOLVE | compute::EY; + technicalGrid.get(x,y,z)->SOLVE = technicalGrid.get(x,y,z)->SOLVE | compute::EZ; + } + if (technicalGrid.get(x,y-1,z)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + technicalGrid.get(x,y,z)->SOLVE = technicalGrid.get(x,y,z)->SOLVE | compute::BY; + technicalGrid.get(x,y,z)->SOLVE = technicalGrid.get(x,y,z)->SOLVE | compute::EX; + technicalGrid.get(x,y,z)->SOLVE = technicalGrid.get(x,y,z)->SOLVE | compute::EZ; + } + if (technicalGrid.get(x,y,z-1)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + technicalGrid.get(x,y,z)->SOLVE = technicalGrid.get(x,y,z)->SOLVE | compute::BZ; + technicalGrid.get(x,y,z)->SOLVE = technicalGrid.get(x,y,z)->SOLVE | compute::EX; + technicalGrid.get(x,y,z)->SOLVE = technicalGrid.get(x,y,z)->SOLVE | compute::EY; + } + if (technicalGrid.get(x-1,y-1,z)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + technicalGrid.get(x,y,z)->SOLVE = technicalGrid.get(x,y,z)->SOLVE | compute::EZ; + } + if (technicalGrid.get(x-1,y,z-1)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + technicalGrid.get(x,y,z)->SOLVE = technicalGrid.get(x,y,z)->SOLVE | compute::EY; + } + if (technicalGrid.get(x,y-1,z-1)->sysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + technicalGrid.get(x,y,z)->SOLVE = technicalGrid.get(x,y,z)->SOLVE | compute::EX; + } + } + } + } + } + + technicalGrid.updateGhostCells(); + return success; } From 956cd27d9c76de4eb35085b66364cb90c31576ec Mon Sep 17 00:00:00 2001 From: ipryakem Date: Tue, 26 Nov 2019 16:30:05 +0100 Subject: [PATCH 597/602] Bit mask also in ionosphere.cpp. 
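Both this patch and the previous one collapse the repeated
sysBoundaryLayer/sysBoundaryFlag neighbour lookups into single mask tests.
For reference, a self-contained sketch of the test-and-set idiom, mirroring
the compute:: constants added to common.h in PATCH 596 (sketch only, not
repository code):

    #include <iostream>

    // Same bit values as the compute:: namespace introduced in PATCH 596.
    namespace compute {
       const unsigned int BX = (1 << 0);
       const unsigned int BY = (1 << 1);
       const unsigned int BZ = (1 << 2);
       const unsigned int EX = (1 << 3);
       const unsigned int EY = (1 << 4);
       const unsigned int EZ = (1 << 5);
    }

    int main() {
       unsigned int SOLVE = 0;
       SOLVE = SOLVE | compute::BX;   // classifyCells marks solvable components like this
       SOLVE = SOLVE | compute::EY;

       // The tests used by the field solver and by this ionosphere code:
       if ((SOLVE & compute::BX) == compute::BX) std::cout << "solve BX here\n";
       if ((SOLVE & compute::BZ) != compute::BZ) std::cout << "apply boundary condition to BZ\n";
       return 0;
    }

One mask read per cell replaces several neighbour dereferences, which is both
cheaper and harder to get wrong near domain edges.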
--- sysboundary/ionosphere.cpp | 57 ++++++++++++++++++-------------------- 1 file changed, 27 insertions(+), 30 deletions(-) diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index 91da22f4e..c3e5b6cfe 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -557,30 +557,30 @@ namespace SBC { if (technicalGrid.get(i,j,k)->sysBoundaryLayer == 1) { switch(component) { case 0: - if ( technicalGrid.get(i-1,j,k)->sysBoundaryLayer==1 && technicalGrid.get(i-2,j,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY - && technicalGrid.get(i+1,j,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY + if ( ((technicalGrid.get(i-1,j,k)->SOLVE & compute::BX) == compute::BX) + && ((technicalGrid.get(i+1,j,k)->SOLVE & compute::BX) == compute::BX) ) { return 0.5 * (bGrid->get(i-1,j,k)->at(fsgrids::bfield::PERBX) + bGrid->get(i+1,j,k)->at(fsgrids::bfield::PERBX)); - } else if (technicalGrid.get(i-1,j,k)->sysBoundaryLayer==1 && technicalGrid.get(i-2,j,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + } else if ((technicalGrid.get(i-1,j,k)->SOLVE & compute::BX) == compute::BX) { return bGrid->get(i-1,j,k)->at(fsgrids::bfield::PERBX); - } else if (technicalGrid.get(i+1,j,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + } else if ((technicalGrid.get(i+1,j,k)->SOLVE & compute::BX) == compute::BX) { return bGrid->get(i+1,j,k)->at(fsgrids::bfield::PERBX); } else { Real retval = 0.0; uint nCells = 0; - if (technicalGrid.get(i,j-1,k)->sysBoundaryLayer==1 && technicalGrid.get(i-1,j-1,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + if ((technicalGrid.get(i,j-1,k)->SOLVE & compute::BX) == compute::BX) { retval += bGrid->get(i,j-1,k)->at(fsgrids::bfield::PERBX); nCells++; } - if (technicalGrid.get(i,j+1,k)->sysBoundaryLayer==1 && technicalGrid.get(i-1,j+1,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + if ((technicalGrid.get(i,j+1,k)->SOLVE & compute::BX) == compute::BX) { retval += bGrid->get(i,j+1,k)->at(fsgrids::bfield::PERBX); nCells++; } - if (technicalGrid.get(i,j,k-1)->sysBoundaryLayer==1 && technicalGrid.get(i-1,j,k-1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + if ((technicalGrid.get(i,j,k-1)->SOLVE & compute::BX) == compute::BX) { retval += bGrid->get(i,j,k-1)->at(fsgrids::bfield::PERBX); nCells++; } - if (technicalGrid.get(i,j,k+1)->sysBoundaryLayer==1 && technicalGrid.get(i-1,j,k+1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + if ((technicalGrid.get(i,j,k+1)->SOLVE & compute::BX) == compute::BX) { retval += bGrid->get(i,j,k+1)->at(fsgrids::bfield::PERBX); nCells++; } @@ -588,8 +588,7 @@ namespace SBC { for (int a=i-1; asysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + if ((technicalGrid.get(a,b,c)->SOLVE & compute::BX) == compute::BX) { retval += bGrid->get(a,b,c)->at(fsgrids::bfield::PERBX); nCells++; } @@ -604,30 +603,30 @@ namespace SBC { return retval / nCells; } case 1: - if ( technicalGrid.get(i,j-1,k)->sysBoundaryLayer==1 && technicalGrid.get(i,j-2,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY - && technicalGrid.get(i,j+1,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY + if ( (technicalGrid.get(i,j-1,k)->SOLVE & compute::BY) == compute::BY + && (technicalGrid.get(i,j+1,k)->SOLVE & compute::BY) == compute::BY ) { return 0.5 * (bGrid->get(i,j-1,k)->at(fsgrids::bfield::PERBY) + bGrid->get(i,j+1,k)->at(fsgrids::bfield::PERBY)); - } else if (technicalGrid.get(i,j-1,k)->sysBoundaryLayer==1 && technicalGrid.get(i,j-2,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + } else if 
((technicalGrid.get(i,j-1,k)->SOLVE & compute::BY) == compute::BY) { return bGrid->get(i,j-1,k)->at(fsgrids::bfield::PERBY); - } else if (technicalGrid.get(i,j+1,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + } else if ((technicalGrid.get(i,j+1,k)->SOLVE & compute::BY) == compute::BY) { return bGrid->get(i,j+1,k)->at(fsgrids::bfield::PERBY); } else { Real retval = 0.0; uint nCells = 0; - if (technicalGrid.get(i-1,j,k)->sysBoundaryLayer==1 && technicalGrid.get(i-1,j-1,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + if ((technicalGrid.get(i-1,j,k)->SOLVE & compute::BY) == compute::BY) { retval += bGrid->get(i-1,j,k)->at(fsgrids::bfield::PERBY); nCells++; } - if (technicalGrid.get(i+1,j,k)->sysBoundaryLayer==1 && technicalGrid.get(i+1,j-1,k)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + if ((technicalGrid.get(i+1,j,k)->SOLVE & compute::BY) == compute::BY) { retval += bGrid->get(i+1,j,k)->at(fsgrids::bfield::PERBY); nCells++; } - if (technicalGrid.get(i,j,k-1)->sysBoundaryLayer==1 && technicalGrid.get(i,j-1,k-1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + if ((technicalGrid.get(i,j,k-1)->SOLVE & compute::BY) == compute::BY) { retval += bGrid->get(i,j,k-1)->at(fsgrids::bfield::PERBY); nCells++; } - if (technicalGrid.get(i,j,k+1)->sysBoundaryLayer==1 && technicalGrid.get(i,j-1,k+1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + if ((technicalGrid.get(i,j,k+1)->SOLVE & compute::BY) == compute::BY) { retval += bGrid->get(i,j,k+1)->at(fsgrids::bfield::PERBY); nCells++; } @@ -635,8 +634,7 @@ namespace SBC { for (int a=i-1; asysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + if ((technicalGrid.get(a,b,c)->SOLVE & compute::BY) == compute::BY) { retval += bGrid->get(a,b,c)->at(fsgrids::bfield::PERBY); nCells++; } @@ -651,30 +649,30 @@ namespace SBC { return retval / nCells; } case 2: - if ( technicalGrid.get(i,j,k-1)->sysBoundaryLayer==1 && technicalGrid.get(i,j,k-2)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY - && technicalGrid.get(i,j,k+1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY + if ( (technicalGrid.get(i,j,k-1)->SOLVE & compute::BZ) == compute::BZ + && (technicalGrid.get(i,j,k+1)->SOLVE & compute::BZ) == compute::BZ ) { return 0.5 * (bGrid->get(i,j,k-1)->at(fsgrids::bfield::PERBZ) + bGrid->get(i,j,k+1)->at(fsgrids::bfield::PERBZ)); - } else if (technicalGrid.get(i,j,k-1)->sysBoundaryLayer==1 && technicalGrid.get(i,j,k-2)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + } else if ((technicalGrid.get(i,j,k-1)->SOLVE & compute::BZ) == compute::BZ) { return bGrid->get(i,j,k-1)->at(fsgrids::bfield::PERBZ); - } else if (technicalGrid.get(i,j,k+1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + } else if ((technicalGrid.get(i,j,k+1)->SOLVE & compute::BZ) == compute::BZ) { return bGrid->get(i,j,k+1)->at(fsgrids::bfield::PERBZ); } else { Real retval = 0.0; uint nCells = 0; - if (technicalGrid.get(i-1,j,k)->sysBoundaryLayer==1 && technicalGrid.get(i-1,j,k-1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + if ((technicalGrid.get(i-1,j,k)->SOLVE & compute::BZ) == compute::BZ) { retval += bGrid->get(i-1,j,k)->at(fsgrids::bfield::PERBZ); nCells++; } - if (technicalGrid.get(i+1,j,k)->sysBoundaryLayer==1 && technicalGrid.get(i+1,j,k-1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + if ((technicalGrid.get(i+1,j,k)->SOLVE & compute::BZ) == compute::BZ) { retval += bGrid->get(i+1,j,k)->at(fsgrids::bfield::PERBZ); nCells++; } - if (technicalGrid.get(i,j-1,k)->sysBoundaryLayer==1 && 
technicalGrid.get(i,j-1,k-1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + if ((technicalGrid.get(i,j-1,k)->SOLVE & compute::BZ) == compute::BZ) { retval += bGrid->get(i,j-1,k)->at(fsgrids::bfield::PERBZ); nCells++; } - if (technicalGrid.get(i,j+1,k)->sysBoundaryLayer==1 && technicalGrid.get(i,j+1,k-1)->sysBoundaryFlag==sysboundarytype::NOT_SYSBOUNDARY) { + if ((technicalGrid.get(i,j+1,k)->SOLVE & compute::BZ) == compute::BZ) { retval += bGrid->get(i,j+1,k)->at(fsgrids::bfield::PERBZ); nCells++; } @@ -682,8 +680,7 @@ for (int a=i-1; asysBoundaryFlag == sysboundarytype::NOT_SYSBOUNDARY) { + if ((technicalGrid.get(a,b,c)->SOLVE & compute::BZ) == compute::BZ) { retval += bGrid->get(a,b,c)->at(fsgrids::bfield::PERBZ); nCells++; } From efe18cb26c5c75bb1731f7ba5d5a1bb9cf10c3d2 Mon Sep 17 00:00:00 2001 From: ipryakem Date: Thu, 28 Nov 2019 10:38:09 +0100 Subject: [PATCH 598/602] 2D fix. --- sysboundary/sysboundary.cpp | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp index cdf13900e..5179fcc66 100644 --- a/sysboundary/sysboundary.cpp +++ b/sysboundary/sysboundary.cpp @@ -525,9 +525,7 @@ bool SysBoundary::classifyCells(dccrg::Dccrg<SpatialCell, - const std::array<int, 3> fsGridDimensions = {convert<int>(P::xcells_ini) * pow(2,P::amrMaxSpatialRefLevel), - convert<int>(P::ycells_ini) * pow(2,P::amrMaxSpatialRefLevel), - convert<int>(P::zcells_ini) * pow(2,P::amrMaxSpatialRefLevel)}; + const std::array<int32_t, 3> fsGridDimensions = technicalGrid.getGlobalSize(); // One pass to setup the bit mask to know which components the field solver should propagate. #pragma omp parallel for collapse(3) @@ -538,9 +536,9 @@ bool SysBoundary::classifyCells(dccrg::Dccrg<SpatialCell, std::array<int32_t, 3> globalIndices = technicalGrid.getGlobalIndices(x,y,z); - if ( globalIndices[0] == 0 || globalIndices[0] == fsGridDimensions[0]-1 - || globalIndices[1] == 0 || globalIndices[1] == fsGridDimensions[1]-1 - || globalIndices[2] == 0 || globalIndices[2] == fsGridDimensions[2]-1 + if ( ((globalIndices[0] == 0 || globalIndices[0] == fsGridDimensions[0]-1) && fsGridDimensions[0] > 1) + || ((globalIndices[1] == 0 || globalIndices[1] == fsGridDimensions[1]-1) && fsGridDimensions[1] > 1) + || ((globalIndices[2] == 0 || globalIndices[2] == fsGridDimensions[2]-1) && fsGridDimensions[2] > 1) ) { continue; } From 308bde682d7bccdbbeafa498cd6cb3d2d139906d Mon Sep 17 00:00:00 2001 From: ipryakem Date: Thu, 28 Nov 2019 16:10:41 +0100 Subject: [PATCH 599/602] Have distributions in layer 1 and 2 system boundaries. Hopefully solves the restart problem of fluffy ionosphere. (Run test pending but pushing this nevertheless.)
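In practice this removes the layer-1 special casing from the Vlasov boundary update: in outflow.cpp below, the COPY and LIMIT schemes now always operate on the full velocity distribution instead of copying moments only for layers above 1, and the moment loops in cpu_moments.cpp skip only DO_NOT_COMPUTE cells. A stand-alone sketch of the dispatch change follows; CopyMode and the chooseCopyMode functions are illustrative names, not code from this patch.

#include <iostream>

enum class CopyMode { FullDistribution, MomentsOnly };

// Old behaviour: only layer-1 boundary cells received the full distribution.
CopyMode chooseCopyModeOld(int sysBoundaryLayer) {
   return (sysBoundaryLayer == 1) ? CopyMode::FullDistribution
                                  : CopyMode::MomentsOnly;
}

// New behaviour: layer 1 and layer 2 cells both carry distributions.
CopyMode chooseCopyModeNew(int /*sysBoundaryLayer*/) {
   return CopyMode::FullDistribution;
}

int main() {
   for (int layer : {1, 2}) {
      std::cout << "layer " << layer << ": old copies "
                << (chooseCopyModeOld(layer) == CopyMode::FullDistribution ? "full f" : "moments only")
                << ", new copies full f\n";
   }
   return 0;
}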
--- sysboundary/outflow.cpp | 12 +-- sysboundary/sysboundary.cpp | 4 +- sysboundary/sysboundarycondition.cpp | 155 ++++++--------------------- sysboundary/sysboundarycondition.h | 2 +- vlasovsolver/cpu_moments.cpp | 21 ++-- 5 files changed, 45 insertions(+), 149 deletions(-) diff --git a/sysboundary/outflow.cpp b/sysboundary/outflow.cpp index a0d2a4dc2..1df942aae 100644 --- a/sysboundary/outflow.cpp +++ b/sysboundary/outflow.cpp @@ -461,18 +461,10 @@ namespace SBC { case vlasovscheme::NONE: break; case vlasovscheme::COPY: - if (cell->sysBoundaryLayer == 1) { - vlasovBoundaryCopyFromTheClosestNbr(mpiGrid,cellID,false,popID,calculate_V_moments); - } else { - vlasovBoundaryCopyFromTheClosestNbr(mpiGrid,cellID,true,popID,calculate_V_moments); - } + vlasovBoundaryCopyFromTheClosestNbr(mpiGrid,cellID,false,popID,calculate_V_moments); break; case vlasovscheme::LIMIT: - if (cell->sysBoundaryLayer == 1) { - vlasovBoundaryCopyFromTheClosestNbrAndLimit(mpiGrid,cellID,popID); - } else { - vlasovBoundaryCopyFromTheClosestNbr(mpiGrid,cellID,true,popID,calculate_V_moments); - } + vlasovBoundaryCopyFromTheClosestNbrAndLimit(mpiGrid,cellID,popID); break; default: std::cerr << __FILE__ << ":" << __LINE__ << "ERROR: invalid Outflow Vlasov scheme!" << std::endl; diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp index 5179fcc66..e243dc2de 100644 --- a/sysboundary/sysboundary.cpp +++ b/sysboundary/sysboundary.cpp @@ -605,8 +605,6 @@ bool SysBoundary::applyInitialState( if( // This is to skip the reapplication Parameters::isRestart == true // When not restarting && (*it)->doApplyUponRestart() == false // When reapplication is not requested - && (*it)->getIndex() != sysboundarytype::IONOSPHERE // But this is to force it when we have either IONOSPHERE - && (*it)->getIndex() != sysboundarytype::SET_MAXWELLIAN // or SET_MAXWELLIAN as otherwise the POP_METADATA are not properly set ) { continue; } @@ -634,7 +632,7 @@ bool SysBoundary::applyInitialState( void SysBoundary::applySysBoundaryVlasovConditions( dccrg::Dccrg<SpatialCell,dccrg::Cartesian_Geometry>& mpiGrid, creal& t, - const bool calculate_V_moments // if true, compute into _V, false into _R moments so that the interpolated ones can be done for layer 1 boundaries + const bool calculate_V_moments // if true, compute into _V, false into _R moments so that the interpolated ones can be done ) { if(sysBoundaries.size()==0) { return; //no system boundaries } diff --git a/sysboundary/sysboundarycondition.cpp b/sysboundary/sysboundarycondition.cpp index 326a6c9fb..2b3723455 100644 --- a/sysboundary/sysboundarycondition.cpp +++ b/sysboundary/sysboundarycondition.cpp @@ -343,15 +343,14 @@ namespace SBC { } } - /*! Function used to copy the distribution and moments from one cell to another. In layer 2, copy only the moments. + /*! Function used to copy the distribution and moments from one cell to another. * \param from Pointer to parent cell to copy from. * \param to Pointer to destination cell. - * \param allowBlockAdjustment If true, blocks can be created or destroyed. If false, only blocks existing in the destination cell are copied. */ void SysBoundaryCondition::copyCellData( SpatialCell* from, SpatialCell* to, - const bool& copyMomentsOnly, + const bool copyMomentsOnly, const uint popID, const bool calculate_V_moments ) { @@ -377,7 +376,7 @@ namespace SBC { } } - if(to->sysBoundaryLayer == 1 && !copyMomentsOnly) { // Do this only for the first layer, the other layers do not need this. Do only if copyMomentsOnly is false.
+ if(!copyMomentsOnly) { // Do this only if copyMomentsOnly is false. to->set_population(from->get_population(popID), popID); } else { if (calculate_V_moments) { ... } @@ -399,8 +398,6 @@ } /*! Take a list of cells and set the destination cell distribution function to the average of the list's cells'. - * For layer 1 the whole distribution function is copied. - * For layer >1, only moments are copied * \param mpiGrid Grid * \param cellList Vector of cells to copy from. * \param to Pointer to cell in which to set the averaged distribution. @@ -416,131 +413,45 @@ namespace SBC { const size_t numberOfCells = cellList.size(); creal factor = fluffiness / convert<Real>(numberOfCells); - if (to->sysBoundaryLayer == 1) { - // Rescale own vspace - const Realf* toData = to->get_data(popID); - for (vmesh::LocalID toBlockLID=0; toBlockLID < to->get_number_of_velocity_blocks(popID); ++toBlockLID) { - // Pointer to target block data - Realf* toData = to->get_data(toBlockLID,popID); - - // Add values from source cells - for (uint kc=0; kcparameters[CellParams::RHOM_V] *= 1.0 - fluffiness; - to->parameters[CellParams::VX_V] *= 1.0 - fluffiness; - to->parameters[CellParams::VY_V] *= 1.0 - fluffiness; - to->parameters[CellParams::VZ_V] *= 1.0 - fluffiness; - to->parameters[CellParams::RHOQ_V] *= 1.0 - fluffiness; - to->parameters[CellParams::P_11_V] *= 1.0 - fluffiness; - to->parameters[CellParams::P_22_V] *= 1.0 - fluffiness; - to->parameters[CellParams::P_33_V] *= 1.0 - fluffiness; - } else { - to->parameters[CellParams::RHOM_R] *= 1.0 - fluffiness; - to->parameters[CellParams::VX_R] *= 1.0 - fluffiness; - to->parameters[CellParams::VY_R] *= 1.0 - fluffiness; - to->parameters[CellParams::VZ_R] *= 1.0 - fluffiness; - to->parameters[CellParams::RHOQ_R] *= 1.0 - fluffiness; - to->parameters[CellParams::P_11_R] *= 1.0 - fluffiness; - to->parameters[CellParams::P_22_R] *= 1.0 - fluffiness; - to->parameters[CellParams::P_33_R] *= 1.0 - fluffiness; - } - } + + // Rescale own vspace + const Realf* toData = to->get_data(popID); + for (vmesh::LocalID toBlockLID=0; toBlockLID < to->get_number_of_velocity_blocks(popID); ++toBlockLID) { + // Pointer to target block data + Realf* toData = to->get_data(toBlockLID,popID); - Population & pop = to->get_population(popID); - if(calculate_V_moments) { - pop.RHO_V *= 1.0 - fluffiness; - pop.V_V[0] *= 1.0 - fluffiness; - pop.V_V[1] *= 1.0 - fluffiness; - pop.V_V[2] *= 1.0 - fluffiness; - pop.P_V[0] *= 1.0 - fluffiness; - pop.P_V[1] *= 1.0 - fluffiness; - pop.P_V[2] *= 1.0 - fluffiness; - } else { - pop.RHO_R *= 1.0 - fluffiness; - pop.V_R[0] *= 1.0 - fluffiness; - pop.V_R[1] *= 1.0 - fluffiness; - pop.V_R[2] *= 1.0 - fluffiness; - pop.P_R[0] *= 1.0 - fluffiness; - pop.P_R[1] *= 1.0 - fluffiness; - pop.P_R[2] *= 1.0 - fluffiness; + // Add values from source cells + for (uint kc=0; kc
- vmesh::GlobalID toBlockLID = to->get_velocity_block_local_id(incBlockGID,popID); - if (toBlockLID == SpatialCell::invalid_local_id()) { - to->add_velocity_block(incBlockGID,popID); - toBlockLID = to->get_velocity_block_local_id(incBlockGID,popID); - } - - // Pointer to target block data - Realf* toData = to->get_data(toBlockLID,popID); - - // Add values from source cells - for (uint kcparameters[CellParams::RHOM_V] += factor*incomingCell->parameters[CellParams::RHOM_V]; - to->parameters[CellParams::VX_V] += factor*incomingCell->parameters[CellParams::VX_V]; - to->parameters[CellParams::VY_V] += factor*incomingCell->parameters[CellParams::VY_V]; - to->parameters[CellParams::VZ_V] += factor*incomingCell->parameters[CellParams::VZ_V]; - to->parameters[CellParams::RHOQ_V] += factor*incomingCell->parameters[CellParams::RHOQ_V]; - to->parameters[CellParams::P_11_V] += factor*incomingCell->parameters[CellParams::P_11_V]; - to->parameters[CellParams::P_22_V] += factor*incomingCell->parameters[CellParams::P_22_V]; - to->parameters[CellParams::P_33_V] += factor*incomingCell->parameters[CellParams::P_33_V]; - } else { - to->parameters[CellParams::RHOM_R] += factor*incomingCell->parameters[CellParams::RHOM_V]; - to->parameters[CellParams::VX_R] += factor*incomingCell->parameters[CellParams::VX_R]; - to->parameters[CellParams::VY_R] += factor*incomingCell->parameters[CellParams::VY_R]; - to->parameters[CellParams::VZ_R] += factor*incomingCell->parameters[CellParams::VZ_R]; - to->parameters[CellParams::RHOQ_R] += factor*incomingCell->parameters[CellParams::RHOQ_R]; - to->parameters[CellParams::P_11_R] += factor*incomingCell->parameters[CellParams::P_11_R]; - to->parameters[CellParams::P_22_R] += factor*incomingCell->parameters[CellParams::P_22_R]; - to->parameters[CellParams::P_33_R] += factor*incomingCell->parameters[CellParams::P_33_R]; - } + const Realf* fromData = incomingCell->get_data(popID); + for (vmesh::LocalID incBlockLID=0; incBlockLID < incomingCell->get_number_of_velocity_blocks(popID); ++incBlockLID) { + // Global ID of the block containing incoming data + vmesh::GlobalID incBlockGID = incomingCell->get_velocity_block_global_id(incBlockLID,popID); + + // Get local ID of the target block. If the block doesn't exist, create it.
+ vmesh::GlobalID toBlockLID = to->get_velocity_block_local_id(incBlockGID,popID); + if (toBlockLID == SpatialCell::invalid_local_id()) { + to->add_velocity_block(incBlockGID,popID); + toBlockLID = to->get_velocity_block_local_id(incBlockGID,popID); } - Population & pop = to->get_population(popID); - const Population & fromPop = incomingCell->get_population(popID); - if(calculate_V_moments) { - pop.RHO_V += factor*fromPop.RHO_V ; - pop.V_V[0] += factor*fromPop.V_V[0]; - pop.V_V[1] += factor*fromPop.V_V[1]; - pop.V_V[2] += factor*fromPop.V_V[2]; - pop.P_V[0] += factor*fromPop.P_V[0]; - pop.P_V[1] += factor*fromPop.P_V[1]; - pop.P_V[2] += factor*fromPop.P_V[2]; - } else { - pop.RHO_R += factor*fromPop.RHO_R ; - pop.V_R[0] += factor*fromPop.V_R[0]; - pop.V_R[1] += factor*fromPop.V_R[1]; - pop.V_R[2] += factor*fromPop.V_R[2]; - pop.P_R[0] += factor*fromPop.P_R[0]; - pop.P_R[1] += factor*fromPop.P_R[1]; - pop.P_R[2] += factor*fromPop.P_R[2]; + // Pointer to target block data + Realf* toData = to->get_data(toBlockLID,popID); + + // Add values from source cells + for (uint kc=0; kcsysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE || - (cell->sysBoundaryLayer != 1 && - cell->sysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY)) - ) { + if (!doNotSkip && cell->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { skipMoments = true; } @@ -161,7 +156,7 @@ void calculateMoments_R( for (size_t c=0; csysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && cell->sysBoundaryLayer != 1) { + if (cell->sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { continue; } @@ -231,7 +226,7 @@ void calculateMoments_R( #pragma omp parallel for for (size_t c=0; csysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && cell->sysBoundaryLayer != 1) { + if (cell->sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { continue; } cell->parameters[CellParams::VX_R] = divideIfNonZero(cell->parameters[CellParams::VX_R], cell->parameters[CellParams::RHOM_R]); @@ -250,7 +245,7 @@ void calculateMoments_R( for (size_t c=0; csysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && cell->sysBoundaryLayer != 1) { + if (cell->sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { continue; } @@ -310,7 +305,7 @@ void calculateMoments_V( for (size_t c=0; csysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && cell->sysBoundaryLayer != 1) { + if (cell->sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { continue; } @@ -362,7 +357,7 @@ void calculateMoments_V( #pragma omp parallel for for (size_t c=0; csysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && cell->sysBoundaryLayer != 1) { + if (cell->sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { continue; } cell->parameters[CellParams::VX_V] = divideIfNonZero(cell->parameters[CellParams::VX_V], cell->parameters[CellParams::RHOM_V]); @@ -381,7 +376,7 @@ void calculateMoments_V( for (size_t c=0; csysBoundaryFlag != sysboundarytype::NOT_SYSBOUNDARY && cell->sysBoundaryLayer != 1) { + if (cell->sysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { continue; } From 221d61f63d942aac3a23f9725cc3fe1aabebea9f Mon Sep 17 00:00:00 2001 From: ipryakem Date: Mon, 2 Dec 2019 12:07:02 +0100 Subject: [PATCH 600/602] Fix blunder in moment computation changes from previous commit.
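The blunder: the previous commit replaced the old layer-based skip test with flag != sysboundarytype::DO_NOT_COMPUTE, which inverts the intent, so the moment loops skipped every cell that should have been computed and processed only the DO_NOT_COMPUTE ones. A stand-alone sketch of the two conditions; the enum values here are illustrative, not the actual ones.

#include <cassert>

namespace sysboundarytype {
   enum { NOT_SYSBOUNDARY, DO_NOT_COMPUTE, IONOSPHERE, OUTFLOW }; // illustrative values
}

bool skipBlundered(int flag) { return flag != sysboundarytype::DO_NOT_COMPUTE; }
bool skipFixed(int flag)     { return flag == sysboundarytype::DO_NOT_COMPUTE; }

int main() {
   // Regular simulation cells must be computed, not skipped:
   assert(skipBlundered(sysboundarytype::NOT_SYSBOUNDARY)); // old: wrongly skipped
   assert(!skipFixed(sysboundarytype::NOT_SYSBOUNDARY));    // fixed: computed
   // Only DO_NOT_COMPUTE filler cells should be skipped:
   assert(skipFixed(sysboundarytype::DO_NOT_COMPUTE));
   return 0;
}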
--- vlasovsolver/cpu_moments.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/vlasovsolver/cpu_moments.cpp b/vlasovsolver/cpu_moments.cpp index 0402d52a1..42c24b30a 100644 --- a/vlasovsolver/cpu_moments.cpp +++ b/vlasovsolver/cpu_moments.cpp @@ -156,7 +156,7 @@ void calculateMoments_R( for (size_t c=0; csysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { + if (cell->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { continue; } @@ -226,7 +226,7 @@ void calculateMoments_R( #pragma omp parallel for for (size_t c=0; csysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { + if (cell->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { continue; } cell->parameters[CellParams::VX_R] = divideIfNonZero(cell->parameters[CellParams::VX_R], cell->parameters[CellParams::RHOM_R]); @@ -245,7 +245,7 @@ void calculateMoments_R( for (size_t c=0; csysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { + if (cell->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { continue; } @@ -305,7 +305,7 @@ void calculateMoments_V( for (size_t c=0; csysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { + if (cell->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { continue; } @@ -357,7 +357,7 @@ void calculateMoments_V( #pragma omp parallel for for (size_t c=0; csysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { + if (cell->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { continue; } cell->parameters[CellParams::VX_V] = divideIfNonZero(cell->parameters[CellParams::VX_V], cell->parameters[CellParams::RHOM_V]); @@ -376,7 +376,7 @@ void calculateMoments_V( for (size_t c=0; csysBoundaryFlag != sysboundarytype::DO_NOT_COMPUTE) { + if (cell->sysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) { continue; } From 244a356867c93306c3114ed03209a7eb0ac0b03b Mon Sep 17 00:00:00 2001 From: ipryakem Date: Wed, 4 Dec 2019 09:12:22 +0100 Subject: [PATCH 601/602] mask -> bitfield (renamed) --- fieldsolver/ldz_electric_field.cpp | 8 ++++---- fieldsolver/ldz_magnetic_field.cpp | 12 ++++++------ sysboundary/sysboundary.cpp | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/fieldsolver/ldz_electric_field.cpp b/fieldsolver/ldz_electric_field.cpp index 35a3d64d5..e96337ddb 100644 --- a/fieldsolver/ldz_electric_field.cpp +++ b/fieldsolver/ldz_electric_field.cpp @@ -1531,9 +1531,9 @@ void calculateElectricField( if (cellSysBoundaryFlag == sysboundarytype::DO_NOT_COMPUTE) return; - cuint mask = technicalGrid.get(i,j,k)->SOLVE; + cuint bitfield = technicalGrid.get(i,j,k)->SOLVE; - if ((mask & compute::EX) == compute::EX) { + if ((bitfield & compute::EX) == compute::EX) { calculateEdgeElectricFieldX( perBGrid, EGrid, @@ -1553,7 +1553,7 @@ void calculateElectricField( sysBoundaries.getSysBoundary(cellSysBoundaryFlag)->fieldSolverBoundaryCondElectricField(EGrid, i, j, k, 0); } - if ((mask & compute::EY) == compute::EY) { + if ((bitfield & compute::EY) == compute::EY) { calculateEdgeElectricFieldY( perBGrid, EGrid, @@ -1573,7 +1573,7 @@ void calculateElectricField( sysBoundaries.getSysBoundary(cellSysBoundaryFlag)->fieldSolverBoundaryCondElectricField(EGrid, i, j, k, 1); } - if ((mask & compute::EZ) == compute::EZ) { + if ((bitfield & compute::EZ) == compute::EZ) { calculateEdgeElectricFieldZ( perBGrid, EGrid, diff --git a/fieldsolver/ldz_magnetic_field.cpp b/fieldsolver/ldz_magnetic_field.cpp index 188029d51..36ccf6d1a 100644 --- a/fieldsolver/ldz_magnetic_field.cpp +++ b/fieldsolver/ldz_magnetic_field.cpp @@ -233,8 +233,8 @@ void propagateMagneticFieldSimple( for (int k=0; 
k<gridDims[2]; k++) { - cuint mask = technicalGrid.get(i,j,k)->SOLVE; - propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, ((mask & compute::BX) == compute::BX), ((mask & compute::BY) == compute::BY), ((mask & compute::BZ) == compute::BZ)); + cuint bitfield = technicalGrid.get(i,j,k)->SOLVE; + propagateMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, i, j, k, dt, RKCase, ((bitfield & compute::BX) == compute::BX), ((bitfield & compute::BY) == compute::BY), ((bitfield & compute::BZ) == compute::BZ)); } } } @@ -265,16 +265,16 @@ for (int k=0; k<gridDims[2]; k++) { - cuint mask = technicalGrid.get(i,j,k)->SOLVE; + cuint bitfield = technicalGrid.get(i,j,k)->SOLVE; // L1 pass if (technicalGrid.get(i,j,k)->sysBoundaryLayer == 1) { - if ((mask & compute::BX) != compute::BX) { + if ((bitfield & compute::BX) != compute::BX) { propagateSysBoundaryMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, sysBoundaries, dt, RKCase, 0); } - if ((mask & compute::BY) != compute::BY) { + if ((bitfield & compute::BY) != compute::BY) { propagateSysBoundaryMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, sysBoundaries, dt, RKCase, 1); } - if ((mask & compute::BZ) != compute::BZ) { + if ((bitfield & compute::BZ) != compute::BZ) { propagateSysBoundaryMagneticField(perBGrid, perBDt2Grid, EGrid, EDt2Grid, technicalGrid, i, j, k, sysBoundaries, dt, RKCase, 2); } } diff --git a/sysboundary/sysboundary.cpp b/sysboundary/sysboundary.cpp index e243dc2de..c55050246 100644 --- a/sysboundary/sysboundary.cpp +++ b/sysboundary/sysboundary.cpp @@ -527,7 +527,7 @@ bool SysBoundary::classifyCells(dccrg::Dccrg<SpatialCell, const std::array<int32_t, 3> fsGridDimensions = technicalGrid.getGlobalSize(); - // One pass to setup the bit mask to know which components the field solver should propagate. + // One pass to setup the bit field to know which components the field solver should propagate. #pragma omp parallel for collapse(3) for (int x = 0; x < localSize[0]; ++x) { for (int y = 0; y < localSize[1]; ++y) { From 4c4d41f3ef8b33577d8b19eb3f17ffa0e8928dd3 Mon Sep 17 00:00:00 2001 From: ipryakem Date: Wed, 4 Dec 2019 09:19:52 +0100 Subject: [PATCH 602/602] Urs' suggested help text for fluffiness. --- sysboundary/ionosphere.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sysboundary/ionosphere.cpp b/sysboundary/ionosphere.cpp index c3e5b6cfe..decad97ca 100644 --- a/sysboundary/ionosphere.cpp +++ b/sysboundary/ionosphere.cpp @@ -67,7 +67,7 @@ namespace SBC { Readparameters::add(pop + "_ionosphere.VX0", "Bulk velocity of ionospheric distribution function in X direction (m/s)", 0.0); Readparameters::add(pop + "_ionosphere.VY0", "Bulk velocity of ionospheric distribution function in X direction (m/s)", 0.0); Readparameters::add(pop + "_ionosphere.VZ0", "Bulk velocity of ionospheric distribution function in X direction (m/s)", 0.0); - Readparameters::add(pop + "_ionosphere.fluffiness", "Weight of boundary (0) vs. average of NOT_SYSBOUNDARY neighbor's (1) moments and velocity distribution.", 0); + Readparameters::add(pop + "_ionosphere.fluffiness", "Inertia of boundary smoothing when copying neighbour's moments and velocity distributions (0=completely constant boundaries, 1=neighbours are interpolated immediately).", 0); }
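For context, the parameter documented above enters SysBoundaryCondition::averageCellData as a relaxation weight: the cell's own value is rescaled by 1 - fluffiness and each of the N contributing neighbours is added with weight fluffiness / N. A stand-alone sketch of that update for a single scalar value; the function name is illustrative.

#include <iostream>
#include <vector>

// One fluffiness-weighted update step; f stands for any moment or
// phase-space density value handled by the boundary condition.
double fluffyUpdate(double fBoundary, const std::vector<double>& fNeighbours,
                    double fluffiness) {
   double f = (1.0 - fluffiness) * fBoundary;       // keep part of the old value
   const double factor = fluffiness / fNeighbours.size();
   for (double fn : fNeighbours) f += factor * fn;  // blend in the neighbours
   return f;
}

int main() {
   const std::vector<double> neighbours = {2.0, 4.0};
   std::cout << fluffyUpdate(1.0, neighbours, 0.0) << "\n"; // 1: completely constant boundary
   std::cout << fluffyUpdate(1.0, neighbours, 1.0) << "\n"; // 3: pure neighbour average
   std::cout << fluffyUpdate(1.0, neighbours, 0.5) << "\n"; // 2: halfway between the two
   return 0;
}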