Fix incorrect PML material property averages (MPI)

This commit is contained in:
nmannall
2024-07-02 17:11:25 +01:00
Parent 5938579516
Commit 7078bbebf3
4 changed files changed, 157 insertions, 20 deletions

View file

@@ -60,3 +60,39 @@ cpdef pml_average_er_mr(
     averagemr = summr / (n1 * n2)
     return averageer, averagemr
+
+
+cpdef pml_sum_er_mr(
+    int n1,
+    int n2,
+    int nthreads,
+    np.uint32_t[:, :] solid,
+    float_or_double[::1] ers,
+    float_or_double[::1] mrs
+):
+    """Calculates summed permittivity and permeability in PML slab (based on
+    underlying material er and mr from solid array). Used to build PML.
+
+    Args:
+        n1, n2: ints for PML size in cells perpendicular to thickness direction.
+        nthreads: int for number of threads to use.
+        solid: memoryview to access solid array.
+        ers, mrs: memoryviews to access arrays containing permittivity and
+            permeability.
+
+    Returns:
+        sumer, summr: floats for summed permittivity and permeability in PML
+            slab.
+    """
+    cdef Py_ssize_t m, n
+    cdef int numID
+    # Running sums of relative permittivity and permeability in the PML slab
+    cdef float sumer = 0
+    cdef float summr = 0
+
+    for m in prange(n1, nogil=True, schedule='static', num_threads=nthreads):
+        for n in range(n2):
+            numID = solid[m, n]
+            sumer += ers[numID]
+            summr += mrs[numID]
+
+    return sumer, summr
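The new kernel returns raw sums rather than a local average because, under MPI domain decomposition, each rank only sees its own slice of the slab; averaging the per-rank averages (the previous behaviour) weights small subdomains as heavily as large ones. Below is a minimal standalone sketch of that arithmetic, with made-up rank sizes and material values, not gprMax code:

# Minimal sketch (not gprMax code): why averaging per-rank averages is wrong
# when ranks hold different numbers of PML cells, and why summing locally and
# dividing by the global cell count fixes it.

# Hypothetical per-rank PML cell counts and local permittivity sums
cells = [100, 100, 20]                     # rank 2 owns a much smaller slice
sums = [100 * 4.0, 100 * 4.0, 20 * 1.0]    # er = 4 on ranks 0-1, er = 1 on rank 2

# Old approach: each rank averages locally, then the averages are averaged
old = sum(s / c for s, c in zip(sums, cells)) / len(cells)

# New approach: reduce the sums and the cell counts, then divide once
new = sum(sums) / sum(cells)

print(old)  # 3.0        - rank 2's few cells weigh as much as a full rank
print(new)  # 3.727...   - every cell contributes equally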

View file

@@ -152,7 +152,7 @@ class FDTDGrid:
         self.initialise_dispersive_update_coeff_array()
         self._build_materials()

-    def _build_pmls(self) -> None:
+    def _build_pmls(self, build_pml_func=build_pml) -> None:
         pbar = tqdm(
             total=sum(1 for value in self.pmls["thickness"].values() if value > 0),
             desc=f"Building PML boundaries [{self.name}]",
@@ -162,7 +162,9 @@ class FDTDGrid:
         )

         for pml_id, thickness in self.pmls["thickness"].items():
             if thickness > 0:
-                build_pml(self, pml_id, thickness)
+                # TODO: Consider making this a method of FDTDGrid (that
+                # can be overridden in MPIGrid)
+                build_pml_func(self, pml_id, thickness)
             pbar.update()

         pbar.close()
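Passing the builder in as `build_pml_func` lets a subclass reuse the progress-bar loop while swapping in an MPI-aware builder. A stripped-down sketch of the pattern, using toy stand-in classes and functions rather than the real gprMax ones:

# Sketch of the injection pattern (toy stand-ins, not the gprMax classes):
# the base class takes the builder as an argument so a subclass can swap in
# a different implementation without duplicating the loop.

def build_pml(grid, pml_id, thickness):
    print(f"serial build of {pml_id}")

def build_pml_mpi(grid, pml_id, thickness):
    print(f"MPI-aware build of {pml_id}")

class Grid:
    pml_thickness = {"x0": 10, "xmax": 10, "y0": 0}

    def _build_pmls(self, build_pml_func=build_pml):
        for pml_id, thickness in self.pml_thickness.items():
            if thickness > 0:
                build_pml_func(self, pml_id, thickness)

class MPIGrid(Grid):
    def _build_pmls(self):
        super()._build_pmls(build_pml_func=build_pml_mpi)

MPIGrid()._build_pmls()   # uses build_pml_mpi for every slab with thickness > 0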

View file

@@ -27,6 +27,7 @@ from mpi4py import MPI
 from numpy import ndarray

 from gprMax.grid.fdtd_grid import FDTDGrid
+from gprMax.pml import build_pml_mpi
 from gprMax.receivers import Rx
 from gprMax.sources import Source
@@ -66,6 +67,7 @@ class MPIGrid(FDTDGrid):
         self.lower_extent: npt.NDArray[np.intc] = np.zeros(3, dtype=int)
         self.upper_extent: npt.NDArray[np.intc] = np.zeros(3, dtype=int)
+        self.negative_halo_offset: npt.NDArray[np.intc] = np.zeros(3, dtype=int)
         self.global_size: npt.NDArray[np.intc] = np.zeros(3, dtype=int)

         self.neighbours = np.full((3, 2), -1, dtype=int)
@@ -260,6 +262,9 @@ class MPIGrid(FDTDGrid):
         self._halo_swap_array(self.Hy)
         self._halo_swap_array(self.Hz)

+    def _build_pmls(self):
+        super()._build_pmls(build_pml_func=build_pml_mpi)
+
     def build(self):
         if any(self.global_size + 1 < self.mpi_tasks):
             logger.error(
@@ -329,6 +334,7 @@ class MPIGrid(FDTDGrid):
         # a positive halo will always exist. Grids not needing a
         # positive halo, still need the extra size as that makes the
         # global grid on the whole one larger than the user dimensions.
-        self.size += self.neighbours[:, 0] >= 0
-        self.lower_extent -= self.neighbours[:, 0] >= 0
+        self.negative_halo_offset = self.neighbours[:, 0] >= 0
+        self.size += self.negative_halo_offset
+        self.lower_extent -= self.negative_halo_offset
         self.upper_extent = self.lower_extent + self.size
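Storing `self.neighbours[:, 0] >= 0` once as `negative_halo_offset` keeps the per-axis "is there a lower neighbour" flags available for later use, where the PML builder strips exactly those halo cells. A rough numpy sketch of the bookkeeping, with made-up values:

# Illustrative values only: neighbours[:, 0] holds the rank of the lower
# neighbour on each axis, or -1 at a domain edge.
import numpy as np

neighbours = np.array([[3, 5],     # x: lower and upper neighbour ranks
                       [-1, 7],    # y: this rank sits on the lower domain edge
                       [1, -1]])   # z: this rank sits on the upper domain edge

negative_halo_offset = neighbours[:, 0] >= 0   # -> [ True, False,  True]

size = np.array([40, 40, 40])
lower_extent = np.array([40, 0, 80])

size = size + negative_halo_offset             # one extra cell where a lower halo exists
lower_extent = lower_extent - negative_halo_offset
upper_extent = lower_extent + size

print(size, lower_extent, upper_extent)        # [41 40 41] [39 0 79] [80 40 120]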

View file

@@ -24,7 +24,7 @@ from mpi4py import MPI
 import gprMax.config as config

-from .cython.pml_build import pml_average_er_mr
+from .cython.pml_build import pml_average_er_mr, pml_sum_er_mr

 logger = logging.getLogger(__name__)
@@ -795,22 +795,115 @@ def build_pml(G, pml_ID, thickness):
         G.nx, G.ny, config.get_model_config().ompthreads, G.solid[:, :, pml.zs], ers, mrs
     )

-    # TODO: Use Grid type (currently will cause a circular dependency)
-    if config.sim_config.mpi:
-        if pml_ID[0] == "x":
-            comm = G.x_comm
-        elif pml_ID[0] == "y":
-            comm = G.y_comm
-        elif pml_ID[0] == "z":
-            comm = G.z_comm
-        comm.barrier()
-        print(f"[Rank {G.rank}] pml_ID: {pml_ID}, er: {averageer}, mr: {averagemr}")
-        averageer = comm.allreduce(averageer, MPI.SUM) / comm.size
-        averagemr = comm.allreduce(averagemr, MPI.SUM) / comm.size
-        print(f"[Rank {G.rank}] NEW pml_ID: {pml_ID}, er: {averageer}, mr {averagemr}")
-    else:
-        print(f"pml_ID: {pml_ID}, er: {averageer}, mr {averagemr}")
+    pml.calculate_update_coeffs(averageer, averagemr)
+    G.pmls["slabs"].append(pml)
+
+
+def build_pml_mpi(G, pml_ID, thickness):
+    """Builds instances of the PML and calculates the initial parameters and
+    coefficients including setting profile (based on underlying material
+    er and mr from solid array).
+
+    Args:
+        G: FDTDGrid class describing a grid in a model.
+        pml_ID: string identifier of PML slab.
+        thickness: int with thickness of PML slab in cells.
+    """
+    # Arrays to hold values of permittivity and permeability (avoids accessing
+    # Material class in Cython.)
+    ers = np.zeros(len(G.materials))
+    mrs = np.zeros(len(G.materials))
+    for i, m in enumerate(G.materials):
+        ers[i] = m.er
+        mrs[i] = m.mr
+
+    if config.sim_config.general["solver"] == "cpu":
+        pml_type = PML
+    elif config.sim_config.general["solver"] == "cuda":
+        pml_type = CUDAPML
+    elif config.sim_config.general["solver"] == "opencl":
+        pml_type = OpenCLPML
+
+    if pml_ID == "x0":
+        pml = pml_type(
+            G, ID=pml_ID, direction="xminus", xs=0, xf=thickness, ys=0, yf=G.ny, zs=0, zf=G.nz
+        )
+    elif pml_ID == "xmax":
+        pml = pml_type(
+            G,
+            ID=pml_ID,
+            direction="xplus",
+            xs=G.nx - thickness,
+            xf=G.nx,
+            ys=0,
+            yf=G.ny,
+            zs=0,
+            zf=G.nz,
+        )
+    elif pml_ID == "y0":
+        pml = pml_type(
+            G, ID=pml_ID, direction="yminus", xs=0, xf=G.nx, ys=0, yf=thickness, zs=0, zf=G.nz
+        )
+    elif pml_ID == "ymax":
+        pml = pml_type(
+            G,
+            ID=pml_ID,
+            direction="yplus",
+            xs=0,
+            xf=G.nx,
+            ys=G.ny - thickness,
+            yf=G.ny,
+            zs=0,
+            zf=G.nz,
+        )
+    elif pml_ID == "z0":
+        pml = pml_type(
+            G, ID=pml_ID, direction="zminus", xs=0, xf=G.nx, ys=0, yf=G.ny, zs=0, zf=thickness
+        )
+    elif pml_ID == "zmax":
+        pml = pml_type(
+            G,
+            ID=pml_ID,
+            direction="zplus",
+            xs=0,
+            xf=G.nx,
+            ys=0,
+            yf=G.ny,
+            zs=G.nz - thickness,
+            zf=G.nz,
+        )
+
+    # Need to account for the negative halo (remove it) to avoid double
+    # counting. The solid array does not have a positive halo so we
+    # don't need to consider that.
+    if pml_ID[0] == "x":
+        o1 = G.negative_halo_offset[1]
+        o2 = G.negative_halo_offset[2]
+        n1 = G.ny - o1
+        n2 = G.nz - o2
+        solid = G.solid[pml.xs, o1:, o2:]
+        comm = G.x_comm
+    elif pml_ID[0] == "y":
+        o1 = G.negative_halo_offset[0]
+        o2 = G.negative_halo_offset[2]
+        n1 = G.nx - o1
+        n2 = G.nz - o2
+        solid = G.solid[o1:, pml.ys, o2:]
+        comm = G.y_comm
+    elif pml_ID[0] == "z":
+        o1 = G.negative_halo_offset[0]
+        o2 = G.negative_halo_offset[1]
+        n1 = G.nx - o1
+        n2 = G.ny - o2
+        solid = G.solid[o1:, o2:, pml.zs]
+        comm = G.z_comm
+
+    sumer, summr = pml_sum_er_mr(n1, n2, config.get_model_config().ompthreads, solid, ers, mrs)
+
+    n = comm.allreduce(n1 * n2, MPI.SUM)
+    averageer = comm.allreduce(sumer, MPI.SUM) / n
+    averagemr = comm.allreduce(summr, MPI.SUM) / n

     pml.calculate_update_coeffs(averageer, averagemr)
     G.pmls["slabs"].append(pml)