Merge branch '30-parallel-build' into mpi

This commit is contained in:
Nathan Mannall
2025-03-18 11:07:08 +00:00
Commit 7c2fbf58d1
119 files changed, with 4065 insertions and 1387 deletions

View file

@@ -40,13 +40,12 @@ from .user_objects.cmds_multiuse import (
MaterialRange,
Rx,
RxArray,
Snapshot,
SoilPeplinski,
TransmissionLine,
VoltageSource,
Waveform,
)
from .user_objects.cmds_output import GeometryObjectsWrite, GeometryView
from .user_objects.cmds_output import GeometryObjectsWrite, GeometryView, Snapshot
from .user_objects.cmds_singleuse import (
Discretisation,
Domain,

View file

@@ -166,13 +166,12 @@ class MPIContext(Context):
self.comm = MPI.COMM_WORLD
self.rank = self.comm.rank
requested_mpi_size = np.product(config.sim_config.mpi)
requested_mpi_size = np.prod(config.sim_config.mpi)
if self.comm.size < requested_mpi_size:
logger.error(
raise ValueError(
f"MPI_COMM_WORLD size of {self.comm.size} is too small for requested dimensions of"
f" {config.sim_config.mpi}. {requested_mpi_size} ranks are required."
)
raise ValueError
if self.rank >= requested_mpi_size:
logger.warning(
@@ -186,7 +185,11 @@ class MPIContext(Context):
return MPIModel()
def run(self) -> Dict:
return super().run()
try:
return super().run()
except:
logger.exception(f"Rank {self.rank} encountered an error. Aborting...")
self.comm.Abort()
def _run_model(self, model_num: int) -> None:
"""Process for running a single model.
@@ -201,9 +204,8 @@ class MPIContext(Context):
if not model_config.reuse_geometry():
model = self._create_model()
if model.is_coordinator():
scene = self._get_scene(model_num)
scene.create_internal_objects(model)
scene = self._get_scene(model_num)
scene.create_internal_objects(model)
model.build()
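Two details of this hunk are worth calling out: the deprecated alias np.product is replaced by np.prod, and the run is now wrapped so that an unhandled exception on one rank aborts the whole communicator instead of leaving the other ranks blocked in their next collective call. A minimal sketch of the abort pattern, assuming MPI.COMM_WORLD and a placeholder run() rather than the gprMax API:

    import logging
    from mpi4py import MPI

    logger = logging.getLogger(__name__)
    comm = MPI.COMM_WORLD

    def run():
        ...  # stand-in for the real model run

    try:
        run()
    except Exception:
        # Without Abort, the surviving ranks would deadlock waiting for
        # the failed rank in a later collective operation.
        logger.exception(f"Rank {comm.rank} encountered an error. Aborting...")
        comm.Abort()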

View file

@@ -45,7 +45,7 @@ cpdef get_line_properties(
material IDs for each line.
"""
cdef np.ndarray material_data = np.zeros(number_of_lines, dtype=np.uint32)
cdef np.ndarray connectivity = np.zeros(2 * number_of_lines, dtype=np.intc)
cdef np.ndarray connectivity = np.zeros(2 * number_of_lines, dtype=np.int32)
cdef int line_index = 0
cdef int connectivity_index = 0
cdef int point_id = 0
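The dtype change from np.intc to np.int32 here recurs throughout the commit. np.intc is an alias for the platform's C int, whereas np.int32 is a fixed 32-bit type, which is the safer choice when arrays cross rank boundaries or are written to HDF5/VTKHDF files. A quick illustration in plain NumPy:

    import numpy as np

    # np.intc follows the platform's C int; np.int32 is always 4 bytes.
    print(np.dtype(np.intc).itemsize, np.dtype(np.int32).itemsize)

    connectivity = np.zeros(2 * 10, dtype=np.int32)
    assert connectivity.dtype.itemsize == 4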

View file

@@ -111,9 +111,19 @@ cpdef bint is_inside_sector(
relpoint1 = px - ctrx
relpoint2 = py - ctry
return (not are_clockwise(sectorstart1, sectorstart2, relpoint1, relpoint2)
and are_clockwise(sectorend1, sectorend2, relpoint1, relpoint2)
and is_within_radius(relpoint1, relpoint2, radius))
if sectorangle <= np.pi:
return (
not are_clockwise(sectorstart1, sectorstart2, relpoint1, relpoint2)
and are_clockwise(sectorend1, sectorend2, relpoint1, relpoint2)
and is_within_radius(relpoint1, relpoint2, radius)
)
else:
return (
(
not are_clockwise(sectorstart1, sectorstart2, relpoint1, relpoint2)
or are_clockwise(sectorend1, sectorend2, relpoint1, relpoint2)
) and is_within_radius(relpoint1, relpoint2, radius)
)
cpdef bint point_in_polygon(
@@ -435,6 +445,12 @@ cpdef void build_triangle(
j2 = round_value(np.amax([z1, z2, z3]) / dz) + 1
levelcells = round_value(x1 / dx)
thicknesscells = round_value(thickness / dx)
# Bound to the size of the grid
if i2 > solid.shape[1]:
i2 = solid.shape[1]
if j2 > solid.shape[2]:
j2 = solid.shape[2]
elif normal == 'y':
area = 0.5 * (-z2 * x3 + z1 * (-x2 + x3) + x1 * (z2 - z3) + x2 * z3)
i1 = round_value(np.amin([x1, x2, x3]) / dx) - 1
@@ -443,6 +459,12 @@ cpdef void build_triangle(
j2 = round_value(np.amax([z1, z2, z3]) / dz) + 1
levelcells = round_value(y1 /dy)
thicknesscells = round_value(thickness / dy)
# Bound to the size of the grid
if i2 > solid.shape[0]:
i2 = solid.shape[0]
if j2 > solid.shape[2]:
j2 = solid.shape[2]
elif normal == 'z':
area = 0.5 * (-y2 * x3 + y1 * (-x2 + x3) + x1 * (y2 - y3) + x2 * y3)
i1 = round_value(np.amin([x1, x2, x3]) / dx) - 1
@@ -452,6 +474,18 @@ cpdef void build_triangle(
levelcells = round_value(z1 / dz)
thicknesscells = round_value(thickness / dz)
# Bound to the size of the grid
if i2 > solid.shape[0]:
i2 = solid.shape[0]
if j2 > solid.shape[1]:
j2 = solid.shape[1]
# Bound to the start of the grid
if i1 < 0:
i1 = 0
if j1 < 0:
j1 = 0
sign = np.sign(area)
for i in range(i1, i2):
@@ -963,7 +997,7 @@ cpdef void build_cone(
"""
cdef Py_ssize_t i, j, k
cdef int xs, xf, ys, yf, zs, zf, xc, yc, zc
cdef int xs, xf, ys, yf, zs, zf, xs_bound, xf_bound, ys_bound, yf_bound, zs_bound, zf_bound
cdef float f1f2mag, f2f1mag, f1ptmag, f2ptmag, dot1, dot2, factor1, factor2
cdef float theta1, theta2, distance1, distance2, R1, R2
cdef float height, distance_axis_1, distance_axis_2
@@ -1034,41 +1068,48 @@ cpdef void build_cone(
zs = round_value((z2 - Rmax) / dz) - 1
zf = round_value((z1 + Rmax) / dz) + 1
xs_bound = xs
xf_bound = xf
ys_bound = ys
yf_bound = yf
zs_bound = zs
zf_bound = zf
# Set bounds to domain if they outside
if xs < 0:
xs = 0
if xf > solid.shape[0]:
xf = solid.shape[0]
if ys < 0:
ys = 0
if yf > solid.shape[1]:
yf = solid.shape[1]
if zs < 0:
zs = 0
if zf > solid.shape[2]:
zf = solid.shape[2]
if xs_bound < 0:
xs_bound = 0
if xf_bound > solid.shape[0]:
xf_bound = solid.shape[0]
if ys_bound < 0:
ys_bound = 0
if yf_bound > solid.shape[1]:
yf_bound = solid.shape[1]
if zs_bound < 0:
zs_bound = 0
if zf_bound > solid.shape[2]:
zf_bound = solid.shape[2]
# x-aligned cone
if x_align:
for j in range(ys, yf):
for k in range(zs, zf):
for i in range(xs, xf):
if np.sqrt((j * dy + 0.5 * dy - y1)**2 + (k * dz + 0.5 * dz - z1)**2) <= ((i-xs)/(xf-xs))*(r2-r1) + r1:
for j in range(ys_bound, yf_bound):
for k in range(zs_bound, zf_bound):
for i in range(xs_bound, xf_bound):
if np.sqrt((j * dy + 0.5 * dy - y1)**2 + (k * dz + 0.5 * dz - z1)**2) <= ((i- xs)/(xf-xs))*(r2-r1) + r1:
build_voxel(i, j, k, numID, numIDx, numIDy, numIDz,
averaging, solid, rigidE, rigidH, ID)
# y-aligned cone
elif y_align:
for i in range(xs, xf):
for k in range(zs, zf):
for j in range(ys, yf):
for i in range(xs_bound, xf_bound):
for k in range(zs_bound, zf_bound):
for j in range(ys_bound, yf_bound):
if np.sqrt((i * dx + 0.5 * dx - x1)**2 + (k * dz + 0.5 * dz - z1)**2) <= ((j-ys)/(yf-ys))*(r2-r1) + r1:
build_voxel(i, j, k, numID, numIDx, numIDy, numIDz,
averaging, solid, rigidE, rigidH, ID)
# z-aligned cone
elif z_align:
for i in range(xs, xf):
for j in range(ys, yf):
for k in range(zs, zf):
for i in range(xs_bound, xf_bound):
for j in range(ys_bound, yf_bound):
for k in range(zs_bound, zf_bound):
if np.sqrt((i * dx + 0.5 * dx - x1)**2 + (j * dy + 0.5 * dy - y1)**2) <= ((k-zs)/(zf-zs))*(r2-r1) + r1:
build_voxel(i, j, k, numID, numIDx, numIDy, numIDz,
averaging, solid, rigidE, rigidH, ID)
@@ -1085,9 +1126,9 @@ cpdef void build_cone(
height = f1f2mag
for i in range(xs, xf):
for j in range(ys, yf):
for k in range(zs, zf):
for i in range(xs_bound, xf_bound):
for j in range(ys_bound, yf_bound):
for k in range(zs_bound, zf_bound):
# Build flag - default false, set to True if point is in cone
build = 0
# Vector from centre of first cone face to test point
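The additions to build_triangle and build_cone follow one pattern: candidate cell indices derived from real-world coordinates are clamped to the extents of the solid array before the build loops run, so geometry that pokes outside a rank's sub-domain no longer indexes out of bounds. The same clamp, sketched with np.clip on invented shapes (not the Cython API):

    import numpy as np

    solid = np.ones((100, 120, 140), dtype=np.uint32)

    def clamp_bounds(start, stop, shape):
        # Restrict [start, stop) index bounds to the array extents.
        return np.clip(start, 0, shape), np.clip(stop, 0, shape)

    start, stop = clamp_bounds(np.array([-3, 5, 7]), np.array([250, 80, 200]),
                               np.array(solid.shape))
    # start == [0, 5, 7] and stop == [100, 80, 140], so the build loops
    # stay inside the grid.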

View file

@@ -101,9 +101,9 @@ class GeometryView(ABC):
grid: FDTDGrid class describing a grid in a model.
"""
self.start = np.array([xs, ys, zs], dtype=np.intc)
self.stop = np.array([xf, yf, zf], dtype=np.intc)
self.step = np.array([dx, dy, dz], dtype=np.intc)
self.start = np.array([xs, ys, zs], dtype=np.int32)
self.stop = np.array([xf, yf, zf], dtype=np.int32)
self.step = np.array([dx, dy, dz], dtype=np.int32)
self.size = (self.stop - self.start) // self.step
self.filename = Path(filename)
@@ -224,7 +224,7 @@ class GeometryViewLines(GeometryView):
n_lines = 3 * self.nx * self.ny * self.nz
self.cell_types = np.full(n_lines, VtkCellType.LINE)
self.cell_offsets = np.arange(0, 2 * n_lines + 2, 2, dtype=np.intc)
self.cell_offsets = np.arange(0, 2 * n_lines + 2, 2, dtype=np.int32)
self.connectivity, self.material_data = get_line_properties(
n_lines, self.nx, self.ny, self.nz, ID
@@ -330,7 +330,7 @@ class Metadata:
self.gprmax_version = __version__
self.dx_dy_dz = self.grid.dl
self.nx_ny_nz = np.array([self.grid.nx, self.grid.ny, self.grid.nz], dtype=np.intc)
self.nx_ny_nz = np.array([self.grid.nx, self.grid.ny, self.grid.nz], dtype=np.int32)
self.materials = self.materials_comment()
@@ -427,8 +427,8 @@ class Metadata:
def dx_dy_dz_comment(self) -> npt.NDArray[np.float64]:
return self.grid.dl
def nx_ny_nz_comment(self) -> npt.NDArray[np.intc]:
return np.array([self.grid.nx, self.grid.ny, self.grid.nz], dtype=np.intc)
def nx_ny_nz_comment(self) -> npt.NDArray[np.int32]:
return np.array([self.grid.nx, self.grid.ny, self.grid.nz], dtype=np.int32)
def materials_comment(self) -> List[str]:
if not self.averaged_materials:

View file

@@ -21,7 +21,7 @@ import itertools
import logging
import sys
from collections import OrderedDict
from typing import Any, Iterable, List, Tuple, Union
from typing import Any, Iterable, List, Literal, Tuple, Union
import numpy as np
import numpy.typing as npt
@@ -36,7 +36,6 @@ from gprMax.fractals import FractalVolume
from gprMax.materials import ListMaterial, Material, PeplinskiSoil, RangeMaterial, process_materials
from gprMax.pml import CFS, PML, print_pml_info
from gprMax.receivers import Rx
from gprMax.snapshots import Snapshot
from gprMax.sources import HertzianDipole, MagneticDipole, Source, TransmissionLine, VoltageSource
from gprMax.utilities.utilities import fft_power, get_terminal_width, round_value
from gprMax.waveforms import Waveform
@@ -55,9 +54,7 @@ class FDTDGrid:
self.name = "main_grid"
self.mem_use = 0
self.nx = 0
self.ny = 0
self.nz = 0
self.size = np.zeros(3, dtype=np.int32)
self.dl = np.ones(3, dtype=np.float64)
self.dt = 0.0
@@ -98,7 +95,8 @@ class FDTDGrid:
# itself does not matter, however, if must be the same from model to
# model otherwise the numerical precision from adding the PML
# corrections will be different.
self.pmls["thickness"] = OrderedDict((key, 10) for key in PML.boundaryIDs)
self.pmls["thickness"] = OrderedDict()
self.set_pml_thickness(10)
# Materials used by this grid
self.materials: List[Material] = []
@@ -112,10 +110,34 @@ class FDTDGrid:
self.magneticdipoles: List[MagneticDipole] = []
self.transmissionlines: List[TransmissionLine] = []
self.rxs: List[Rx] = []
self.snapshots: List[Snapshot] = []
self.snapshots = [] # List[Snapshot]
self.averagevolumeobjects = True
@property
def nx(self) -> int:
return self.size[0]
@nx.setter
def nx(self, value: int):
self.size[0] = value
@property
def ny(self) -> int:
return self.size[1]
@ny.setter
def ny(self, value: int):
self.size[1] = value
@property
def nz(self) -> int:
return self.size[2]
@nz.setter
def nz(self, value: int):
self.size[2] = value
@property
def dx(self) -> float:
return self.dl[0]
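FDTDGrid now keeps its dimensions in a single self.size array (and spacings in self.dl), with nx/ny/nz and dx/dy/dz exposed as properties, so MPI code can treat the size as a vector while existing scalar-based code keeps working. The idea in isolation, as a stripped-down sketch rather than the real class:

    import numpy as np

    class GridSize:
        def __init__(self):
            self.size = np.zeros(3, dtype=np.int32)

        @property
        def nx(self) -> int:
            return int(self.size[0])

        @nx.setter
        def nx(self, value: int):
            self.size[0] = value

    g = GridSize()
    g.nx = 120
    assert g.size[0] == 120  # scalar and vector views stay in sync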
@@ -140,6 +162,33 @@ class FDTDGrid:
def dz(self, value: float):
self.dl[2] = value
def set_pml_thickness(self, thickness: Union[int, Tuple[int, int, int, int, int, int]]):
if isinstance(thickness, int) or len(thickness) == 1:
for key in PML.boundaryIDs:
self.pmls["thickness"][key] = int(thickness)
elif len(thickness) == 6:
self.pmls["thickness"]["x0"] = int(thickness[0])
self.pmls["thickness"]["y0"] = int(thickness[1])
self.pmls["thickness"]["z0"] = int(thickness[2])
self.pmls["thickness"]["xmax"] = int(thickness[3])
self.pmls["thickness"]["ymax"] = int(thickness[4])
self.pmls["thickness"]["zmax"] = int(thickness[5])
def add_source(self, source: Source):
if isinstance(source, VoltageSource):
self.voltagesources.append(source)
elif isinstance(source, HertzianDipole):
self.hertziandipoles.append(source)
elif isinstance(source, MagneticDipole):
self.magneticdipoles.append(source)
elif isinstance(source, TransmissionLine):
self.transmissionlines.append(source)
else:
raise TypeError(f"Source of type '{type(source)}' is unknown to gprMax")
def add_receiver(self, receiver: Rx):
self.rxs.append(receiver)
def build(self) -> None:
"""Build the grid."""
@@ -435,15 +484,15 @@ class FDTDGrid:
logger.exception("Receiver(s) will be stepped to a position outside the domain.")
raise ValueError from e
IntPoint = Tuple[int, int, int]
FloatPoint = Tuple[float, float, float]
def within_bounds(self, p: IntPoint):
def within_bounds(self, p: npt.NDArray[np.int32]) -> bool:
"""Check a point is within the grid.
Args:
p: Point to check.
Returns:
within_bounds: True if the point is within the grid bounds.
Raises:
ValueError: Raised if the point is outside the grid.
"""
@@ -454,7 +503,9 @@ class FDTDGrid:
if p[2] < 0 or p[2] > self.nz:
raise ValueError("z")
def discretise_point(self, p: FloatPoint) -> IntPoint:
return True
def discretise_point(self, p: Tuple[float, float, float]) -> Tuple[int, int, int]:
"""Calculate the nearest grid cell to the given point.
Args:
@@ -468,7 +519,7 @@ class FDTDGrid:
z = round_value(float(p[2]) / self.dz)
return (x, y, z)
def round_to_grid(self, p: FloatPoint) -> FloatPoint:
def round_to_grid(self, p: Tuple[float, float, float]) -> Tuple[float, float, float]:
"""Round the provided point to the nearest grid cell.
Args:
@@ -481,7 +532,7 @@ class FDTDGrid:
p_r = (p[0] * self.dx, p[1] * self.dy, p[2] * self.dz)
return p_r
def within_pml(self, p: IntPoint) -> bool:
def within_pml(self, p: npt.NDArray[np.int32]) -> bool:
"""Check if the provided point is within a PML.
Args:

View file

@@ -31,7 +31,6 @@ from gprMax.cython.pml_build import pml_sum_er_mr
from gprMax.grid.fdtd_grid import FDTDGrid
from gprMax.pml import MPIPML, PML
from gprMax.receivers import Rx
from gprMax.snapshots import MPISnapshot, Snapshot
from gprMax.sources import Source
logger = logging.getLogger(__name__)
@@ -57,22 +56,18 @@ class MPIGrid(FDTDGrid):
COORDINATOR_RANK = 0
def __init__(self, comm: MPI.Cartcomm):
self.size = np.zeros(3, dtype=np.intc)
super().__init__()
self.comm = comm
self.x_comm = comm.Sub([False, True, True])
self.y_comm = comm.Sub([True, False, True])
self.z_comm = comm.Sub([True, True, False])
self.pml_comm = MPI.COMM_NULL
self.mpi_tasks = np.array(self.comm.dims, dtype=np.intc)
self.mpi_tasks = np.array(self.comm.dims, dtype=np.int32)
self.lower_extent = np.zeros(3, dtype=np.intc)
self.upper_extent = np.zeros(3, dtype=np.intc)
self.lower_extent = np.zeros(3, dtype=np.int32)
self.upper_extent = np.zeros(3, dtype=np.int32)
self.negative_halo_offset = np.zeros(3, dtype=np.bool_)
self.global_size = np.zeros(3, dtype=np.intc)
self.global_size = np.zeros(3, dtype=np.int32)
self.neighbours = np.full((3, 2), -1, dtype=int)
self.neighbours[Dim.X] = self.comm.Shift(direction=Dim.X, disp=1)
@@ -82,6 +77,8 @@ class MPIGrid(FDTDGrid):
self.send_halo_map = np.empty((3, 2), dtype=MPI.Datatype)
self.recv_halo_map = np.empty((3, 2), dtype=MPI.Datatype)
super().__init__()
@property
def rank(self) -> int:
return self.comm.Get_rank()
@@ -91,28 +88,45 @@ class MPIGrid(FDTDGrid):
return self.comm.coords
@property
def nx(self) -> int:
return self.size[Dim.X]
def gx(self) -> int:
return self.global_size[Dim.X]
@nx.setter
def nx(self, value: int):
self.size[Dim.X] = value
@gx.setter
def gx(self, value: int):
self.global_size[Dim.X] = value
@property
def ny(self) -> int:
return self.size[Dim.Y]
def gy(self) -> int:
return self.global_size[Dim.Y]
@ny.setter
def ny(self, value: int):
self.size[Dim.Y] = value
@gy.setter
def gy(self, value: int):
self.global_size[Dim.Y] = value
@property
def nz(self) -> int:
return self.size[Dim.Z]
def gz(self) -> int:
return self.global_size[Dim.Z]
@nz.setter
def nz(self, value: int):
self.size[Dim.Z] = value
@gz.setter
def gz(self, value: int):
self.global_size[Dim.Z] = value
def set_pml_thickness(self, thickness: Union[int, Tuple[int, int, int, int, int, int]]):
super().set_pml_thickness(thickness)
# Set PML thickness to zero if not at the edge of the domain
if self.has_neighbour(Dim.X, Dir.NEG):
self.pmls["thickness"]["x0"] = 0
if self.has_neighbour(Dim.X, Dir.POS):
self.pmls["thickness"]["xmax"] = 0
if self.has_neighbour(Dim.Y, Dir.NEG):
self.pmls["thickness"]["y0"] = 0
if self.has_neighbour(Dim.Y, Dir.POS):
self.pmls["thickness"]["ymax"] = 0
if self.has_neighbour(Dim.Z, Dir.NEG):
self.pmls["thickness"]["z0"] = 0
if self.has_neighbour(Dim.Z, Dir.POS):
self.pmls["thickness"]["zmax"] = 0
def is_coordinator(self) -> bool:
"""Test if the current rank is the coordinator.
@@ -123,7 +137,30 @@ class MPIGrid(FDTDGrid):
"""
return self.rank == self.COORDINATOR_RANK
def get_grid_coord_from_coordinate(self, coord: npt.NDArray[np.intc]) -> npt.NDArray[np.intc]:
def create_sub_communicator(
self, local_start: npt.NDArray[np.int32], local_stop: npt.NDArray[np.int32]
) -> Optional[MPI.Cartcomm]:
if self.local_bounds_overlap_grid(local_start, local_stop):
comm = self.comm.Split()
assert isinstance(comm, MPI.Intracomm)
start_grid_coord = self.get_grid_coord_from_local_coordinate(local_start)
# Subtract 1 from local_stop as the upper extent is
# exclusive meaning the last coordinate included in the sub
# communicator is actually (local_stop - 1).
stop_grid_coord = self.get_grid_coord_from_local_coordinate(local_stop - 1) + 1
comm = comm.Create_cart((stop_grid_coord - start_grid_coord).tolist())
return comm
else:
self.comm.Split(MPI.UNDEFINED)
return None
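create_sub_communicator relies on the fact that Comm.Split is collective: every rank in the parent communicator must call it, and ranks that should not join the new communicator pass MPI.UNDEFINED as the colour so they receive MPI.COMM_NULL instead of stalling the others. A self-contained sketch of the pattern (the participation test is invented; run with several ranks):

    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    participates = comm.rank % 2 == 0  # stand-in for "my sub-domain overlaps the region"

    if participates:
        sub = comm.Split(color=0, key=comm.rank)
    else:
        # Still call Split so the collective completes on every rank.
        sub = comm.Split(color=MPI.UNDEFINED, key=comm.rank)
        assert sub == MPI.COMM_NULL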
def get_grid_coord_from_local_coordinate(
self, local_coord: npt.NDArray[np.int32]
) -> npt.NDArray[np.int32]:
coord = self.local_to_global_coordinate(local_coord)
return self.get_grid_coord_from_coordinate(coord)
def get_grid_coord_from_coordinate(self, coord: npt.NDArray[np.int32]) -> npt.NDArray[np.int32]:
"""Get the MPI grid coordinate for a global grid coordinate.
Args:
@@ -183,8 +220,8 @@ class MPIGrid(FDTDGrid):
return [coord_to_rank(coord) for coord in np.ndindex(*(stop - start))]
def global_to_local_coordinate(
self, global_coord: npt.NDArray[np.intc]
) -> npt.NDArray[np.intc]:
self, global_coord: npt.NDArray[np.int32]
) -> npt.NDArray[np.int32]:
"""Convert a global grid coordinate to a local grid coordinate.
The returned coordinate will be relative to the current MPI
@@ -199,7 +236,9 @@ class MPIGrid(FDTDGrid):
"""
return global_coord - self.lower_extent
def local_to_global_coordinate(self, local_coord: npt.NDArray[np.intc]) -> npt.NDArray[np.intc]:
def local_to_global_coordinate(
self, local_coord: npt.NDArray[np.int32]
) -> npt.NDArray[np.int32]:
"""Convert a local grid coordinate to a global grid coordinate.
Args:
@@ -211,7 +250,7 @@ class MPIGrid(FDTDGrid):
return local_coord + self.lower_extent
def global_coord_inside_grid(
self, global_coord: npt.NDArray[np.intc], allow_inside_halo: bool = False
self, global_coord: npt.NDArray[np.int32], allow_inside_halo: bool = False
) -> bool:
"""Check if a global coordinate falls with in the local grid.
@@ -235,19 +274,17 @@ class MPIGrid(FDTDGrid):
return all(global_coord >= lower_bound) and all(global_coord <= upper_bound)
def global_bounds_overlap_local_grid(
self, start: npt.NDArray[np.intc], stop: npt.NDArray[np.intc]
def local_bounds_overlap_grid(
self, local_start: npt.NDArray[np.int32], local_stop: npt.NDArray[np.int32]
) -> bool:
local_start = self.global_to_local_coordinate(start)
local_stop = self.global_to_local_coordinate(stop)
return all(local_start < self.size) and all(local_stop > self.negative_halo_offset)
def limit_global_bounds_to_within_local_grid(
self,
start: npt.NDArray[np.intc],
stop: npt.NDArray[np.intc],
step: npt.NDArray[np.intc] = np.ones(3, dtype=np.intc),
) -> Tuple[npt.NDArray[np.intc], npt.NDArray[np.intc], npt.NDArray[np.intc]]:
start: npt.NDArray[np.int32],
stop: npt.NDArray[np.int32],
step: npt.NDArray[np.int32] = np.ones(3, dtype=np.int32),
) -> Tuple[npt.NDArray[np.int32], npt.NDArray[np.int32], npt.NDArray[np.int32]]:
local_start = self.global_to_local_coordinate(start)
# Bring start into the local grid (and not in the negative halo)
@@ -340,7 +377,7 @@ class MPIGrid(FDTDGrid):
local grid.
"""
if self.is_coordinator():
snapshots_by_rank: List[List[Optional[Snapshot]]] = [[] for _ in range(self.comm.size)]
snapshots_by_rank = [[] for _ in range(self.comm.size)]
for snapshot in self.snapshots:
ranks = self.get_ranks_between_coordinates(snapshot.start, snapshot.stop)
for rank in range(
@@ -357,9 +394,7 @@ class MPIGrid(FDTDGrid):
else:
snapshots_by_rank = None
snapshots: List[Optional[MPISnapshot]] = self.comm.scatter(
snapshots_by_rank, root=self.COORDINATOR_RANK
)
snapshots = self.comm.scatter(snapshots_by_rank, root=self.COORDINATOR_RANK)
for snapshot in snapshots:
if snapshot is None:
@@ -469,37 +504,15 @@ class MPIGrid(FDTDGrid):
Global properties/objects are broadcast to all ranks whereas
local properties/objects are scattered to the relevant ranks.
"""
self.materials = self.comm.bcast(self.materials, root=self.COORDINATOR_RANK)
self.rxs = self.scatter_coord_objects(self.rxs)
self.voltagesources = self.scatter_coord_objects(self.voltagesources)
self.magneticdipoles = self.scatter_coord_objects(self.magneticdipoles)
self.hertziandipoles = self.scatter_coord_objects(self.hertziandipoles)
self.transmissionlines = self.scatter_coord_objects(self.transmissionlines)
pass
# self.scatter_snapshots()
self.scatter_snapshots()
self.pmls = self.comm.bcast(self.pmls, root=self.COORDINATOR_RANK)
if self.coords[Dim.X] != 0:
self.pmls["thickness"]["x0"] = 0
if self.coords[Dim.X] != self.mpi_tasks[Dim.X] - 1:
self.pmls["thickness"]["xmax"] = 0
if self.coords[Dim.Y] != 0:
self.pmls["thickness"]["y0"] = 0
if self.coords[Dim.Y] != self.mpi_tasks[Dim.Y] - 1:
self.pmls["thickness"]["ymax"] = 0
if self.coords[Dim.Z] != 0:
self.pmls["thickness"]["z0"] = 0
if self.coords[Dim.Z] != self.mpi_tasks[Dim.Z] - 1:
self.pmls["thickness"]["zmax"] = 0
if not self.is_coordinator():
# TODO: When scatter arrays properly, should initialise these to the local grid size
self.initialise_geometry_arrays()
self.ID = self.scatter_4d_array_with_positive_halo(self.ID)
self.solid = self.scatter_3d_array(self.solid)
self.rigidE = self.scatter_4d_array(self.rigidE)
self.rigidH = self.scatter_4d_array(self.rigidH)
# self._halo_swap_array(self.ID[0])
# self._halo_swap_array(self.ID[1])
# self._halo_swap_array(self.ID[2])
# self._halo_swap_array(self.ID[3])
# self._halo_swap_array(self.ID[4])
# self._halo_swap_array(self.ID[5])
def gather_grid_objects(self):
"""Gather sources and receivers."""
@@ -510,16 +523,6 @@ class MPIGrid(FDTDGrid):
self.hertziandipoles = self.gather_coord_objects(self.hertziandipoles)
self.transmissionlines = self.gather_coord_objects(self.transmissionlines)
def initialise_geometry_arrays(self, use_local_size=False):
# TODO: Remove this when scatter geometry arrays rather than broadcast
if use_local_size:
super().initialise_geometry_arrays()
else:
self.solid = np.ones(self.global_size, dtype=np.uint32)
self.rigidE = np.zeros((12, *self.global_size), dtype=np.int8)
self.rigidH = np.zeros((6, *self.global_size), dtype=np.int8)
self.ID = np.ones((6, *(self.global_size + 1)), dtype=np.uint32)
def _halo_swap(self, array: ndarray, dim: Dim, dir: Dir):
"""Perform a halo swap in the specifed dimension and direction.
@@ -666,7 +669,6 @@ class MPIGrid(FDTDGrid):
)
raise ValueError
self.calculate_local_extents()
self.set_halo_map()
self.distribute_grid()
@@ -751,6 +753,49 @@ class MPIGrid(FDTDGrid):
self.upper_extent = self.lower_extent + self.size
logger.debug(
f"Local grid size: {self.size}, Lower extent: {self.lower_extent}, Upper extent:"
f" {self.upper_extent}"
f"Global grid size: {self.global_size}, Local grid size: {self.size}, Lower extent:"
f" {self.lower_extent}, Upper extent: {self.upper_extent}"
)
def within_bounds(self, local_point: npt.NDArray[np.int32]) -> bool:
"""Check a local point is within the grid.
Args:
local_point: Point to check.
Returns:
within_bounds: True if the point is within the local grid
(i.e. this rank's grid). False otherwise.
Raises:
ValueError: Raised if the point is outside the global grid.
"""
gx, gy, gz = self.local_to_global_coordinate(local_point)
if gx < 0 or gx > self.gx:
raise ValueError("x")
if gy < 0 or gy > self.gy:
raise ValueError("y")
if gz < 0 or gz > self.gz:
raise ValueError("z")
return all(local_point >= self.negative_halo_offset) and all(local_point < self.size)
def within_pml(self, local_point: npt.NDArray[np.int32]) -> bool:
"""Check if the provided point is within a PML.
Args:
local_point: Point to check. This must use this grid's
coordinate system.
Returns:
within_pml: True if the point is within a PML.
"""
# within_pml check will only be valid if the point is also
# within the local grid
return (
super().within_pml(local_point)
and all(local_point >= self.negative_halo_offset)
and all(local_point <= self.size)
)
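Neighbour discovery and the PML logic above both lean on the Cartesian communicator: Cart.Shift returns the (source, destination) rank pair along a dimension, with MPI.PROC_NULL at a non-periodic edge, and set_pml_thickness zeroes any PML slab that faces a neighbouring rank rather than the domain boundary. A minimal sketch of the neighbour lookup (run with exactly 4 ranks for the invented 2x2x1 decomposition):

    import numpy as np
    from mpi4py import MPI

    cart = MPI.COMM_WORLD.Create_cart(dims=[2, 2, 1], periods=[False, False, False])
    neighbours = np.full((3, 2), -1, dtype=int)
    for dim in range(3):
        neighbours[dim] = cart.Shift(direction=dim, disp=1)
    # Entries equal to MPI.PROC_NULL mark edges of the global domain, which is
    # where a PML slab is kept; slabs facing a neighbouring rank get zero thickness.
    print(cart.coords, neighbours)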

View file

@@ -31,13 +31,12 @@ from .user_objects.cmds_multiuse import (
MaterialRange,
Rx,
RxArray,
Snapshot,
SoilPeplinski,
TransmissionLine,
VoltageSource,
Waveform,
)
from .user_objects.cmds_output import GeometryObjectsWrite, GeometryView
from .user_objects.cmds_output import GeometryObjectsWrite, GeometryView, Snapshot
logger = logging.getLogger(__name__)
@@ -86,7 +85,7 @@ def process_multicmds(multicmds):
resistance=float(tmp[4]),
waveform_id=tmp[5],
start=float(tmp[6]),
end=float(tmp[7]),
stop=float(tmp[7]),
)
else:
logger.exception(
@@ -122,7 +121,7 @@ def process_multicmds(multicmds):
p1=(float(tmp[1]), float(tmp[2]), float(tmp[3])),
waveform_id=tmp[4],
start=float(tmp[5]),
end=float(tmp[6]),
stop=float(tmp[6]),
)
else:
logger.exception(
@@ -158,7 +157,7 @@ def process_multicmds(multicmds):
p1=(float(tmp[1]), float(tmp[2]), float(tmp[3])),
waveform_id=tmp[4],
start=float(tmp[5]),
end=float(tmp[6]),
stop=float(tmp[6]),
)
else:
logger.exception(
@@ -192,7 +191,7 @@ def process_multicmds(multicmds):
resistance=float(tmp[4]),
waveform_id=tmp[5],
start=tmp[6],
end=tmp[7],
stop=tmp[7],
)
else:
logger.exception(

View file

@@ -19,7 +19,7 @@
import datetime
import logging
import sys
from typing import List, Sequence
from typing import Dict, List, Optional, Sequence
import humanize
import numpy as np
@@ -29,6 +29,10 @@ from colorama import Fore, Style, init
from gprMax.grid.cuda_grid import CUDAGrid
from gprMax.grid.opencl_grid import OpenCLGrid
from gprMax.output_controllers.geometry_objects import GeometryObject
from gprMax.output_controllers.geometry_view_lines import GeometryViewLines
from gprMax.output_controllers.geometry_view_voxels import GeometryViewVoxels
from gprMax.output_controllers.geometry_views import GeometryView, save_geometry_views
from gprMax.subgrids.grid import SubGridBaseGrid
init()
@@ -36,13 +40,11 @@ init()
from tqdm import tqdm
import gprMax.config as config
from .fields_outputs import write_hdf5_outputfile
from .geometry_outputs import GeometryObjects, GeometryView, save_geometry_views
from .grid.fdtd_grid import FDTDGrid
from .snapshots import save_snapshots
from .utilities.host_info import mem_check_build_all, mem_check_run_all, set_omp_threads
from .utilities.utilities import get_terminal_width
from gprMax.fields_outputs import write_hdf5_outputfile
from gprMax.grid.fdtd_grid import FDTDGrid
from gprMax.snapshots import Snapshot, save_snapshots
from gprMax.utilities.host_info import mem_check_build_all, mem_check_run_all, set_omp_threads
from gprMax.utilities.utilities import get_terminal_width
logger = logging.getLogger(__name__)
@@ -64,7 +66,7 @@ class Model:
self.subgrids: List[SubGridBaseGrid] = []
self.geometryviews: List[GeometryView] = []
self.geometryobjects: List[GeometryObjects] = []
self.geometryobjects: List[GeometryObject] = []
# Monitor memory usage
self.p = None
@@ -170,6 +172,151 @@ class Model:
return grid
def set_size(self, size: npt.NDArray[np.int32]):
"""Set size of the model.
Args:
size: Array to set the size (3 dimensional).
"""
self.nx, self.ny, self.nz = size
def add_geometry_object(
self,
grid: FDTDGrid,
start: npt.NDArray[np.int32],
stop: npt.NDArray[np.int32],
basefilename: str,
) -> Optional[GeometryObject]:
"""Add a geometry object to the model.
Args:
grid: Grid to create a geometry object for.
start: Lower extent of the geometry object (x, y, z).
stop: Upper extent of the geometry object (x, y, z).
basefilename: Output filename of the geometry object.
Returns:
geometry_object: The created geometry object.
"""
geometry_object = GeometryObject(
grid, start[0], start[1], start[2], stop[0], stop[1], stop[2], basefilename
)
self.geometryobjects.append(geometry_object)
return geometry_object
def add_geometry_view_voxels(
self,
grid: FDTDGrid,
start: npt.NDArray[np.int32],
stop: npt.NDArray[np.int32],
dl: npt.NDArray[np.int32],
filename: str,
) -> Optional[GeometryViewVoxels]:
"""Add a voxel geometry view to the model.
Args:
grid: Grid to create a geometry view for.
start: Lower extent of the geometry view (x, y, z).
stop: Upper extent of the geometry view (x, y, z).
dl: Discritisation of the geometry view (x, y, z).
filename: Output filename of the geometry view.
Returns:
geometry_view: The created geometry view.
"""
geometry_view = GeometryViewVoxels(
start[0],
start[1],
start[2],
stop[0],
stop[1],
stop[2],
dl[0],
dl[1],
dl[2],
filename,
grid,
)
self.geometryviews.append(geometry_view)
return geometry_view
def add_geometry_view_lines(
self,
grid: FDTDGrid,
start: npt.NDArray[np.int32],
stop: npt.NDArray[np.int32],
filename: str,
) -> Optional[GeometryViewLines]:
"""Add a lines geometry view to the model.
Args:
grid: Grid to create a geometry view for.
start: Lower extent of the geometry view (x, y, z).
stop: Upper extent of the geometry view (x, y, z).
filename: Output filename of the geometry view.
Returns:
geometry_view: The created geometry view.
"""
geometry_view = GeometryViewLines(
start[0],
start[1],
start[2],
stop[0],
stop[1],
stop[2],
filename,
grid,
)
self.geometryviews.append(geometry_view)
return geometry_view
def add_snapshot(
self,
grid: FDTDGrid,
start: npt.NDArray[np.int32],
stop: npt.NDArray[np.int32],
dl: npt.NDArray[np.int32],
time: int,
filename: str,
fileext: str,
outputs: Dict[str, bool],
) -> Optional[Snapshot]:
"""Add a snapshot to the provided grid.
Args:
grid: Grid to create a snapshot for.
start: Lower extent of the snapshot (x, y, z).
stop: Upper extent of the snapshot (x, y, z).
dl: Discritisation of the snapshot (x, y, z).
time: Iteration number to take the snapshot on
filename: Output filename of the snapshot.
fileext: File extension of the snapshot.
outputs: Fields to use in the snapshot.
Returns:
snapshot: The created snapshot.
"""
snapshot = Snapshot(
start[0],
start[1],
start[2],
stop[0],
stop[1],
stop[2],
dl[0],
dl[1],
dl[2],
time,
filename,
fileext,
outputs,
grid,
)
# TODO: Move snapshots into the Model
grid.snapshots.append(snapshot)
return snapshot
def build(self):
"""Builds the Yee cells for a model."""
@@ -202,8 +349,11 @@ class Model:
and not self.geometryobjects
and config.sim_config.args.geometry_only
):
logger.exception("\nNo geometry views or geometry objects found.")
raise ValueError
logger.warning(
"Geometry only run specified, but no geometry views or geometry objects found."
)
return
save_geometry_views(self.geometryviews)
if self.geometryobjects:
@@ -219,7 +369,7 @@ class Model:
file=sys.stdout,
disable=not config.sim_config.general["progressbars"],
)
go.write_hdf5(self.title, self.G, pbar)
go.write_hdf5(self.title, pbar)
pbar.close()
logger.info("")

View file

@@ -1,14 +1,18 @@
import logging
from typing import Optional
from typing import Dict, Optional
import numpy as np
import numpy.typing as npt
from mpi4py import MPI
from gprMax import config
from gprMax.fields_outputs import write_hdf5_outputfile
from gprMax.grid.mpi_grid import MPIGrid
from gprMax.model import Model
from gprMax.snapshots import save_snapshots
from gprMax.output_controllers.geometry_objects import MPIGeometryObject
from gprMax.output_controllers.geometry_view_lines import MPIGeometryViewLines
from gprMax.output_controllers.geometry_view_voxels import MPIGeometryViewVoxels
from gprMax.snapshots import MPISnapshot, Snapshot, save_snapshots
logger = logging.getLogger(__name__)
@@ -53,55 +57,182 @@ class MPIModel(Model):
def is_coordinator(self):
return self.rank == 0
def build_geometry(self):
self._broadcast_model()
def set_size(self, size: npt.NDArray[np.int32]):
super().set_size(size)
super().build_geometry()
self.G.calculate_local_extents()
self._filter_geometry_objects()
def add_geometry_object(
self,
grid: MPIGrid,
start: npt.NDArray[np.int32],
stop: npt.NDArray[np.int32],
basefilename: str,
) -> Optional[MPIGeometryObject]:
"""Add a geometry object to the model.
def _broadcast_model(self):
self.title = self.comm.bcast(self.title)
Args:
grid: Grid to create a geometry object for
start: Lower extent of the geometry object (x, y, z)
stop: Upper extent of the geometry object (x, y, z)
basefilename: Output filename of the geometry object
self.nx = self.comm.bcast(self.nx)
self.ny = self.comm.bcast(self.ny)
self.nz = self.comm.bcast(self.nz)
Returns:
geometry_object: The new geometry object or None if no
geometry object was created.
"""
if grid.local_bounds_overlap_grid(start, stop):
geometry_object = MPIGeometryObject(
grid, start[0], start[1], start[2], stop[0], stop[1], stop[2], basefilename
)
self.geometryobjects.append(geometry_object)
return geometry_object
else:
# The MPIGridView created by the MPIGeometryObject will
# create a new communicator using MPI_Split. Calling this
# here prevents deadlock if not all ranks create the new
# MPIGeometryObject.
grid.comm.Split(MPI.UNDEFINED)
return None
self.comm.Bcast(self.dl)
self.dt = self.comm.bcast(self.dt)
def add_geometry_view_voxels(
self,
grid: MPIGrid,
start: npt.NDArray[np.int32],
stop: npt.NDArray[np.int32],
dl: npt.NDArray[np.int32],
filename: str,
) -> Optional[MPIGeometryViewVoxels]:
"""Add a voxel geometry view to the model.
self.iterations = self.comm.bcast(self.iterations)
Args:
grid: Grid to create a geometry view for.
start: Lower extent of the geometry view (x, y, z).
stop: Upper extent of the geometry view (x, y, z).
dl: Discritisation of the geometry view (x, y, z).
filename: Output filename of the geometry view.
self.srcsteps = self.comm.bcast(self.srcsteps)
self.rxsteps = self.comm.bcast(self.rxsteps)
Returns:
geometry_view: The new geometry view or None if no geometry
view was created.
"""
if grid.local_bounds_overlap_grid(start, stop):
geometry_view = MPIGeometryViewVoxels(
start[0],
start[1],
start[2],
stop[0],
stop[1],
stop[2],
dl[0],
dl[1],
dl[2],
filename,
grid,
)
self.geometryviews.append(geometry_view)
return geometry_view
else:
# The MPIGridView created by MPIGeometryViewVoxels will
# create a new communicator using MPI_Split. Calling this
# here prevents deadlock if not all ranks create the new
# MPIGeometryViewVoxels.
grid.comm.Split(MPI.UNDEFINED)
return None
model_config = config.get_model_config()
model_config.materials["maxpoles"] = self.comm.bcast(model_config.materials["maxpoles"])
model_config.ompthreads = self.comm.bcast(model_config.ompthreads)
def add_geometry_view_lines(
self,
grid: MPIGrid,
start: npt.NDArray[np.int32],
stop: npt.NDArray[np.int32],
filename: str,
) -> Optional[MPIGeometryViewLines]:
"""Add a lines geometry view to the model.
def _filter_geometry_objects(self):
objects = self.comm.bcast(self.geometryobjects)
self.geometryobjects = []
Args:
grid: Grid to create a geometry view for.
start: Lower extent of the geometry view (x, y, z).
stop: Upper extent of the geometry view (x, y, z).
filename: Output filename of the geometry view.
for go in objects:
start = np.array([go.xs, go.ys, go.zs], dtype=np.intc)
stop = np.array([go.xf, go.yf, go.zf], dtype=np.intc)
if self.G.global_bounds_overlap_local_grid(start, stop):
comm = self.comm.Split()
assert isinstance(comm, MPI.Intracomm)
start_grid_coord = self.G.get_grid_coord_from_coordinate(start)
stop_grid_coord = self.G.get_grid_coord_from_coordinate(stop) + 1
go.comm = comm.Create_cart((stop_grid_coord - start_grid_coord).tolist())
Returns:
geometry_view: The new geometry view or None if no geometry
view was created.
"""
if grid.local_bounds_overlap_grid(start, stop):
geometry_view = MPIGeometryViewLines(
start[0],
start[1],
start[2],
stop[0],
stop[1],
stop[2],
filename,
grid,
)
self.geometryviews.append(geometry_view)
return geometry_view
else:
# The MPIGridView created by MPIGeometryViewLines will
# create a new communicator using MPI_Split. Calling this
# here prevents deadlock if not all ranks create the new
# MPIGeometryViewLines.
grid.comm.Split(MPI.UNDEFINED)
return None
go.global_size = np.array([go.nx, go.ny, go.nz], dtype=np.intc)
start, stop, offset = self.G.limit_global_bounds_to_within_local_grid(start, stop)
go.size = stop - start
go.start = start
go.stop = stop
go.offset = offset
self.geometryobjects.append(go)
else:
self.comm.Split(MPI.UNDEFINED)
def add_snapshot(
self,
grid: MPIGrid,
start: npt.NDArray[np.int32],
stop: npt.NDArray[np.int32],
dl: npt.NDArray[np.int32],
time: int,
filename: str,
fileext: str,
outputs: Dict[str, bool],
) -> Optional[MPISnapshot]:
"""Add a snapshot to the provided grid.
Args:
grid: Grid to create a snapshot for.
start: Lower extent of the snapshot (x, y, z).
stop: Upper extent of the snapshot (x, y, z).
dl: Discritisation of the snapshot (x, y, z).
time: Iteration number to take the snapshot on
filename: Output filename of the snapshot.
fileext: File extension of the snapshot.
outputs: Fields to use in the snapshot.
Returns:
snapshot: The new snapshot or None if no snapshot was
created.
"""
if grid.local_bounds_overlap_grid(start, stop):
snapshot = MPISnapshot(
start[0],
start[1],
start[2],
stop[0],
stop[1],
stop[2],
dl[0],
dl[1],
dl[2],
time,
filename,
fileext,
outputs,
grid,
)
# TODO: Move snapshots into the Model
grid.snapshots.append(snapshot)
return snapshot
else:
# The MPIGridView created by MPISnapshot will create a new
# communicator using MPI_Split. Calling this here prevents
# deadlock if not all ranks create the new MPISnapshot.
grid.comm.Split(MPI.UNDEFINED)
return None
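The broadcast calls visible in this hunk use both of mpi4py's APIs: lowercase comm.bcast pickles an arbitrary Python object and returns it, while uppercase comm.Bcast fills an existing buffer (such as a NumPy array) in place, which is why self.dl is passed directly rather than reassigned. A small illustration of the difference:

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD

    # Object broadcast: every rank receives the pickled return value.
    title = "my model" if comm.rank == 0 else None
    title = comm.bcast(title, root=0)

    # Buffer broadcast: the array is filled in place, nothing is returned.
    dl = np.array([0.002, 0.002, 0.002]) if comm.rank == 0 else np.zeros(3)
    comm.Bcast(dl, root=0)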
def write_output_data(self):
"""Writes output data, i.e. field data for receivers and snapshots to

View file

@@ -0,0 +1,175 @@
from io import TextIOWrapper
from pathlib import Path
from typing import Generic
import h5py
import numpy as np
from mpi4py import MPI
from tqdm import tqdm
from gprMax import config
from gprMax._version import __version__
from gprMax.grid.fdtd_grid import FDTDGrid
from gprMax.grid.mpi_grid import MPIGrid
from gprMax.materials import Material
from gprMax.output_controllers.grid_view import GridType, GridView, MPIGridView
class GeometryObject(Generic[GridType]):
"""Geometry objects to be written to file."""
@property
def GRID_VIEW_TYPE(self) -> type[GridView]:
return GridView
def __init__(
self, grid: GridType, xs: int, ys: int, zs: int, xf: int, yf: int, zf: int, filename: str
):
"""
Args:
xs, xf, ys, yf, zs, zf: ints for extent of the volume in cells.
filename: string for filename.
"""
self.grid_view = self.GRID_VIEW_TYPE(grid, xs, ys, zs, xf, yf, zf)
# Set filenames
parts = config.sim_config.input_file_path.with_suffix("").parts
self.filename = Path(*parts[:-1], filename)
self.filename_hdf5 = self.filename.with_suffix(".h5")
self.filename_materials = Path(f"{self.filename}_materials")
self.filename_materials = self.filename_materials.with_suffix(".txt")
# Sizes of arrays to write necessary to update progress bar
self.solidsize = (float)(np.prod(self.grid_view.size) * np.dtype(np.uint32).itemsize)
self.rigidsize = (float)(18 * np.prod(self.grid_view.size) * np.dtype(np.int8).itemsize)
self.IDsize = (float)(6 * np.prod(self.grid_view.size + 1) * np.dtype(np.uint32).itemsize)
self.datawritesize = self.solidsize + self.rigidsize + self.IDsize
@property
def grid(self) -> GridType:
return self.grid_view.grid
def write_metadata(self, file_handler: h5py.File, title: str):
file_handler.attrs["gprMax"] = __version__
file_handler.attrs["Title"] = title
file_handler.attrs["dx_dy_dz"] = (self.grid.dx, self.grid.dy, self.grid.dz)
def output_material(self, material: Material, file: TextIOWrapper):
file.write(
f"#material: {material.er:g} {material.se:g} "
f"{material.mr:g} {material.sm:g} {material.ID}\n"
)
if hasattr(material, "poles"):
if "debye" in material.type:
dispersionstr = "#add_dispersion_debye: " f"{material.poles:g} "
for pole in range(material.poles):
dispersionstr += f"{material.deltaer[pole]:g} " f"{material.tau[pole]:g} "
elif "lorenz" in material.type:
dispersionstr = f"#add_dispersion_lorenz: " f"{material.poles:g} "
for pole in range(material.poles):
dispersionstr += (
f"{material.deltaer[pole]:g} "
f"{material.tau[pole]:g} "
f"{material.alpha[pole]:g} "
)
elif "drude" in material.type:
dispersionstr = f"#add_dispersion_drude: " f"{material.poles:g} "
for pole in range(material.poles):
dispersionstr += f"{material.tau[pole]:g} " f"{material.alpha[pole]:g} "
dispersionstr += material.ID
file.write(dispersionstr + "\n")
def write_hdf5(self, title: str, pbar: tqdm):
"""Writes a geometry objects file in HDF5 format.
Args:
G: FDTDGrid class describing a grid in a model.
pbar: Progress bar class instance.
"""
self.grid_view.initialise_materials()
ID = self.grid_view.get_ID()
data = self.grid_view.get_solid().astype(np.int16)
rigidE = self.grid_view.get_rigidE()
rigidH = self.grid_view.get_rigidH()
ID = self.grid_view.map_to_view_materials(ID)
data = self.grid_view.map_to_view_materials(data)
with h5py.File(self.filename_hdf5, "w") as fdata:
self.write_metadata(fdata, title)
fdata["/data"] = data
pbar.update(self.solidsize)
fdata["/rigidE"] = rigidE
fdata["/rigidH"] = rigidH
pbar.update(self.rigidsize)
fdata["/ID"] = ID
pbar.update(self.IDsize)
# Write materials list to a text file
with open(self.filename_materials, "w") as fmaterials:
for material in self.grid_view.materials:
self.output_material(material, fmaterials)
class MPIGeometryObject(GeometryObject[MPIGrid]):
@property
def GRID_VIEW_TYPE(self) -> type[MPIGridView]:
return MPIGridView
def write_hdf5(self, title: str, pbar: tqdm):
"""Writes a geometry objects file in HDF5 format.
Args:
G: FDTDGrid class describing a grid in a model.
pbar: Progress bar class instance.
"""
assert isinstance(self.grid_view, self.GRID_VIEW_TYPE)
self.grid_view.initialise_materials()
ID = self.grid_view.get_ID()
data = self.grid_view.get_solid().astype(np.int16)
rigidE = self.grid_view.get_rigidE()
rigidH = self.grid_view.get_rigidH()
ID = self.grid_view.map_to_view_materials(ID)
data = self.grid_view.map_to_view_materials(data)
with h5py.File(self.filename_hdf5, "w", driver="mpio", comm=self.grid_view.comm) as fdata:
self.write_metadata(fdata, title)
dset_slice = self.grid_view.get_3d_output_slice()
dset = fdata.create_dataset("/data", self.grid_view.global_size, dtype=data.dtype)
dset[dset_slice] = data
pbar.update(self.solidsize)
rigid_E_dataset = fdata.create_dataset(
"/rigidE", (12, *self.grid_view.global_size), dtype=rigidE.dtype
)
rigid_E_dataset[:, dset_slice[0], dset_slice[1], dset_slice[2]] = rigidE
rigid_H_dataset = fdata.create_dataset(
"/rigidH", (6, *self.grid_view.global_size), dtype=rigidH.dtype
)
rigid_H_dataset[:, dset_slice[0], dset_slice[1], dset_slice[2]] = rigidH
pbar.update(self.rigidsize)
dset_slice = self.grid_view.get_3d_output_slice(upper_bound_exclusive=False)
dset = fdata.create_dataset(
"/ID", (6, *(self.grid_view.global_size + 1)), dtype=ID.dtype
)
dset[:, dset_slice[0], dset_slice[1], dset_slice[2]] = ID
pbar.update(self.IDsize)
# Write materials list to a text file
if self.grid_view.materials is not None:
with open(self.filename_materials, "w") as materials_file:
for material in self.grid_view.materials:
self.output_material(material, materials_file)
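MPIGeometryObject.write_hdf5 produces one shared output file: the file is opened with h5py's MPI-IO driver, datasets are created collectively at the global size, and each rank writes only its own slice. A reduced sketch of that approach (requires h5py built with parallel HDF5; the shapes and the slab decomposition along x are invented, and the rank count is assumed to divide the x dimension):

    import h5py
    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    global_shape = (64, 64, 64)
    nx = global_shape[0] // comm.size            # each rank owns a slab along x
    local = np.full((nx, *global_shape[1:]), comm.rank, dtype=np.int16)

    with h5py.File("geometry.h5", "w", driver="mpio", comm=comm) as f:
        dset = f.create_dataset("/data", global_shape, dtype=local.dtype)
        dset[comm.rank * nx:(comm.rank + 1) * nx] = local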

View file

@@ -0,0 +1,185 @@
# Copyright (C) 2015-2024: The University of Edinburgh, United Kingdom
# Authors: Craig Warren, Antonis Giannopoulos, and John Hartley
#
# This file is part of gprMax.
#
# gprMax is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gprMax is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gprMax. If not, see <http://www.gnu.org/licenses/>.
import logging
import numpy as np
from gprMax._version import __version__
from gprMax.cython.geometry_outputs import get_line_properties
from gprMax.grid.fdtd_grid import FDTDGrid
from gprMax.grid.mpi_grid import MPIGrid
from gprMax.output_controllers.geometry_views import GeometryView, Metadata, MPIMetadata
from gprMax.output_controllers.grid_view import GridType, MPIGridView
from gprMax.subgrids.grid import SubGridBaseGrid
from gprMax.vtkhdf_filehandlers.vtk_unstructured_grid import VtkUnstructuredGrid
from gprMax.vtkhdf_filehandlers.vtkhdf import VtkCellType
logger = logging.getLogger(__name__)
class GeometryViewLines(GeometryView[GridType]):
"""Unstructured grid for a per-cell-edge geometry view."""
def __init__(
self,
xs: int,
ys: int,
zs: int,
xf: int,
yf: int,
zf: int,
filename: str,
grid: GridType,
):
super().__init__(xs, ys, zs, xf, yf, zf, 1, 1, 1, filename, grid)
def prep_vtk(self):
"""Prepares data for writing to VTKHDF file."""
self.grid_view.initialise_materials(filter_materials=False)
ID = self.grid_view.get_ID()
x = np.arange(self.grid_view.nx + 1, dtype=np.float64)
y = np.arange(self.grid_view.ny + 1, dtype=np.float64)
z = np.arange(self.grid_view.nz + 1, dtype=np.float64)
coords = np.meshgrid(x, y, z, indexing="ij")
self.points = np.vstack(list(map(np.ravel, coords))).T
self.points += self.grid_view.start
self.points *= self.grid_view.step * self.grid.dl
# Add offset to subgrid geometry to correctly locate within main grid
if isinstance(self.grid, SubGridBaseGrid):
offset = [self.grid.i0, self.grid.j0, self.grid.k0]
self.points += offset * self.grid.dl * self.grid.ratio
# Each point is the 'source' for 3 lines.
# NB: Excluding points at the far edge of the geometry as those
# are the 'source' for no lines
n_lines = 3 * np.prod(self.grid_view.size)
self.cell_types = np.full(n_lines, VtkCellType.LINE)
self.cell_offsets = np.arange(0, 2 * n_lines + 2, 2, dtype=np.intc)
self.connectivity, self.material_data = get_line_properties(
n_lines, *self.grid_view.size, ID
)
assert isinstance(self.connectivity, np.ndarray)
assert isinstance(self.material_data, np.ndarray)
# Write information about any PMLs, sources, receivers
self.metadata = Metadata(self.grid_view, averaged_materials=True, materials_only=True)
# Number of bytes of data to be written to file
self.nbytes = (
self.points.nbytes
+ self.cell_types.nbytes
+ self.connectivity.nbytes
+ self.cell_offsets.nbytes
+ self.material_data.nbytes
)
# Use sorted material IDs rather than default ordering
self.material_data = self.grid_view.map_to_view_materials(self.material_data)
def write_vtk(self):
"""Writes geometry information to a VTKHDF file."""
# Write the VTK file
with VtkUnstructuredGrid(
self.filename,
self.points,
self.cell_types,
self.connectivity,
self.cell_offsets,
) as f:
f.add_cell_data("Material", self.material_data)
self.metadata.write_to_vtkhdf(f)
class MPIGeometryViewLines(GeometryViewLines[MPIGrid]):
"""Image data for a per-cell geometry view."""
@property
def GRID_VIEW_TYPE(self) -> type[MPIGridView]:
return MPIGridView
def prep_vtk(self):
"""Prepares data for writing to VTKHDF file."""
assert isinstance(self.grid_view, MPIGridView)
self.grid_view.initialise_materials(filter_materials=False)
ID = self.grid_view.get_ID()
x = np.arange(self.grid_view.gx + 1, dtype=np.float64)
y = np.arange(self.grid_view.gy + 1, dtype=np.float64)
z = np.arange(self.grid_view.gz + 1, dtype=np.float64)
coords = np.meshgrid(x, y, z, indexing="ij")
self.points = np.vstack(list(map(np.ravel, coords))).T
self.points += self.grid_view.global_start
self.points *= self.grid_view.step * self.grid.dl
# Each point is the 'source' for 3 lines.
# NB: Excluding points at the far edge of the geometry as those
# are the 'source' for no lines
n_lines = 3 * np.prod(self.grid_view.global_size)
self.cell_types = np.full(n_lines, VtkCellType.LINE)
self.cell_offsets = np.arange(0, 2 * n_lines + 2, 2, dtype=np.intc)
self.connectivity, self.material_data = get_line_properties(
n_lines, *self.grid_view.size, ID
)
assert isinstance(self.connectivity, np.ndarray)
assert isinstance(self.material_data, np.ndarray)
# Write information about any PMLs, sources, receivers
self.metadata = MPIMetadata(self.grid_view, averaged_materials=True, materials_only=True)
# Number of bytes of data to be written to file
self.nbytes = (
self.points.nbytes
+ self.cell_types.nbytes
+ self.connectivity.nbytes
+ self.cell_offsets.nbytes
+ self.material_data.nbytes
)
# Use global material IDs rather than local IDs
self.material_data = self.grid_view.map_to_view_materials(self.material_data)
def write_vtk(self):
"""Writes geometry information to a VTKHDF file."""
assert isinstance(self.grid_view, MPIGridView)
with VtkUnstructuredGrid(
self.filename,
self.points,
self.cell_types,
self.connectivity,
self.cell_offsets,
comm=self.grid_view.comm,
) as f:
self.metadata.write_to_vtkhdf(f)
f.add_cell_data("Material", self.material_data, self.grid_view.offset)

View file

@@ -0,0 +1,102 @@
# Copyright (C) 2015-2024: The University of Edinburgh, United Kingdom
# Authors: Craig Warren, Antonis Giannopoulos, and John Hartley
#
# This file is part of gprMax.
#
# gprMax is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gprMax is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gprMax. If not, see <http://www.gnu.org/licenses/>.
import logging
import numpy as np
from gprMax._version import __version__
from gprMax.grid.mpi_grid import MPIGrid
from gprMax.output_controllers.geometry_views import GeometryView, Metadata, MPIMetadata
from gprMax.output_controllers.grid_view import GridType, MPIGridView
from gprMax.subgrids.grid import SubGridBaseGrid
from gprMax.vtkhdf_filehandlers.vtk_image_data import VtkImageData
logger = logging.getLogger(__name__)
class GeometryViewVoxels(GeometryView[GridType]):
"""Image data for a per-cell geometry view."""
def prep_vtk(self):
"""Prepares data for writing to VTKHDF file."""
self.material_data = self.grid_view.get_solid()
if isinstance(self.grid, SubGridBaseGrid):
self.origin = np.array(
[
(self.grid.i0 * self.grid.dx * self.grid.ratio),
(self.grid.j0 * self.grid.dy * self.grid.ratio),
(self.grid.k0 * self.grid.dz * self.grid.ratio),
]
)
else:
self.origin = self.grid_view.start * self.grid.dl
self.spacing = self.grid_view.step * self.grid.dl
self.nbytes = self.material_data.nbytes
# Write information about any PMLs, sources, receivers
self.metadata = Metadata(self.grid_view)
def write_vtk(self):
"""Writes geometry information to a VTKHDF file."""
with VtkImageData(self.filename, self.grid_view.size, self.origin, self.spacing) as f:
f.add_cell_data("Material", self.material_data)
self.metadata.write_to_vtkhdf(f)
class MPIGeometryViewVoxels(GeometryViewVoxels[MPIGrid]):
"""Image data for a per-cell geometry view."""
@property
def GRID_VIEW_TYPE(self) -> type[MPIGridView]:
return MPIGridView
def prep_vtk(self):
"""Prepares data for writing to VTKHDF file."""
assert isinstance(self.grid_view, self.GRID_VIEW_TYPE)
self.material_data = self.grid_view.get_solid()
self.origin = self.grid_view.global_start * self.grid.dl
self.spacing = self.grid_view.step * self.grid.dl
self.nbytes = self.material_data.nbytes
# Write information about any PMLs, sources, receivers
self.metadata = MPIMetadata(self.grid_view)
def write_vtk(self):
"""Writes geometry information to a VTKHDF file."""
assert isinstance(self.grid_view, self.GRID_VIEW_TYPE)
with VtkImageData(
self.filename,
self.grid_view.global_size,
self.origin,
self.spacing,
comm=self.grid_view.comm,
) as f:
self.metadata.write_to_vtkhdf(f)
f.add_cell_data("Material", self.material_data, self.grid_view.offset)

View file

@@ -0,0 +1,312 @@
# Copyright (C) 2015-2024: The University of Edinburgh, United Kingdom
# Authors: Craig Warren, Antonis Giannopoulos, and John Hartley
#
# This file is part of gprMax.
#
# gprMax is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gprMax is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gprMax. If not, see <http://www.gnu.org/licenses/>.
import logging
import sys
from abc import abstractmethod
from pathlib import Path
from typing import Dict, Generic, List, Optional, Sequence, Tuple, Union
import h5py
import numpy as np
import numpy.typing as npt
from mpi4py import MPI
from tqdm import tqdm
import gprMax.config as config
from gprMax._version import __version__
from gprMax.grid.mpi_grid import MPIGrid
from gprMax.output_controllers.grid_view import GridType, GridView, MPIGridView
from gprMax.receivers import Rx
from gprMax.sources import Source
from gprMax.utilities.utilities import get_terminal_width
from gprMax.vtkhdf_filehandlers.vtkhdf import VtkHdfFile
logger = logging.getLogger(__name__)
def save_geometry_views(gvs: "List[GeometryView]"):
"""Creates and saves geometryviews.
Args:
gvs: list of all GeometryViews.
"""
logger.info("")
for i, gv in enumerate(gvs):
gv.set_filename()
gv.prep_vtk()
pbar = tqdm(
total=gv.nbytes,
unit="byte",
unit_scale=True,
desc=f"Writing geometry view file {i + 1}/{len(gvs)}, {gv.filename.name}",
ncols=get_terminal_width() - 1,
file=sys.stdout,
disable=not config.sim_config.general["progressbars"],
)
gv.write_vtk()
pbar.update(gv.nbytes)
pbar.close()
logger.info("")
class GeometryView(Generic[GridType]):
"""Base class for Geometry Views."""
FILE_EXTENSION = ".vtkhdf"
@property
def GRID_VIEW_TYPE(self) -> type[GridView]:
return GridView
def __init__(
self,
xs: int,
ys: int,
zs: int,
xf: int,
yf: int,
zf: int,
dx: int,
dy: int,
dz: int,
filename: str,
grid: GridType,
):
"""
Args:
xs, xf, ys, yf, zs, zf: ints for extent of geometry view in cells.
dx, dy, dz: ints for spatial discretisation of geometry view in cells.
filename: string for filename.
grid: FDTDGrid class describing a grid in a model.
"""
self.grid_view = self.GRID_VIEW_TYPE(grid, xs, ys, zs, xf, yf, zf, dx, dy, dz)
self.filenamebase = filename
self.nbytes = None
self.material_data = None
self.materials = None
@property
def grid(self) -> GridType:
return self.grid_view.grid
def set_filename(self):
"""Construct filename from user-supplied name and model number."""
parts = config.get_model_config().output_file_path.parts
self.filename = Path(
*parts[:-1], self.filenamebase + config.get_model_config().appendmodelnumber
).with_suffix(self.FILE_EXTENSION)
@abstractmethod
def prep_vtk(self):
pass
@abstractmethod
def write_vtk(self):
pass
class Metadata(Generic[GridType]):
"""Comments can be strings included in the header of XML VTK file, and are
used to hold extra (gprMax) information about the VTK data.
"""
def __init__(
self,
grid_view: GridView[GridType],
averaged_materials: bool = False,
materials_only: bool = False,
):
self.grid_view = grid_view
self.averaged_materials = averaged_materials
self.materials_only = materials_only
self.gprmax_version = __version__
self.dx_dy_dz = self.dx_dy_dz_comment()
self.nx_ny_nz = self.nx_ny_nz_comment()
self.materials = self.materials_comment()
# Write information on PMLs, sources, and receivers
if not self.materials_only:
# Information on PML thickness
self.pml_thickness = self.pml_gv_comment()
sources = (
self.grid.hertziandipoles
+ self.grid.magneticdipoles
+ self.grid.voltagesources
+ self.grid.transmissionlines
)
sources_comment = self.srcs_rx_gv_comment(sources)
if sources_comment is None:
self.source_ids = self.source_positions = None
else:
self.source_ids, self.source_positions = sources_comment
receivers_comment = self.srcs_rx_gv_comment(self.grid.rxs)
if receivers_comment is None:
self.receiver_ids = self.receiver_positions = None
else:
self.receiver_ids, self.receiver_positions = receivers_comment
@property
def grid(self) -> GridType:
return self.grid_view.grid
def write_to_vtkhdf(self, file_handler: VtkHdfFile):
file_handler.add_field_data("gprMax_version", self.gprmax_version)
file_handler.add_field_data("dx_dy_dz", self.dx_dy_dz)
file_handler.add_field_data("nx_ny_nz", self.nx_ny_nz)
self.write_material_ids(file_handler)
if not self.materials_only:
if self.pml_thickness is not None:
file_handler.add_field_data("pml_thickness", self.pml_thickness)
if self.source_ids is not None and self.source_positions is not None:
file_handler.add_field_data("source_ids", self.source_ids)
file_handler.add_field_data("sources", self.source_positions)
if self.receiver_ids is not None and self.receiver_positions is not None:
file_handler.add_field_data("receiver_ids", self.receiver_ids)
file_handler.add_field_data("receivers", self.receiver_positions)
def write_material_ids(self, file_handler: VtkHdfFile):
file_handler.add_field_data("material_ids", self.materials)
def pml_gv_comment(self) -> Optional[npt.NDArray[np.int64]]:
grid = self.grid
if not grid.pmls["slabs"]:
return None
# Only render PMLs if they are in the geometry view
thickness: Dict[str, int] = grid.pmls["thickness"]
gv_pml_depth = dict.fromkeys(thickness, 0)
if self.grid_view.xs < thickness["x0"]:
gv_pml_depth["x0"] = thickness["x0"] - self.grid_view.xs
if self.grid_view.ys < thickness["y0"]:
gv_pml_depth["y0"] = thickness["y0"] - self.grid_view.ys
if thickness["z0"] - self.grid_view.zs > 0:
gv_pml_depth["z0"] = thickness["z0"] - self.grid_view.zs
if self.grid_view.xf > grid.nx - thickness["xmax"]:
gv_pml_depth["xmax"] = self.grid_view.xf - (grid.nx - thickness["xmax"])
if self.grid_view.yf > grid.ny - thickness["ymax"]:
gv_pml_depth["ymax"] = self.grid_view.yf - (grid.ny - thickness["ymax"])
if self.grid_view.zf > grid.nz - thickness["zmax"]:
gv_pml_depth["zmax"] = self.grid_view.zf - (grid.nz - thickness["zmax"])
return np.array(list(gv_pml_depth.values()), dtype=np.int64)
def srcs_rx_gv_comment(
self, srcs: Union[Sequence[Source], List[Rx]]
) -> Optional[Tuple[List[str], npt.NDArray[np.float64]]]:
"""Used to name sources and/or receivers."""
if not srcs:
return None
names: List[str] = []
positions = np.empty((len(srcs), 3))
for index, src in enumerate(srcs):
position = src.coord * self.grid.dl
names.append(src.ID)
positions[index] = position
return names, positions
def dx_dy_dz_comment(self) -> npt.NDArray[np.float64]:
return self.grid.dl
def nx_ny_nz_comment(self) -> npt.NDArray[np.int32]:
return self.grid.size
def materials_comment(self) -> Optional[List[str]]:
if hasattr(self.grid_view, "materials"):
materials = self.grid_view.materials
else:
materials = self.grid.materials
if materials is None:
return None
if not self.averaged_materials:
return [m.ID for m in materials if m.type != "dielectric-smoothed"]
else:
return [m.ID for m in materials]
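# A tiny numeric example of the PML depth bookkeeping in pml_gv_comment()
# above: if a view starts inside the x0 PML, the stored depth is how far
# the PML reaches into the view. The numbers are invented for illustration.
thickness_x0 = 10   # PML cells on the x0 face of the grid
view_xs = 4         # the view starts 4 cells into the domain
gv_pml_depth_x0 = thickness_x0 - view_xs if view_xs < thickness_x0 else 0
print(gv_pml_depth_x0)  # 6 cells of PML are visible in the view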
class MPIMetadata(Metadata[MPIGrid]):
def nx_ny_nz_comment(self) -> npt.NDArray[np.int32]:
return self.grid.global_size
def pml_gv_comment(self) -> Optional[npt.NDArray[np.int64]]:
gv_pml_depth = super().pml_gv_comment()
if gv_pml_depth is None:
gv_pml_depth = np.zeros(6, dtype=np.int64)
assert isinstance(self.grid_view, MPIGridView)
recv_buffer = np.empty((self.grid_view.comm.size, 6), dtype=np.int64)
self.grid_view.comm.Allgather(gv_pml_depth, recv_buffer)
gv_pml_depth = np.max(recv_buffer, axis=0)
return None if all(gv_pml_depth == 0) else gv_pml_depth
def srcs_rx_gv_comment(
self, srcs: Union[Sequence[Source], List[Rx]]
) -> Optional[Tuple[List[str], npt.NDArray[np.float64]]]:
objects: Dict[str, npt.NDArray[np.float64]] = {}
for src in srcs:
position = self.grid.local_to_global_coordinate(src.coord) * self.grid.dl
objects[src.ID] = position
assert isinstance(self.grid_view, MPIGridView)
global_objects: List[Dict[str, npt.NDArray[np.float64]]] = self.grid_view.comm.allgather(
objects
)
objects = {k: v for d in global_objects for k, v in d.items()}
objects = dict(sorted(objects.items()))
return (list(objects.keys()), np.array(list(objects.values()))) if objects else None
def write_material_ids(self, file_handler: VtkHdfFile):
assert isinstance(self.grid_view, MPIGridView)
# Only rank 0 has all the material data. However, creating the
# 'material_ids' dataset is a collective operation, so all ranks
# need to know the shape and datatype of the dataset.
if self.materials is None:
buffer = np.empty(2, dtype=np.int32)
else:
shape = len(self.materials)
max_length = max([len(m) for m in self.materials])
buffer = np.array([shape, max_length], dtype=np.int32)
self.grid_view.comm.Bcast([buffer, MPI.INT32_T])
shape, max_length = buffer
dtype = h5py.string_dtype(length=int(max_length))
file_handler.add_field_data("material_ids", self.materials, shape=(shape,), dtype=dtype)

View file

@@ -0,0 +1,531 @@
import logging
from itertools import chain
from typing import Generic, Tuple
import numpy as np
import numpy.typing as npt
from mpi4py import MPI
from typing_extensions import TypeVar
from gprMax.grid.fdtd_grid import FDTDGrid
from gprMax.grid.mpi_grid import MPIGrid
from gprMax.materials import Material
logger = logging.getLogger(__name__)
GridType = TypeVar("GridType", bound=FDTDGrid)
class GridView(Generic[GridType]):
def __init__(
self,
grid: GridType,
xs: int,
ys: int,
zs: int,
xf: int,
yf: int,
zf: int,
dx: int = 1,
dy: int = 1,
dz: int = 1,
):
"""Create a new GridView.
A grid view provides an interface to allow easy access to a
subsection of an FDTDGrid.
Args:
grid: Grid to create a view of.
xs: Start x coordinate of the grid view.
ys: Start y coordinate of the grid view.
zs: Start z coordinate of the grid view.
xf: End x coordinate of the grid view.
yf: End y coordinate of the grid view.
zf: End z coordinate of the grid view.
dx: Optional step size of the grid view in the x dimension. Defaults to 1.
dy: Optional step size of the grid view in the y dimension. Defaults to 1.
dz: Optional step size of the grid view in the z dimension. Defaults to 1.
"""
self.start = np.array([xs, ys, zs], dtype=np.int32)
self.stop = np.array([xf, yf, zf], dtype=np.int32)
self.step = np.array([dx, dy, dz], dtype=np.int32)
self.size = np.ceil((self.stop - self.start) / self.step).astype(np.int32)
self.grid = grid
self._ID = None
logger.debug(
f"Created GridView for grid '{self.grid.name}' (start={self.start}, stop={self.stop},"
f" step={self.step}, size={self.size})"
)
@property
def xs(self) -> int:
return self.start[0]
@property
def ys(self) -> int:
return self.start[1]
@property
def zs(self) -> int:
return self.start[2]
@property
def xf(self) -> int:
return self.stop[0]
@property
def yf(self) -> int:
return self.stop[1]
@property
def zf(self) -> int:
return self.stop[2]
@property
def nx(self) -> int:
return self.size[0]
@property
def ny(self) -> int:
return self.size[1]
@property
def nz(self) -> int:
return self.size[2]
def get_slice(self, dimension: int, upper_bound_exclusive: bool = True) -> slice:
"""Create a slice object for the specified dimension.
Args:
dimension: Dimension to create the slice object for. Values
0, 1, and 2 map to the x, y, and z dimensions
respectively.
upper_bound_exclusive: Optionally specify if the upper bound
of the slice should be exclusive or inclusive. Defaults
to True.
Returns:
slice: Slice object
"""
if upper_bound_exclusive:
stop = self.stop[dimension]
else:
stop = self.stop[dimension] + self.step[dimension]
return slice(self.start[dimension], stop, self.step[dimension])
def slice_array(self, array: npt.NDArray, upper_bound_exclusive: bool = True) -> npt.NDArray:
"""Slice an array according to the dimensions of the grid view.
It is assumed the last 3 dimensions of the provided array
represent the x, y, z spatial information. Other dimensions will
not be sliced.
E.g. For an array of shape (10, 100, 50, 50) this function would
return an array of shape (10, x, y, z) where x, y, and z are
specified by the size/shape of the grid view.
Args:
array: Array to slice. Must have at least 3 dimensions.
upper_bound_exclusive: Optionally specify if the upper bound
of the slice should be exclusive or inclusive. Defaults
to True.
Returns:
array: Sliced array
"""
return np.ascontiguousarray(
array[
...,
self.get_slice(0, upper_bound_exclusive),
self.get_slice(1, upper_bound_exclusive),
self.get_slice(2, upper_bound_exclusive),
]
)
def initialise_materials(self, filter_materials: bool = True):
"""Create a new ID map for materials in the grid view.
Rather than using the default material IDs (as per the main grid
object), we may want to create a new index for materials inside
this grid view. Unlike the default material IDs, this new index
is contiguous, running from 0 to the number of materials in the
grid view.
This function should be called before calling the
map_to_view_materials() function.
"""
# Get unique materials in the grid view
if filter_materials:
ID = self.get_ID(force_refresh=True)
materials_in_grid_view = np.unique(ID)
# Get actual Material objects
self.materials = np.array(self.grid.materials, dtype=Material)[materials_in_grid_view]
else:
self.materials = np.array(self.grid.materials, dtype=Material)
# Sort materials
self.materials.sort()
# Create map from material ID to 0 - number of materials
materials_map = {material.numID: index for index, material in enumerate(self.materials)}
self.map_materials_func = np.vectorize(lambda id: materials_map[id])
NDArrayType = TypeVar("NDArrayType", bound=npt.NDArray)
def map_to_view_materials(self, array: NDArrayType) -> NDArrayType:
"""Map from the main grid material IDs to the grid view IDs.
Ensure initialise_materials() has been called before using this
function.
Args:
array: Array to map.
Returns:
array: Mapped array.
"""
return self.map_materials_func(array).astype(array.dtype)
def get_ID(self, force_refresh=False) -> npt.NDArray[np.uint32]:
"""Get a view of the ID array.
By default, the slice of the ID array is cached to prevent
unnecessary reconstruction of the view on repeat calls. E.g.
from the initialise_materials() function as well as a user call
to get_ID().
Args:
force_refresh: Optionally force reloading the ID array from
the main grid object. Defaults to False.
Returns:
ID: View of the ID array.
"""
if self._ID is None or force_refresh:
self._ID = self.slice_array(self.grid.ID, upper_bound_exclusive=False)
return self._ID
def get_solid(self) -> npt.NDArray[np.uint32]:
"""Get a view of the solid array.
Returns:
solid: View of the solid array
"""
return self.slice_array(self.grid.solid)
def get_rigidE(self) -> npt.NDArray[np.int8]:
"""Get a view of the rigidE array.
Returns:
rigidE: View of the rigidE array
"""
return self.slice_array(self.grid.rigidE)
def get_rigidH(self) -> npt.NDArray[np.int8]:
"""Get a view of the rigidH array.
Returns:
rigidH: View of the rigidH array
"""
return self.slice_array(self.grid.rigidH)
def get_Ex(self) -> npt.NDArray[np.float32]:
"""Get a view of the Ex array.
Returns:
Ex: View of the Ex array
"""
return self.slice_array(self.grid.Ex, upper_bound_exclusive=False)
def get_Ey(self) -> npt.NDArray[np.float32]:
"""Get a view of the Ey array.
Returns:
Ey: View of the Ey array
"""
return self.slice_array(self.grid.Ey, upper_bound_exclusive=False)
def get_Ez(self) -> npt.NDArray[np.float32]:
"""Get a view of the Ez array.
Returns:
Ez: View of the Ez array
"""
return self.slice_array(self.grid.Ez, upper_bound_exclusive=False)
def get_Hx(self) -> npt.NDArray[np.float32]:
"""Get a view of the Hx array.
Returns:
Hx: View of the Hx array
"""
return self.slice_array(self.grid.Hx, upper_bound_exclusive=False)
def get_Hy(self) -> npt.NDArray[np.float32]:
"""Get a view of the Hy array.
Returns:
Hy: View of the Hy array
"""
return self.slice_array(self.grid.Hy, upper_bound_exclusive=False)
def get_Hz(self) -> npt.NDArray[np.float32]:
"""Get a view of the Hz array.
Returns:
Hz: View of the Hz array
"""
return self.slice_array(self.grid.Hz, upper_bound_exclusive=False)
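# A self-contained numpy sketch of the slicing behaviour slice_array()
# implements: only the trailing three (spatial) dimensions are sliced and
# any leading dimensions pass through untouched. The array shape and view
# extents below are invented purely for illustration.
import numpy as np
start = np.array([2, 0, 4]); stop = np.array([10, 20, 14]); step = np.array([2, 5, 5])
array = np.zeros((10, 100, 50, 50))
slices = tuple(slice(a, b, c) for a, b, c in zip(start, stop, step))
view = np.ascontiguousarray(array[(...,) + slices])
size = np.ceil((stop - start) / step).astype(int).tolist()
print(view.shape, size)  # (10, 4, 4, 2) [4, 4, 2]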
class MPIGridView(GridView[MPIGrid]):
def __init__(
self,
grid: MPIGrid,
xs: int,
ys: int,
zs: int,
xf: int,
yf: int,
zf: int,
dx: int = 1,
dy: int = 1,
dz: int = 1,
):
"""Create a new MPIGridView.
An MPI grid view provides an interface to allow easy access to a
subsection of an MPIGrid.
Args:
grid: MPI grid to create a view of.
xs: Start x coordinate of the grid view.
ys: Start y coordinate of the grid view.
zs: Start z coordinate of the grid view.
xf: End x coordinate of the grid view.
yf: End y coordinate of the grid view.
zf: End z coordinate of the grid view.
dx: Optional step size of the grid view in the x dimension. Defaults to 1.
dy: Optional step size of the grid view in the y dimension. Defaults to 1.
dz: Optional step size of the grid view in the z dimension. Defaults to 1.
"""
super().__init__(grid, xs, ys, zs, xf, yf, zf, dx, dy, dz)
comm = grid.comm.Split()
assert isinstance(comm, MPI.Intracomm)
# Calculate start, stop and size for the global grid view
self.global_start = self.grid.local_to_global_coordinate(self.start)
self.global_stop = self.grid.local_to_global_coordinate(self.stop)
self.global_size = self.size
# Create new cartesian communicator by finding MPI grid coords
# for the start and end of the grid view.
# Subtract 1 from global_stop as the upper extent is exclusive
# meaning the last coordinate included in the grid view is
# actually (global_stop - 1).
start_grid_coord = grid.get_grid_coord_from_coordinate(self.global_start)
stop_grid_coord = grid.get_grid_coord_from_coordinate(self.global_stop - 1) + 1
self.comm = comm.Create_cart((stop_grid_coord - start_grid_coord).tolist())
self.has_negative_neighbour = self.start < self.grid.negative_halo_offset
# Bring start into the local grid (and not in the negative halo)
# start must still be aligned with the provided step.
self.start = np.where(
self.has_negative_neighbour,
self.grid.negative_halo_offset
+ ((self.start - self.grid.negative_halo_offset) % self.step),
self.start,
)
self.has_positive_neighbour = self.stop > self.grid.size
# Limit stop such that it is at most one step beyond the max
# index of the grid. As stop is the upper bound, it is
# exclusive, meaning when used to slice an array (with the
# provided step), the last element accessed will be one step below
# stop.
# Note: using self.grid.size as an index in any dimension would
# fall in the positive halo (this counts as outside the local
# grid).
self.stop = np.where(
self.has_positive_neighbour,
self.grid.size + ((self.stop - self.grid.size) % self.step),
self.stop,
)
# Calculate offset for the local grid view
self.offset = self.grid.local_to_global_coordinate(self.start) - self.global_start
# Update local size
self.size = np.ceil((self.stop - self.start) / self.step).astype(np.int32)
logger.debug(
f"Created MPIGridView for grid '{self.grid.name}' (global_start={self.global_start},"
f" global_stop={self.global_stop}, global_size={self.global_size}, start={self.start},"
f" stop={self.stop}, step={self.step}, size={self.size}, offset={self.offset})"
)
@property
def gx(self) -> int:
return self.global_size[0]
@property
def gy(self) -> int:
return self.global_size[1]
@property
def gz(self) -> int:
return self.global_size[2]
def get_slice(self, dimension: int, upper_bound_exclusive: bool = True) -> slice:
"""Create a slice object for the specified dimension.
Args:
dimension: Dimension to create the slice object for. Values
0, 1, and 2 map to the x, y, and z dimensions
respectively.
upper_bound_exclusive: Optionally specify if the upper bound
of the slice should be exclusive or inclusive. Defaults
to True.
Returns:
slice: Slice object
"""
if upper_bound_exclusive or self.has_positive_neighbour[dimension]:
stop = self.stop[dimension]
else:
# Make slice of array one step larger if this rank does not
# have a positive neighbour
stop = self.stop[dimension] + self.step[dimension]
return slice(self.start[dimension], stop, self.step[dimension])
def get_output_slice(self, dimension: int, upper_bound_exclusive: bool = True) -> slice:
"""Create an output slice object for the specified dimension.
This provides a slice of the grid view for the section of the
grid view managed by this rank. This can be used when writing
out arrays provided by the grid view as part of a collective
operation.
For example:
```
dset_slice = (
grid_view.get_output_slice(0),
grid_view.get_output_slice(1),
grid_view.get_output_slice(2),
)
dset[dset_slice] = grid_view.get_solid()
```
Args:
dimension: Dimension to create the slice object for. Values
0, 1, and 2 map to the x, y, and z dimensions
respectively.
upper_bound_exclusive: Optionally specify if the upper bound
of the slice should be exclusive or inclusive. Defaults
to True.
Returns:
slice: Slice object
"""
if upper_bound_exclusive or self.has_positive_neighbour[dimension]:
size = self.size[dimension]
else:
# Make slice of array one step larger if this rank does not
# have a positive neighbour
size = self.size[dimension] + 1
offset = self.offset[dimension] // self.step[dimension]
return slice(offset, offset + size)
def get_3d_output_slice(self, upper_bound_exclusive: bool = True) -> Tuple[slice, slice, slice]:
"""Create a 3D output slice object.
This provides a slice of the grid view for the section of the
grid view managed by this rank. This can be used when writing
out arrays provided by the grid view as part of a collective
operation.
For example:
`dset[grid_view.get_3d_output_slice()] = grid_view.get_solid()`
Args:
upper_bound_exclusive: Optionally specify if the upper bound
of the slice should be exclusive or inclusive. Defaults
to True.
Returns:
slice: 3D Slice object
"""
return (
self.get_output_slice(0, upper_bound_exclusive),
self.get_output_slice(1, upper_bound_exclusive),
self.get_output_slice(2, upper_bound_exclusive),
)
def initialise_materials(self, filter_materials: bool = True):
"""Create a new ID map for materials in the grid view.
Rather than using the default material IDs (as per the main grid
object), we may want to create a new index for materials inside
this grid view. Unlike the default material IDs, this new index
is contiguous, running from 0 to the number of materials in the
grid view.
This function should only be called if required as it needs MPI
communication to construct the new map. It should also be called
before the map_to_view_materials() function.
"""
if filter_materials:
ID = self.get_ID(force_refresh=True)
local_material_ids = np.unique(ID)
local_materials = np.array(self.grid.materials, dtype=Material)[local_material_ids]
else:
local_materials = np.array(self.grid.materials, dtype=Material)
local_materials.sort()
local_material_ids = [m.numID for m in local_materials]
# Send all materials to the coordinating rank
materials_by_rank = self.comm.gather(local_materials, root=0)
if materials_by_rank is not None:
# Filter out duplicate materials and sort by material ID
all_materials = np.fromiter(chain.from_iterable(materials_by_rank), dtype=Material)
self.materials = np.unique(all_materials)
# The new material IDs correspond to each material's index in
# the sorted self.materials array. For each rank, get the
# new IDs of each material it sent to send back
for rank in range(1, self.comm.size):
new_material_ids = np.where(np.isin(self.materials, materials_by_rank[rank]))[0]
self.comm.Isend([new_material_ids.astype(np.int32), MPI.INT], rank)
new_material_ids = np.where(np.isin(self.materials, materials_by_rank[0]))[0]
new_material_ids = new_material_ids.astype(np.int32)
else:
self.materials = None
# Get list of global IDs for this rank's local materials
new_material_ids = np.empty(len(local_materials), dtype=np.int32)
self.comm.Recv([new_material_ids, MPI.INT], 0)
# Create map from local material ID to global material ID
materials_map = {
local_material_ids[index]: new_id for index, new_id in enumerate(new_material_ids)
}
# Create map from material ID to 0 - number of materials
self.map_materials_func = np.vectorize(lambda id: materials_map[id])
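# A rough standalone sketch of what the resulting map_materials_func does:
# a vectorised lookup from grid material numIDs to a compact 0..N-1 index.
# The numIDs used here are invented for illustration.
import numpy as np
materials_in_view = [0, 3, 7]  # numIDs present in the view, already sorted
materials_map = {num_id: index for index, num_id in enumerate(materials_in_view)}
map_materials = np.vectorize(lambda num_id: materials_map[num_id])
ID = np.array([[0, 3], [7, 3]], dtype=np.uint32)  # e.g. a slice of the ID array
print(map_materials(ID).astype(ID.dtype))  # maps 0 -> 0, 3 -> 1, 7 -> 2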

View file

@@ -31,8 +31,8 @@ class Rx:
def __init__(self):
self.ID: str
self.outputs = {}
self.coord = np.zeros(3, dtype=np.intc)
self.coordorigin = np.zeros(3, dtype=np.intc)
self.coord = np.zeros(3, dtype=np.int32)
self.coordorigin = np.zeros(3, dtype=np.int32)
@property
def xcoord(self) -> int:

View file

@@ -21,6 +21,7 @@ from typing import List, Sequence
from gprMax.grid.fdtd_grid import FDTDGrid
from gprMax.materials import create_built_in_materials
from gprMax.model import Model
from gprMax.mpi_model import MPIModel
from gprMax.subgrids.user_objects import SubGridBase as SubGridUserBase
from gprMax.user_objects.cmds_geometry.add_grass import AddGrass
from gprMax.user_objects.cmds_geometry.add_surface_roughness import AddSurfaceRoughness

View file

@@ -20,16 +20,17 @@ import logging
import sys
from enum import IntEnum, unique
from pathlib import Path
from typing import Dict, List
from typing import Dict, Generic, List
import h5py
import numpy as np
import numpy.typing as npt
from evtk.hl import imageToVTK
from mpi4py import MPI
from tqdm import tqdm
import gprMax.config as config
from gprMax.grid.mpi_grid import MPIGrid
from gprMax.output_controllers.grid_view import GridType, GridView, MPIGridView
from ._version import __version__
from .cython.snapshots import calculate_snapshot_fields
@@ -52,7 +53,7 @@ def save_snapshots(snapshots: List["Snapshot"]):
logger.info(f"Snapshot directory: {snapshotdir.resolve()}")
for i, snap in enumerate(snapshots):
fn = snapshotdir / Path(snap.filename)
fn = snapshotdir / snap.filename
snap.filename = fn.with_suffix(snap.fileext)
pbar = tqdm(
total=snap.nbytes,
@@ -69,7 +70,7 @@ def save_snapshots(snapshots: List["Snapshot"]):
logger.info("")
class Snapshot:
class Snapshot(Generic[GridType]):
"""Snapshots of the electric and magnetic field values."""
allowableoutputs = {
@@ -95,6 +96,10 @@ class Snapshot:
# GPU - blocks per grid - set according to largest requested snapshot
bpg = None
@property
def GRID_VIEW_TYPE(self) -> type[GridView]:
return GridView
def __init__(
self,
xs: int,
@@ -110,8 +115,7 @@ class Snapshot:
filename: str,
fileext: str,
outputs: Dict[str, bool],
grid_dl: npt.NDArray[np.float64],
grid_dt: float,
grid: GridType,
):
"""
Args:
@@ -124,89 +128,62 @@ class Snapshot:
"""
self.fileext = fileext
self.filename = filename
self.filename = Path(filename)
self.time = time
self.outputs = outputs
self.grid_dl = grid_dl
self.grid_dt = grid_dt
self.start = np.array([xs, ys, zs], dtype=np.intc)
self.stop = np.array([xf, yf, zf], dtype=np.intc)
self.step = np.array([dx, dy, dz], dtype=np.intc)
self.size = np.ceil((self.stop - self.start) / self.step).astype(np.intc)
self.slice: list[slice] = list(map(slice, self.start, self.stop + self.step, self.step))
self.grid_view = self.GRID_VIEW_TYPE(grid, xs, ys, zs, xf, yf, zf, dx, dy, dz)
self.nbytes = 0
# Create arrays to hold the field data for snapshot
self.snapfields = {}
@property
def grid(self) -> GridType:
return self.grid_view.grid
# Properties for backwards compatibility
@property
def xs(self) -> int:
return self.start[0]
return self.grid_view.xs
@property
def ys(self) -> int:
return self.start[1]
return self.grid_view.ys
@property
def zs(self) -> int:
return self.start[2]
return self.grid_view.zs
@property
def xf(self) -> int:
return self.stop[0]
return self.grid_view.xf
@property
def yf(self) -> int:
return self.stop[1]
return self.grid_view.yf
@property
def zf(self) -> int:
return self.stop[2]
@property
def dx(self) -> int:
return self.step[0]
@property
def dy(self) -> int:
return self.step[1]
@property
def dz(self) -> int:
return self.step[2]
return self.grid_view.zf
@property
def nx(self) -> int:
return self.size[0]
return self.grid_view.nx
@property
def ny(self) -> int:
return self.size[1]
return self.grid_view.ny
@property
def nz(self) -> int:
return self.size[2]
@property
def sx(self) -> slice:
return self.slice[0]
@property
def sy(self) -> slice:
return self.slice[1]
@property
def sz(self) -> slice:
return self.slice[2]
return self.grid_view.nz
def initialise_snapfields(self):
for k, v in self.outputs.items():
if v:
self.snapfields[k] = np.zeros(
(self.nx, self.ny, self.nz),
self.grid_view.size,
dtype=config.sim_config.dtypes["float_or_double"],
)
self.nbytes += self.snapfields[k].nbytes
@@ -217,7 +194,7 @@ class Snapshot:
(1, 1, 1), dtype=config.sim_config.dtypes["float_or_double"]
)
def store(self, G):
def store(self):
"""Store (in memory) electric and magnetic field values for snapshot.
Args:
@@ -225,12 +202,12 @@ class Snapshot:
"""
# Memory views of field arrays to dimensions required for the snapshot
Exslice = np.ascontiguousarray(G.Ex[self.sx, self.sy, self.sz])
Eyslice = np.ascontiguousarray(G.Ey[self.sx, self.sy, self.sz])
Ezslice = np.ascontiguousarray(G.Ez[self.sx, self.sy, self.sz])
Hxslice = np.ascontiguousarray(G.Hx[self.sx, self.sy, self.sz])
Hyslice = np.ascontiguousarray(G.Hy[self.sx, self.sy, self.sz])
Hzslice = np.ascontiguousarray(G.Hz[self.sx, self.sy, self.sz])
Exslice = self.grid_view.get_Ex()
Eyslice = self.grid_view.get_Ey()
Ezslice = self.grid_view.get_Ez()
Hxslice = self.grid_view.get_Hx()
Hyslice = self.grid_view.get_Hy()
Hzslice = self.grid_view.get_Hz()
# Calculate field values at points (comes from averaging field
# components in cells)
@@ -286,10 +263,13 @@ class Snapshot:
if self.outputs.get(k)
}
origin = self.grid_view.start * self.grid.dl
spacing = self.grid_view.step * self.grid.dl
imageToVTK(
str(self.filename.with_suffix("")),
origin=tuple(self.start * self.step * self.grid_dl),
spacing=tuple(self.step * self.grid_dl),
origin=tuple(origin),
spacing=tuple(spacing),
cellData=celldata,
)
@@ -312,9 +292,9 @@ class Snapshot:
f.attrs["gprMax"] = __version__
# TODO: Output model name (title) and grid name? in snapshot output
# f.attrs["Title"] = G.title
f.attrs["nx_ny_nz"] = (self.nx, self.ny, self.nz)
f.attrs["dx_dy_dz"] = self.step * self.grid_dl
f.attrs["time"] = self.time * self.grid_dt
f.attrs["nx_ny_nz"] = tuple(self.grid_view.size)
f.attrs["dx_dy_dz"] = self.grid_view.step * self.grid.dl
f.attrs["time"] = self.time * self.grid.dt
for key in ["Ex", "Ey", "Ez", "Hx", "Hy", "Hz"]:
if self.outputs[key]:
@@ -337,12 +317,16 @@ class Dir(IntEnum):
POS = 1
class MPISnapshot(Snapshot):
class MPISnapshot(Snapshot[MPIGrid]):
H_TAG = 0
EX_TAG = 1
EY_TAG = 2
EZ_TAG = 3
@property
def GRID_VIEW_TYPE(self) -> type[MPIGridView]:
return MPIGridView
def __init__(
self,
xs: int,
@@ -358,27 +342,23 @@ class MPISnapshot(Snapshot):
filename: str,
fileext: str,
outputs: Dict[str, bool],
grid_dl: npt.NDArray[np.float64],
grid_dt: float,
grid: MPIGrid,
):
super().__init__(
xs, ys, zs, xf, yf, zf, dx, dy, dz, time, filename, fileext, outputs, grid_dl, grid_dt
)
super().__init__(xs, ys, zs, xf, yf, zf, dx, dy, dz, time, filename, fileext, outputs, grid)
self.offset = np.zeros(3, dtype=np.intc)
self.global_size = self.size.copy()
assert isinstance(self.grid_view, self.GRID_VIEW_TYPE)
self.comm = self.grid_view.comm
self.comm: MPI.Cartcomm = None # type: ignore
def initialise_snapfields(self):
# Start and stop may have changed since initialisation
self.size = np.ceil((self.stop - self.start) / self.step).astype(np.intc)
return super().initialise_snapfields()
# Get neighbours
self.neighbours = np.full((3, 2), -1, dtype=int)
self.neighbours[Dim.X] = self.comm.Shift(direction=Dim.X, disp=1)
self.neighbours[Dim.Y] = self.comm.Shift(direction=Dim.Y, disp=1)
self.neighbours[Dim.Z] = self.comm.Shift(direction=Dim.Z, disp=1)
def has_neighbour(self, dimension: Dim, direction: Dir) -> bool:
return self.neighbours[dimension][direction] != -1
def store(self, G):
def store(self):
"""Store (in memory) electric and magnetic field values for snapshot.
Args:
@@ -387,31 +367,17 @@ class MPISnapshot(Snapshot):
logger.debug(f"Saving snapshot for iteration: {self.time}")
# Get neighbours
self.neighbours = np.full((3, 2), -1, dtype=int)
self.neighbours[Dim.X] = self.comm.Shift(direction=Dim.X, disp=1)
self.neighbours[Dim.Y] = self.comm.Shift(direction=Dim.Y, disp=1)
self.neighbours[Dim.Z] = self.comm.Shift(direction=Dim.Z, disp=1)
# If we do not have a positive neighbour, add an extra step to
# make the upper bound inclusive. Otherwise the additional step
# will be provided by the received halo.
slice_stop = np.where(
self.neighbours[:, Dir.POS] == -1,
self.stop + self.step,
self.stop,
)
self.slice = list(map(slice, self.start, slice_stop, self.step))
# Memory views of field arrays to dimensions required for the snapshot
Exslice = np.ascontiguousarray(G.Ex[self.sx, self.sy, self.sz])
Eyslice = np.ascontiguousarray(G.Ey[self.sx, self.sy, self.sz])
Ezslice = np.ascontiguousarray(G.Ez[self.sx, self.sy, self.sz])
Hxslice = np.ascontiguousarray(G.Hx[self.sx, self.sy, self.sz])
Hyslice = np.ascontiguousarray(G.Hy[self.sx, self.sy, self.sz])
Hzslice = np.ascontiguousarray(G.Hz[self.sx, self.sy, self.sz])
Exslice = self.grid_view.get_Ex()
Eyslice = self.grid_view.get_Ey()
Ezslice = self.grid_view.get_Ez()
Hxslice = self.grid_view.get_Hx()
Hyslice = self.grid_view.get_Hy()
Hzslice = self.grid_view.get_Hz()
"""
Halos required by each field to average field components:
Exslice - y + z halo
Eyslice - x + z halo
Ezslice - x + y halo
@@ -579,25 +545,23 @@ class MPISnapshot(Snapshot):
Args:
pbar: Progress bar class instance.
"""
assert isinstance(self.grid_view, self.GRID_VIEW_TYPE)
f = h5py.File(self.filename, "w", driver="mpio", comm=self.comm)
f.attrs["gprMax"] = __version__
# TODO: Output model name (title) and grid name? in snapshot output
# f.attrs["Title"] = G.title
f.attrs["nx_ny_nz"] = self.global_size
f.attrs["dx_dy_dz"] = self.step * self.grid_dl
f.attrs["time"] = self.time * self.grid_dt
f.attrs["nx_ny_nz"] = self.grid_view.global_size
f.attrs["dx_dy_dz"] = self.grid_view.step * self.grid.dl
f.attrs["time"] = self.time * self.grid.dt
dset_slice = self.grid_view.get_3d_output_slice()
for key in ["Ex", "Ey", "Ez", "Hx", "Hy", "Hz"]:
if self.outputs[key]:
dset = f.create_dataset(key, self.global_size)
# TODO: Is there a better way to do this slice?
start = self.offset
stop = start + self.size
dset[start[0] : stop[0], start[1] : stop[1], start[2] : stop[2]] = self.snapfields[
key
]
dset = f.create_dataset(key, self.grid_view.global_size)
dset[dset_slice] = self.snapfields[key]
pbar.update(n=self.snapfields[key].nbytes)
f.close()
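# A stripped-down sketch of the parallel write pattern used above: every
# rank creates the full-size dataset collectively, then writes only its own
# slab. Assumes h5py built with MPI support; sizes and the file name are
# invented for illustration.
import h5py
import numpy as np
from mpi4py import MPI
comm = MPI.COMM_WORLD
global_size = (4 * comm.size, 4, 4)                       # whole snapshot volume
local = np.full((4, 4, 4), comm.rank, dtype=np.float64)   # this rank's block
f = h5py.File("snapshot_example.h5", "w", driver="mpio", comm=comm)
dset = f.create_dataset("Ex", global_size)                # collective creation on all ranks
dset[4 * comm.rank : 4 * (comm.rank + 1)] = local         # independent slab write
f.close()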

View file

@@ -33,8 +33,8 @@ class Source:
def __init__(self):
self.ID: str
self.polarisation = None
self.coord = np.zeros(3, dtype=np.intc)
self.coordorigin = np.zeros(3, dtype=np.intc)
self.coord = np.zeros(3, dtype=np.int32)
self.coordorigin = np.zeros(3, dtype=np.int32)
self.start = 0.0
self.stop = 0.0
self.waveform: Waveform

View file

@@ -107,7 +107,7 @@ class SubGridBase(ModelUserObject):
p2 = self.kwargs["p2"]
uip = self._create_uip(model.G)
p1, p2 = uip.check_box_points(p1, p2, self.__str__())
_, p1, p2 = uip.check_box_points(p1, p2, self.__str__())
self.set_discretisation(sg, model.G)

View file

@@ -50,7 +50,7 @@ class CPUUpdates(Updates[GridType]):
"""
for snap in self.grid.snapshots:
if snap.time == iteration + 1:
snap.store(self.grid)
snap.store()
def update_magnetic(self):
"""Updates magnetic field components."""

View file

@@ -18,15 +18,17 @@
from __future__ import annotations
import logging
from typing import Generic
from typing import Generic, Tuple
import numpy as np
import numpy.typing as npt
from typing_extensions import TypeVar
from gprMax.grid.fdtd_grid import FDTDGrid
from gprMax.grid.mpi_grid import MPIGrid
from gprMax.subgrids.grid import SubGridBaseGrid
from .utilities.utilities import round_value
from .utilities.utilities import round_int
logger = logging.getLogger(__name__)
@@ -39,7 +41,7 @@ logger = logging.getLogger(__name__)
encapsulated here.
"""
GridType = TypeVar("GridType", bound=FDTDGrid, default=FDTDGrid)
GridType = TypeVar("GridType", bound=FDTDGrid)
class UserInput(Generic[GridType]):
@@ -48,9 +50,9 @@ class UserInput(Generic[GridType]):
def __init__(self, grid: GridType):
self.grid = grid
def point_within_bounds(self, p, cmd_str, name):
def point_within_bounds(self, p: npt.NDArray[np.int32], cmd_str: str, name: str = "") -> bool:
try:
self.grid.within_bounds(p)
return self.grid.within_bounds(p)
except ValueError as err:
v = ["x", "y", "z"]
# Discretisation
@@ -64,30 +66,74 @@ class UserInput(Generic[GridType]):
logger.exception(s)
raise
def check_point_within_bounds(self, p) -> bool:
try:
self.grid.within_bounds(p)
return True
except ValueError:
return False
def discretise_static_point(self, point: Tuple[float, float, float]) -> npt.NDArray[np.int32]:
"""Get the nearest grid index to a continuous static point.
def grid_upper_bound(self) -> list[int]:
return [self.grid.nx, self.grid.ny, self.grid.nz]
For a static point, the point of the origin of the grid is
ignored. I.e. it is assumed to be at (0, 0, 0). There are no
checks of the validity of the point such as bound checking.
def discretise_point(self, p):
"""Gets the index of a continuous point with the grid."""
rv = np.vectorize(round_value)
return rv(p / self.grid.dl)
Args:
point: x, y, z coordinates of the point in space.
def round_to_grid(self, p):
"""Gets the nearest continuous point on the grid from a continuous point
in space.
Returns:
discretised_point: x, y, z indices of the point on the grid.
"""
return self.discretise_point(p) * self.grid.dl
rv = np.vectorize(round_int, otypes=[np.int32])
return rv(point / self.grid.dl)
def descretised_to_continuous(self, p):
"""Returns a point given as indices to a continuous point in the real space."""
return p * self.grid.dl
def round_to_grid_static_point(
self, point: Tuple[float, float, float]
) -> npt.NDArray[np.float64]:
"""Round a continuous static point to the nearest point on the grid.
For a static point, the point of the origin of the grid is
ignored. I.e. it is assumed to be at (0, 0, 0). There are no
checks of the validity of the point such as bound checking.
Args:
point: x, y, z coordinates of the point in space.
Returns:
rounded_point: x, y, z coordinates of the nearest continuous
point on the grid.
"""
return self.discretise_static_point(point) * self.grid.dl
def discretise_point(self, point: Tuple[float, float, float]) -> npt.NDArray[np.int32]:
"""Get the nearest grid index to a continuous static point.
This function translates user points to the correct index for
building objects. Points will be mapped from the user coordinate
space to the local coordinate space of the grid. There are no
checks of the validity of the point such as bound checking.
Args:
point: x, y, z coordinates of the point in space.
Returns:
discretised_point: x, y, z indices of the point on the grid.
"""
return self.discretise_static_point(point)
def round_to_grid(self, point: Tuple[float, float, float]) -> npt.NDArray[np.float64]:
"""Round a continuous static point to the nearest point on the grid.
The point will be mapped from the user coordinate space to the
local coordinate space of the grid. There are no checks of the
validity of the point such as bound checking.
Args:
point: x, y, z coordinates of the point in space.
Returns:
rounded_point: x, y, z coordinates of the nearest continuous
point on the grid.
"""
return self.discretise_point(point) * self.grid.dl
def discrete_to_continuous(self, point: npt.NDArray[np.int32]) -> npt.NDArray[np.float64]:
return point * self.grid.dl
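# A rough standalone illustration of the relationship between
# discretise_point() and round_to_grid(): the first snaps a continuous
# point to the nearest grid indices, the second converts those indices back
# to metres. The spatial step and point are arbitrary example values, and
# plain np.rint stands in for the round_int helper.
import numpy as np
dl = np.array([0.002, 0.002, 0.002])       # spatial discretisation in metres
point = np.array([0.0305, 0.010, 0.0019])  # continuous user point in metres
index = np.rint(point / dl).astype(np.int32)  # nearest grid index
on_grid = index * dl                          # nearest point lying on the grid
print(index.tolist())  # [15, 5, 1]
print(on_grid)         # approximately [0.03  0.01  0.002]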
class MainGridUserInput(UserInput[GridType]):
@@ -96,52 +142,195 @@ class MainGridUserInput(UserInput[GridType]):
def __init__(self, grid):
super().__init__(grid)
def check_point(self, p, cmd_str, name=""):
def check_point(
self, point: Tuple[float, float, float], cmd_str: str, name: str = ""
) -> Tuple[bool, npt.NDArray[np.int32]]:
"""Discretises point and check its within the domain"""
p = self.discretise_point(p)
self.point_within_bounds(p, cmd_str, name)
return p
discretised_point = self.discretise_point(point)
within_bounds = self.point_within_bounds(discretised_point, cmd_str, name)
return within_bounds, discretised_point
def check_src_rx_point(self, p, cmd_str, name=""):
p = self.check_point(p, cmd_str, name)
def check_src_rx_point(
self, point: Tuple[float, float, float], cmd_str: str, name: str = ""
) -> Tuple[bool, npt.NDArray[np.int32]]:
within_bounds, discretised_point = self.check_point(point, cmd_str, name)
if self.grid.within_pml(p):
if self.grid.within_pml(discretised_point):
logger.warning(
f"'{cmd_str}' sources and receivers should not normally be positioned within the PML."
)
return p
return within_bounds, discretised_point
def check_box_points(self, p1, p2, cmd_str):
p1 = self.check_point(p1, cmd_str, name="lower")
p2 = self.check_point(p2, cmd_str, name="upper")
def _check_2d_points(
self, p1: Tuple[float, float, float], p2: Tuple[float, float, float], cmd_str: str
) -> Tuple[bool, npt.NDArray[np.int32], npt.NDArray[np.int32]]:
lower_within_grid, lower_point = self.check_point(p1, cmd_str, "lower")
upper_within_grid, upper_point = self.check_point(p2, cmd_str, "upper")
if np.greater(p1, p2).any():
logger.exception(
if np.greater(lower_point, upper_point).any() or np.equal(lower_point, upper_point).all():
raise ValueError(
f"'{cmd_str}' the lower coordinates should be less than the upper coordinates."
)
raise ValueError
return p1, p2
return lower_within_grid and upper_within_grid, lower_point, upper_point
def check_tri_points(self, p1, p2, p3, cmd_str):
p1 = self.check_point(p1, cmd_str, name="vertex_1")
p2 = self.check_point(p2, cmd_str, name="vertex_2")
p3 = self.check_point(p3, cmd_str, name="vertex_3")
def check_output_object_bounds(
self, p1: Tuple[float, float, float], p2: Tuple[float, float, float], cmd_str: str
) -> Tuple[npt.NDArray[np.int32], npt.NDArray[np.int32]]:
# We only care if the bounds are in the global grid (an error
# will be thrown if that is not the case).
_, lower_bound, upper_bound = self._check_2d_points(p1, p2, cmd_str)
return lower_bound, upper_bound
return p1, p2, p3
def check_box_points(
self, p1: Tuple[float, float, float], p2: Tuple[float, float, float], cmd_str: str
) -> Tuple[bool, npt.NDArray[np.int32], npt.NDArray[np.int32]]:
return self._check_2d_points(p1, p2, cmd_str)
def discretise_static_point(self, p):
"""Gets the index of a continuous point regardless of the point of
origin of the grid.
def check_tri_points(
self,
p1: Tuple[float, float, float],
p2: Tuple[float, float, float],
p3: Tuple[float, float, float],
cmd_str: str,
) -> Tuple[npt.NDArray[np.int32], npt.NDArray[np.int32], npt.NDArray[np.int32]]:
# We only care if the points are in the global grid (an error
# will be thrown if that is not the case).
_, p1_checked = self.check_point(p1, cmd_str, name="vertex_1")
_, p2_checked = self.check_point(p2, cmd_str, name="vertex_2")
_, p3_checked = self.check_point(p3, cmd_str, name="vertex_3")
return p1_checked, p2_checked, p3_checked
def check_thickness(
self,
dimension: str,
lower_extent: float,
thickness: float,
cmd_str: str,
) -> Tuple[bool, float, float]:
"""Check the thickness of an object in a specified dimension.
Args:
dimension: Dimension to check the thickness value for.
This must have value x, y, or z.
lower_extent: Lower extent of the object in the specified
dimension.
thickness: Thickness of the object.
Raises:
ValueError: Raised if dimension has an invalid value.
Returns:
within_grid: True if part of the object is within the
current grid. False otherwise.
lower_extent: Lower extent limited to the bounds of the
grid.
thickness: Thickness value such that lower_extent +
thickness is within the bounds of the grid.
"""
return super().discretise_point(p)
if thickness < 0:
raise ValueError(f"'{cmd_str}' requires a non negative thickness")
def round_to_grid_static_point(self, p):
"""Gets the index of a continuous point regardless of the point of
origin of the grid.
if lower_extent < 0:
raise ValueError(
f"'{cmd_str}' lower extent should be non negative in the {dimension} dimension"
)
upper_extent = lower_extent + thickness
if dimension == "x":
lower_point = self.discretise_point((lower_extent, 0, 0))
upper_point = self.discretise_point((upper_extent, 0, 0))
thickness_point = self.discretise_static_point((thickness, 0, 0))
index = 0
elif dimension == "y":
lower_point = self.discretise_point((0, lower_extent, 0))
upper_point = self.discretise_point((0, upper_extent, 0))
thickness_point = self.discretise_static_point((0, thickness, 0))
index = 1
elif dimension == "z":
lower_point = self.discretise_point((0, 0, lower_extent))
upper_point = self.discretise_point((0, 0, upper_extent))
thickness_point = self.discretise_static_point((0, 0, thickness))
index = 2
else:
raise ValueError("Dimension should have value x, y, or z")
try:
self.grid.within_bounds(upper_point)
except ValueError:
raise ValueError(
f"'{cmd_str}' extends beyond the size of the model in the {dimension} dimension"
)
# Work with discretised (int) values as this reduces imprecision due
# to floating point calculations
size = self.grid.size[index]
lower_extent = lower_point[index]
upper_extent = upper_point[index]
thickness = thickness_point[index]
# These should only trigger for MPIGrids.
# TODO: Can this be structured so these checks happen in the
# MPIGridUserInput object?
if lower_extent < 0:
thickness += lower_extent
lower_extent = 0
if upper_extent > size:
thickness -= upper_extent - size
dl = self.grid.dl[index]
return (
lower_extent <= size
and upper_extent >= 0
and not (upper_extent > size and thickness <= 0),
lower_extent * dl,
thickness * dl,
)
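# A small worked sketch of the clamping at the end of check_thickness():
# an object spanning [lower, lower + thickness) in cell indices is cut down
# to the part overlapping a local grid of the given size. The numbers are
# arbitrary and chosen to show an object straddling the negative edge of
# the grid.
size = 50         # local grid size (cells) in the dimension being checked
lower = -10       # object lower extent in local cell indices
thickness = 30    # object thickness in cells
upper = lower + thickness
if lower < 0: lower, thickness = 0, thickness + lower   # clip negative overhang
if upper > size: thickness -= upper - size              # clip positive overhang
within_grid = lower <= size and upper >= 0 and not (upper > size and thickness <= 0)
print(within_grid, lower, thickness)  # True 0 20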
class MPIUserInput(MainGridUserInput[MPIGrid]):
"""Handles (x, y, z) points supplied by the user for MPI grids.
This class automatically translates points from the global coordinate system
to the grid's local coordinate system.
"""
def discretise_point(self, point: Tuple[float, float, float]) -> npt.NDArray[np.int32]:
"""Get the nearest grid index to a continuous static point.
This function translates user points to the correct index for
building objects. Points will be mapped from the global
coordinate space to the local coordinate space of the grid.
There are no checks of the validity of the point such as bound
checking.
Args:
point: x, y, z coordinates of the point in space.
Returns:
discretised_point: x, y, z indices of the point on the grid.
"""
return super().discretise_point(p) * self.grid.dl
discretised_point = super().discretise_point(point)
return self.grid.global_to_local_coordinate(discretised_point)
def check_box_points(
self, p1: Tuple[float, float, float], p2: Tuple[float, float, float], cmd_str: str
) -> Tuple[bool, npt.NDArray[np.int32], npt.NDArray[np.int32]]:
_, lower_point, upper_point = super().check_box_points(p1, p2, cmd_str)
# Restrict points to the bounds of the local grid
lower_point = np.where(lower_point < 0, 0, lower_point)
upper_point = np.where(upper_point > self.grid.size, self.grid.size, upper_point)
return (
all(lower_point <= upper_point) and all(lower_point < self.grid.size),
lower_point,
upper_point,
)
class SubgridUserInput(MainGridUserInput[SubGridBaseGrid]):
@@ -160,7 +349,7 @@ class SubgridUserInput(MainGridUserInput[SubGridBaseGrid]):
self.outer_bound = np.subtract([grid.nx, grid.ny, grid.nz], self.inner_bound)
def translate_to_gap(self, p):
def translate_to_gap(self, p) -> npt.NDArray[np.int32]:
"""Translates the user input point to the real point in the subgrid."""
p1 = (p[0] - self.grid.i0 * self.grid.ratio) + self.grid.n_boundary_cells_x
@@ -169,20 +358,26 @@ class SubgridUserInput(MainGridUserInput[SubGridBaseGrid]):
return np.array([p1, p2, p3])
def discretise_point(self, p):
"""Discretises a point. Does not provide any checks. The user enters
coordinates relative to self.inner_bound. This function translate
the user point to the correct index for building objects.
def discretise_point(self, point: Tuple[float, float, float]) -> npt.NDArray[np.int32]:
"""Get the nearest grid index to a continuous static point.
This function translates user points to the correct index for
building objects. The user enters coordinates relative to
self.inner_bound which are mapped to the local coordinate space
of the grid. There are no checks of the validity of the point
such as bound checking.
Args:
point: x, y, z coordinates of the point in space relative to
self.inner_bound.
Returns:
discretised_point: x, y, z indices of the point on the grid.
"""
p = super().discretise_point(p)
p_t = self.translate_to_gap(p)
return p_t
def round_to_grid(self, p):
p_t = self.discretise_point(p)
p_m = p_t * self.grid.dl
return p_m
discretised_point = super().discretise_point(point)
discretised_point = self.translate_to_gap(discretised_point)
return discretised_point
def check_point(self, p, cmd_str, name=""):
p_t = super().check_point(p, cmd_str, name)
@@ -194,13 +389,3 @@ class SubgridUserInput(MainGridUserInput[SubGridBaseGrid]):
f"'{cmd_str}' this object traverses the Outer Surface. This is an advanced feature."
)
return p_t
def discretise_static_point(self, p):
"""Gets the index of a continuous point regardless of the point of
origin of the grid."""
return super().discretise_point(p)
def round_to_grid_static_point(self, p):
"""Gets the index of a continuous point regardless of the point of
origin of the grid."""
return super().discretise_point(p) * self.grid.dl

View file

@@ -99,7 +99,7 @@ class AddGrass(RotatableMixin, GeometryUserObject):
raise
uip = self._create_uip(grid)
p1, p2 = uip.check_box_points(p1, p2, self.__str__())
_, p1, p2 = uip.check_box_points(p1, p2, self.__str__())
xs, ys, zs = p1
xf, yf, zf = p2

View file

@@ -99,7 +99,7 @@ class AddSurfaceRoughness(RotatableMixin, GeometryUserObject):
raise ValueError
uip = self._create_uip(grid)
p1, p2 = uip.check_box_points(p1, p2, self.__str__())
_, p1, p2 = uip.check_box_points(p1, p2, self.__str__())
xs, ys, zs = p1
xf, yf, zf = p2

View file

@@ -81,7 +81,7 @@ class AddSurfaceWater(RotatableMixin, GeometryUserObject):
raise ValueError
uip = self._create_uip(grid)
p1, p2 = uip.check_box_points(p1, p2, self.__str__())
_, p1, p2 = uip.check_box_points(p1, p2, self.__str__())
xs, ys, zs = p1
xf, yf, zf = p2

View file

@@ -91,7 +91,13 @@ class Box(RotatableMixin, GeometryUserObject):
averagebox = grid.averagevolumeobjects
uip = self._create_uip(grid)
p3, p4 = uip.check_box_points(p1, p2, self.__str__())
grid_contains_box, p3, p4 = uip.check_box_points(p1, p2, self.__str__())
# Exit early if none of the box is in this grid as there is
# nothing else to do.
if not grid_contains_box:
return
# Find nearest point on grid without translation
p5 = uip.round_to_grid_static_point(p1)
p6 = uip.round_to_grid_static_point(p2)

View file

@@ -74,6 +74,31 @@ class CylindricalSector(GeometryUserObject):
logger.exception(self.__str__())
raise
# Check thickness of the object first as we may be able to exit
# early if it is fully outside the grid.
uip = self._create_uip(grid)
# yz-plane cylindrical sector
if normal == "x":
level, ctr1, ctr2 = uip.round_to_grid((extent1, ctr1, ctr2))
# xz-plane cylindrical sector
elif normal == "y":
ctr1, level, ctr2 = uip.round_to_grid((ctr1, extent1, ctr2))
# xy-plane cylindrical sector
elif normal == "z":
ctr1, ctr2, level = uip.round_to_grid((ctr1, ctr2, extent1))
sector_within_grid, level, thickness = uip.check_thickness(
normal, extent1, thickness, self.__str__()
)
# Exit early if none of the cylindrical sector is in this grid
# as there is nothing else to do.
if not sector_within_grid:
return
# Check averaging
try:
# Try user-specified averaging
@@ -162,19 +187,6 @@ class CylindricalSector(GeometryUserObject):
numIDy = materials[1].numID
numIDz = materials[2].numID
uip = self._create_uip(grid)
# yz-plane cylindrical sector
if normal == "x":
level, ctr1, ctr2 = uip.round_to_grid((extent1, ctr1, ctr2))
# xz-plane cylindrical sector
elif normal == "y":
ctr1, level, ctr2 = uip.round_to_grid((ctr1, extent1, ctr2))
# xy-plane cylindrical sector
elif normal == "z":
ctr1, ctr2, level = uip.round_to_grid((ctr1, ctr2, extent1))
build_cylindrical_sector(
ctr1,
ctr2,

View file

@@ -68,12 +68,18 @@ class Edge(RotatableMixin, GeometryUserObject):
self._do_rotate(grid)
uip = self._create_uip(grid)
p3 = uip.round_to_grid_static_point(p1)
p4 = uip.round_to_grid_static_point(p2)
p1, p2 = uip.check_box_points(p1, p2, self.__str__())
xs, ys, zs = p1
xf, yf, zf = p2
edge_within_grid, discretised_p1, discretised_p2 = uip.check_box_points(
p1, p2, self.__str__()
)
# Exit early if none of the edge is in this grid as there is
# nothing else to do.
if not edge_within_grid:
return
xs, ys, zs = discretised_p1
xf, yf, zf = discretised_p2
material = next((x for x in grid.materials if x.ID == material_id), None)
@@ -102,6 +108,9 @@ class Edge(RotatableMixin, GeometryUserObject):
for k in range(zs, zf):
build_edge_z(xs, ys, k, material.numID, grid.rigidE, grid.rigidH, grid.ID)
p3 = uip.round_to_grid_static_point(p1)
p4 = uip.round_to_grid_static_point(p2)
logger.info(
f"{self.grid_name(grid)}Edge from {p3[0]:g}m, {p3[1]:g}m, "
f"{p3[2]:g}m, to {p4[0]:g}m, {p4[1]:g}m, {p4[2]:g}m of "

View file

@@ -110,7 +110,13 @@ class FractalBox(RotatableMixin, GeometryUserObject):
p3 = uip.round_to_grid_static_point(p1)
p4 = uip.round_to_grid_static_point(p2)
p1, p2 = uip.check_box_points(p1, p2, self.__str__())
grid_contains_fractal_box, p1, p2 = uip.check_box_points(p1, p2, self.__str__())
# Exit early if none of the fractal box is in this grid as there
# is nothing else to do.
if not grid_contains_fractal_box:
return
xs, ys, zs = p1
xf, yf, zf = p2

View file

@@ -81,7 +81,13 @@ class Plate(RotatableMixin, GeometryUserObject):
p3 = uip.round_to_grid_static_point(p1)
p4 = uip.round_to_grid_static_point(p2)
p1, p2 = uip.check_box_points(p1, p2, self.__str__())
plate_within_grid, p1, p2 = uip.check_box_points(p1, p2, self.__str__())
# Exit early if none of the plate is in this grid as there is
# nothing else to do.
if not plate_within_grid:
return
xs, ys, zs = p1
xf, yf, zf = p2
@@ -90,17 +96,16 @@ class Plate(RotatableMixin, GeometryUserObject):
(xs == xf and (ys == yf or zs == zf))
or (ys == yf and (xs == xf or zs == zf))
or (zs == zf and (xs == xf or ys == yf))
or (xs != xf and ys != yf and zs != zf)
):
logger.exception(f"{self.__str__()} the plate is not specified correctly")
raise ValueError
raise ValueError(f"{self.__str__()} the plate is not specified correctly")
# Look up requested materials in existing list of material instances
materials = [y for x in materialsrequested for y in grid.materials if y.ID == x]
if len(materials) != len(materialsrequested):
notfound = [x for x in materialsrequested if x not in materials]
logger.exception(f"{self.__str__()} material(s) {notfound} do not exist")
raise ValueError
raise ValueError(f"{self.__str__()} material(s) {notfound} do not exist")
# yz-plane plate
if xs == xf:

View file

@@ -97,35 +97,51 @@ class Triangle(RotatableMixin, GeometryUserObject):
raise
uip = self._create_uip(grid)
p4 = uip.round_to_grid_static_point(up1)
p5 = uip.round_to_grid_static_point(up2)
p6 = uip.round_to_grid_static_point(up3)
# Check whether points are valid against grid
uip.check_tri_points(up1, up2, up3, object)
dp1, dp2, dp3 = uip.check_tri_points(up1, up2, up3, self.__str__())
# Convert points to metres
x1, y1, z1 = uip.round_to_grid(up1)
x2, y2, z2 = uip.round_to_grid(up2)
x3, y3, z3 = uip.round_to_grid(up3)
if thickness < 0:
logger.exception(f"{self.__str__()} requires a positive value for thickness")
raise ValueError
x1, y1, z1 = uip.discrete_to_continuous(dp1)
x2, y2, z2 = uip.discrete_to_continuous(dp2)
x3, y3, z3 = uip.discrete_to_continuous(dp3)
# Check for valid orientations
# yz-plane triangle
if x1 == x2 == x3:
normal = "x"
lower_extent = up1[0]
# xz-plane triangle
elif y1 == y2 == y3:
normal = "y"
lower_extent = up1[1]
# xy-plane triangle
elif z1 == z2 == z3:
normal = "z"
lower_extent = up1[2]
else:
logger.exception(f"{self.__str__()} the triangle is not specified correctly")
raise ValueError
triangle_within_grid, lower_extent, thickness = uip.check_thickness(
normal, lower_extent, thickness, self.__str__()
)
# Exit early if none of the triangle is in this grid as there is
# nothing else to do.
if not triangle_within_grid:
return
# Update start bound of the triangle
# yz-plane triangle
if normal == "x":
x1 = x2 = x3 = lower_extent
# xz-plane triangle
elif normal == "y":
y1 = y2 = y3 = lower_extent
# xy-plane triangle
elif normal == "z":
z1 = z2 = z3 = lower_extent
# Look up requested materials in existing list of material instances
materials = [y for x in materialsrequested for y in grid.materials if y.ID == x]
@@ -202,6 +218,10 @@ class Triangle(RotatableMixin, GeometryUserObject):
grid.ID,
)
p4 = uip.round_to_grid_static_point(up1)
p5 = uip.round_to_grid_static_point(up2)
p6 = uip.round_to_grid_static_point(up3)
if thickness > 0:
dielectricsmoothing = "on" if averaging else "off"
logger.info(

File diff too large to display.

View file

@@ -1,16 +1,249 @@
import logging
from typing import List, Optional, Tuple
import numpy as np
import numpy.typing as npt
from gprMax.geometry_outputs import GeometryObjects as GeometryObjectsUser
from gprMax.geometry_outputs import MPIGeometryObjects as MPIGeometryObjectsUser
from gprMax.grid.fdtd_grid import FDTDGrid
from gprMax.grid.mpi_grid import MPIGrid
from gprMax.model import Model
from gprMax.snapshots import Snapshot as SnapshotUser
from gprMax.subgrids.grid import SubGridBaseGrid
from gprMax.user_objects.user_objects import OutputUserObject
from gprMax.utilities.utilities import round_int
logger = logging.getLogger(__name__)
class Snapshot(OutputUserObject):
"""Obtains information about the electromagnetic fields within a volume
of the model at a given time instant.
Attributes:
p1: tuple required to specify lower left (x,y,z) coordinates of volume
of snapshot in metres.
p2: tuple required to specify upper right (x,y,z) coordinates of volume
of snapshot in metres.
dl: tuple required to specify spatial discretisation of the snapshot
in metres.
filename: string required for name of the file to store snapshot.
time/iterations: either a float for time or an int for iterations
must be specified for point in time at which the
snapshot will be taken.
fileext: optional string to indicate type for snapshot file, either
'.vti' (default) or '.h5'
outputs: optional list of outputs for receiver. It can be any
selection from Ex, Ey, Ez, Hx, Hy, or Hz.
"""
@property
def order(self):
return 9
@property
def hash(self):
return "#snapshot"
def __init__(
self,
p1: Tuple[float, float, float],
p2: Tuple[float, float, float],
dl: Tuple[float, float, float],
filename: str,
fileext: Optional[str] = None,
iterations: Optional[int] = None,
time: Optional[float] = None,
outputs: Optional[List[str]] = None,
):
super().__init__(
p1=p1,
p2=p2,
dl=dl,
filename=filename,
fileext=fileext,
iterations=iterations,
time=time,
outputs=outputs,
)
self.lower_bound = p1
self.upper_bound = p2
self.dl = dl
self.filename = filename
self.file_extension = fileext
self.iterations = iterations
self.time = time
self.outputs = outputs
def _calculate_upper_bound(
self, start: npt.NDArray[np.int32], step: npt.NDArray[np.int32], size: npt.NDArray[np.int32]
) -> npt.NDArray[np.int32]:
return start + step * np.ceil(size / step)
def build(self, model: Model, grid: FDTDGrid):
if isinstance(grid, SubGridBaseGrid):
raise ValueError(f"{self.params_str()} do not add snapshots to subgrids.")
uip = self._create_uip(grid)
discretised_lower_bound, discretised_upper_bound = uip.check_output_object_bounds(
self.lower_bound, self.upper_bound, self.params_str()
)
discretised_dl = uip.discretise_static_point(self.dl)
snapshot_size = discretised_upper_bound - discretised_lower_bound
# If p2 does not line up with the set discretisation, the actual
# maximum element accessed in the grid will be this upper bound.
upper_bound = self._calculate_upper_bound(
discretised_lower_bound, discretised_dl, snapshot_size
)
# Each coordinate may need a different method to correct p2.
# Therefore, this check needs to be repeated after each
# correction has been applied.
while any(discretised_upper_bound < upper_bound):
try:
uip.point_within_bounds(
upper_bound, f"[{upper_bound[0]}, {upper_bound[1]}, {upper_bound[2]}]"
)
upper_bound_within_grid = True
except ValueError:
upper_bound_within_grid = False
# Ideally extend p2 up to the correct upper bound. This will
# not change the snapshot output.
if upper_bound_within_grid:
discretised_upper_bound = upper_bound
upper_bound_continuous = discretised_upper_bound * grid.dl
logger.warning(
f"{self.params_str()} upper bound not aligned with discretisation. Updating 'p2'"
f" to {upper_bound_continuous}"
)
# If the snapshot size cannot be increased, the
# discretisation may need reducing. E.g. for snapshots of 2D
# models.
elif any(discretised_dl > snapshot_size):
discretised_dl = np.where(
discretised_dl > snapshot_size, snapshot_size, discretised_dl
)
upper_bound = self._calculate_upper_bound(
discretised_lower_bound, discretised_dl, snapshot_size
)
dl_continuous = discretised_dl * grid.dl
logger.warning(
f"{self.params_str()} current bounds and discretisation would go outside"
f" domain. As discretisation is larger than the snapshot size in at least one"
f" dimension, limiting 'dl' to {dl_continuous}"
)
# Otherwise, limit p2 to the discretisation step below the
# current snapshot size. This will reduce the size of the
# snapshot by 1 in the affected dimension(s), but avoids
# out-of-bounds memory access.
else:
discretised_upper_bound = np.where(
discretised_upper_bound < upper_bound,
upper_bound - discretised_dl,
discretised_upper_bound,
)
snapshot_size = discretised_upper_bound - discretised_lower_bound
upper_bound = self._calculate_upper_bound(
discretised_lower_bound, discretised_dl, snapshot_size
)
upper_bound_continuous = discretised_upper_bound * grid.dl
logger.warning(
f"{self.params_str()} current bounds and discretisation would go outside"
f" domain. Limiting 'p2' to {upper_bound_continuous}"
)
# Raise an error to prevent an infinite loop. This is here
# as a precaution; it should not be needed.
if any(discretised_upper_bound < upper_bound):
raise ValueError(f"{self.params_str()} invalid snapshot.")
if any(discretised_dl < 0):
raise ValueError(f"{self.params_str()} the step size should not be less than zero.")
if any(discretised_dl < 1):
raise ValueError(
f"{self.params_str()} the step size should not be less than the spatial discretisation."
)
if self.iterations is not None and self.time is not None:
logger.warning(
f"{self.params_str()} Time and iterations were both specified, using 'iterations'"
)
# If number of iterations given
if self.iterations is not None:
if self.iterations <= 0 or self.iterations > grid.iterations:
raise ValueError(f"{self.params_str()} time value is not valid.")
# If time value given
elif self.time is not None:
if self.time > 0:
self.iterations = round_int(self.time / grid.dt) + 1
else:
raise ValueError(f"{self.params_str()} time value must be greater than zero.")
# No iteration or time value given
else:
raise ValueError(f"{self} specify a time or number of iterations")
if self.file_extension is None:
self.file_extension = SnapshotUser.fileexts[0]
elif self.file_extension not in SnapshotUser.fileexts:
raise ValueError(
f"'{self.file_extension}' is not a valid format for a snapshot file."
f" Valid options are: {' '.join(SnapshotUser.fileexts)}."
)
# TODO: Allow VTKHDF files when they are implemented
if isinstance(grid, MPIGrid) and self.file_extension != ".h5":
raise ValueError(
f"{self.params_str()} currently only '.h5' snapshots are compatible with MPI."
)
if self.outputs is None:
outputs = dict.fromkeys(SnapshotUser.allowableoutputs, True)
else:
outputs = dict.fromkeys(SnapshotUser.allowableoutputs, False)
# Check and set output names
for output in self.outputs:
if output not in SnapshotUser.allowableoutputs.keys():
raise ValueError(
f"{self.params_str()} contains an output type that is not"
" allowable. Allowable outputs in current context are "
f"{', '.join(SnapshotUser.allowableoutputs.keys())}."
)
else:
outputs[output] = True
snapshot = model.add_snapshot(
grid,
discretised_lower_bound,
discretised_upper_bound,
discretised_dl,
self.iterations,
self.filename,
self.file_extension,
outputs,
)
if snapshot is not None:
p1 = uip.round_to_grid_static_point(self.lower_bound)
p2 = uip.round_to_grid_static_point(self.upper_bound)
dl = uip.round_to_grid_static_point(self.dl)
logger.info(
f"{self.grid_name(grid)}Snapshot from"
f" {p1[0]:g}m, {p1[1]:g}m, {p1[2]:g}m, to"
f" {p2[0]:g}m, {p2[1]:g}m, {p2[2]:g}m, discretisation"
f" {dl[0]:g}m, {dl[1]:g}m, {dl[2]:g}m, at"
f" {snapshot.time * grid.dt:g} secs with field outputs"
f" {', '.join([k for k, v in outputs.items() if v])} "
f" and filename {snapshot.filename}{snapshot.fileext}"
" will be created."
)
class GeometryView(OutputUserObject):
"""Outputs to file(s) information about the geometry (mesh) of model.
@@ -36,8 +269,20 @@ class GeometryView(OutputUserObject):
def hash(self):
return "#geometry_view"
def __init__(self, **kwargs):
super().__init__(**kwargs)
def __init__(
self,
p1: Tuple[float, float, float],
p2: Tuple[float, float, float],
dl: Tuple[float, float, float],
output_type: str,
filename: str,
):
super().__init__(p1=p1, p2=p2, dl=dl, filename=filename, output_type=output_type)
self.lower_bound = p1
self.upper_bound = p2
self.dl = dl
self.filename = filename
self.output_type = output_type
def geometry_view_constructor(self, output_type):
"""Selects appropriate class for geometry view dependent on geometry
@@ -52,71 +297,61 @@ class GeometryView(OutputUserObject):
return GeometryViewUser
def build(self, model: Model, grid: FDTDGrid):
try:
p1 = self.kwargs["p1"]
p2 = self.kwargs["p2"]
dl = self.kwargs["dl"]
output_type = self.kwargs["output_type"].lower()
filename = self.kwargs["filename"]
except KeyError:
logger.exception(f"{self.params_str()} requires exactly eleven parameters.")
raise
GeometryViewUser = self.geometry_view_constructor(output_type)
uip = self._create_uip(grid)
try:
p3 = uip.round_to_grid_static_point(p1)
p4 = uip.round_to_grid_static_point(p2)
p1, p2 = uip.check_box_points(p1, p2, self.params_str())
except ValueError:
logger.exception(f"{self.params_str()} point is outside the domain.")
raise
xs, ys, zs = p1
xf, yf, zf = p2
discretised_lower_bound, discretised_upper_bound = uip.check_output_object_bounds(
self.lower_bound, self.upper_bound, self.params_str()
)
discretised_dl = uip.discretise_static_point(self.dl)
dx, dy, dz = uip.discretise_static_point(dl)
if dx < 0 or dy < 0 or dz < 0:
logger.exception(f"{self.params_str()} the step size should not be less than zero.")
raise ValueError
if dx > grid.nx or dy > grid.ny or dz > grid.nz:
logger.exception(
if any(discretised_dl < 0):
raise ValueError(f"{self.params_str()} the step size should not be less than zero.")
if any(discretised_dl > grid.size):
raise ValueError(
f"{self.params_str()} the step size should be less than the domain size."
)
raise ValueError
if dx < 1 or dy < 1 or dz < 1:
logger.exception(
f"{self.params_str()} the step size should not be less than the spatial discretisation."
if any(discretised_dl < 1):
raise ValueError(
f"{self.params_str()} the step size should not be less than the spatial"
" discretisation."
)
raise ValueError
if output_type not in ["n", "f"]:
logger.exception(
if self.output_type == "f" and any(discretised_dl != 1):
raise ValueError(
f"{self.params_str()} requires the spatial discretisation for the geometry view to"
" be the same as the model for geometry view of type f (fine)."
)
if self.output_type == "n":
g = model.add_geometry_view_voxels(
grid,
discretised_lower_bound,
discretised_upper_bound,
discretised_dl,
self.filename,
)
elif self.output_type == "f":
g = model.add_geometry_view_lines(
grid,
discretised_lower_bound,
discretised_upper_bound,
self.filename,
)
else:
raise ValueError(
f"{self.params_str()} requires type to be either n (normal) or f (fine)."
)
raise ValueError
if output_type == "f" and (
dx * grid.dx != grid.dx or dy * grid.dy != grid.dy or dz * grid.dz != grid.dz
):
logger.exception(
f"{self.params_str()} requires the spatial "
"discretisation for the geometry view to be the "
"same as the model for geometry view of "
"type f (fine)"
if g is not None:
p1 = uip.round_to_grid_static_point(self.lower_bound)
p2 = uip.round_to_grid_static_point(self.upper_bound)
dl = discretised_dl * grid.dl
logger.info(
f"{self.grid_name(grid)}Geometry view from"
f" {p1[0]:g}m, {p1[1]:g}m, {p1[2]:g}m,"
f" to {p2[0]:g}m, {p2[1]:g}m, {p2[2]:g}m,"
f" discretisation {dl[0]:g}m, {dl[1]:g}m, {dl[2]:g}m,"
f" with filename base {g.filenamebase} created."
)
raise ValueError
g = GeometryViewUser(xs, ys, zs, xf, yf, zf, dx, dy, dz, filename, grid)
logger.info(
f"{self.grid_name(grid)}Geometry view from {p3[0]:g}m, "
f"{p3[1]:g}m, {p3[2]:g}m, to {p4[0]:g}m, {p4[1]:g}m, "
f"{p4[2]:g}m, discretisation {dx * grid.dx:g}m, "
f"{dy * grid.dy:g}m, {dz * grid.dz:g}m, with filename "
f"base {g.filename} created."
)
model.geometryviews.append(g)
class GeometryObjectsWrite(OutputUserObject):
@@ -140,41 +375,35 @@ class GeometryObjectsWrite(OutputUserObject):
def hash(self):
return "#geometry_objects_write"
def __init__(self, **kwargs):
super().__init__(**kwargs)
def __init__(
self, p1: Tuple[float, float, float], p2: Tuple[float, float, float], filename: str
):
super().__init__(p1=p1, p2=p2, filename=filename)
self.lower_bound = p1
self.upper_bound = p2
self.basefilename = filename
def build(self, model: Model, grid: FDTDGrid):
if isinstance(grid, SubGridBaseGrid):
logger.exception(f"{self.params_str()} do not add geometry objects to subgrids.")
raise ValueError
try:
p1 = self.kwargs["p1"]
p2 = self.kwargs["p2"]
basefilename = self.kwargs["filename"]
except KeyError:
logger.exception(f"{self.params_str()} requires exactly seven parameters.")
raise
raise ValueError(f"{self.params_str()} do not add geometry objects to subgrids.")
uip = self._create_uip(grid)
p1, p2 = uip.check_box_points(p1, p2, self.params_str())
x0, y0, z0 = p1
x1, y1, z1 = p2
# TODO: Remove these when add parallel build
if isinstance(grid, MPIGrid):
geometry_object_type = MPIGeometryObjectsUser
else:
geometry_object_type = GeometryObjectsUser
g = geometry_object_type(x0, y0, z0, x1, y1, z1, basefilename)
logger.info(
f"Geometry objects in the volume from {p1[0] * grid.dx:g}m, "
f"{p1[1] * grid.dy:g}m, {p1[2] * grid.dz:g}m, to "
f"{p2[0] * grid.dx:g}m, {p2[1] * grid.dy:g}m, "
f"{p2[2] * grid.dz:g}m, will be written to "
f"{g.filename_hdf5}, with materials written to "
f"{g.filename_materials}"
discretised_lower_bound, discretised_upper_bound = uip.check_output_object_bounds(
self.lower_bound, self.upper_bound, self.params_str()
)
model.geometryobjects.append(g)
g = model.add_geometry_object(
grid, discretised_lower_bound, discretised_upper_bound, self.basefilename
)
if g is not None:
p1 = uip.round_to_grid_static_point(self.lower_bound)
p2 = uip.round_to_grid_static_point(self.upper_bound)
logger.info(
f"Geometry objects in the volume from {p1[0]:g}m,"
f" {p1[1]:g}m, {p1[2]:g}m, to {p2[0]:g}m, {p2[1]:g}m,"
f" {p2[2]:g}m, will be written to {g.filename_hdf5},"
f" with materials written to {g.filename_materials}"
)

查看文件

@@ -122,12 +122,10 @@ class Domain(ModelUserObject):
def build(self, model: Model):
uip = self._create_uip(model.G)
model.nx, model.ny, model.nz = uip.discretise_point(self.domain_size)
# TODO: Remove when distribute full build for MPI
if isinstance(model.G, MPIGrid):
model.G.nx = model.nx
model.G.ny = model.ny
model.G.nz = model.nz
discretised_domain_size = uip.discretise_static_point(self.domain_size)
model.set_size(discretised_domain_size)
if model.nx == 0 or model.ny == 0 or model.nz == 0:
raise ValueError(f"{self} requires at least one cell in every dimension")
@@ -327,7 +325,7 @@ class PMLFormulation(ModelUserObject):
def build(self, model: Model):
if self.formulation not in PML.formulations:
logger.exception(f"{self} requires the value to be one of {' '.join(PML.formulations)}")
raise ValueError(f"{self} requires the value to be one of {' '.join(PML.formulations)}")
model.G.pmls["formulation"] = self.formulation
@@ -367,27 +365,23 @@ class PMLThickness(ModelUserObject):
def build(self, model: Model):
grid = model.G
if isinstance(self.thickness, int) or len(self.thickness) == 1:
for key in grid.pmls["thickness"].keys():
grid.pmls["thickness"][key] = int(self.thickness)
elif len(self.thickness) == 6:
grid.pmls["thickness"]["x0"] = int(self.thickness[0])
grid.pmls["thickness"]["y0"] = int(self.thickness[1])
grid.pmls["thickness"]["z0"] = int(self.thickness[2])
grid.pmls["thickness"]["xmax"] = int(self.thickness[3])
grid.pmls["thickness"]["ymax"] = int(self.thickness[4])
grid.pmls["thickness"]["zmax"] = int(self.thickness[5])
else:
if not (
isinstance(self.thickness, int) or len(self.thickness) == 1 or len(self.thickness) == 6
):
raise ValueError(f"{self} requires either one or six parameter(s)")
model.G.set_pml_thickness(self.thickness)
# Check each PML does not take up more than half the grid
# TODO: MPI ranks not containing a PML will not throw an error
# here.
if (
2 * grid.pmls["thickness"]["x0"] >= grid.nx
or 2 * grid.pmls["thickness"]["y0"] >= grid.ny
or 2 * grid.pmls["thickness"]["z0"] >= grid.nz
or 2 * grid.pmls["thickness"]["xmax"] >= grid.nx
or 2 * grid.pmls["thickness"]["ymax"] >= grid.ny
or 2 * grid.pmls["thickness"]["zmax"] >= grid.nz
2 * grid.pmls["thickness"]["x0"] >= model.nx
or 2 * grid.pmls["thickness"]["y0"] >= model.ny
or 2 * grid.pmls["thickness"]["z0"] >= model.nz
or 2 * grid.pmls["thickness"]["xmax"] >= model.nx
or 2 * grid.pmls["thickness"]["ymax"] >= model.ny
or 2 * grid.pmls["thickness"]["zmax"] >= model.nz
):
raise ValueError(f"{self} has too many cells for the domain size")
@@ -525,7 +519,7 @@ class SrcSteps(ModelUserObject):
def build(self, model: Model):
uip = self._create_uip(model.G)
model.srcsteps = np.array(uip.discretise_point(self.step_size), dtype=np.int32)
model.srcsteps = uip.discretise_static_point(self.step_size)
logger.info(
f"Simple sources will step {model.srcsteps[0] * model.dx:g}m, "
@@ -562,7 +556,7 @@ class RxSteps(ModelUserObject):
def build(self, model: Model):
uip = self._create_uip(model.G)
model.rxsteps = np.array(uip.discretise_point(self.step_size), dtype=np.int32)
model.rxsteps = uip.discretise_static_point(self.step_size)
logger.info(
f"All receivers will step {model.rxsteps[0] * model.dx:g}m, "

查看文件

@@ -3,9 +3,10 @@ from typing import List, Union
from gprMax import config
from gprMax.grid.fdtd_grid import FDTDGrid
from gprMax.grid.mpi_grid import MPIGrid
from gprMax.model import Model
from gprMax.subgrids.grid import SubGridBaseGrid
from gprMax.user_inputs import MainGridUserInput, SubgridUserInput
from gprMax.user_inputs import MainGridUserInput, MPIUserInput, SubgridUserInput
class UserObject(ABC):
@@ -58,7 +59,7 @@ class UserObject(ABC):
"""Readable string of parameters given to object."""
return f"{self.hash}: {str(self.kwargs)}"
def _create_uip(self, grid: FDTDGrid) -> Union[SubgridUserInput, MainGridUserInput]:
def _create_uip(self, grid: FDTDGrid) -> MainGridUserInput:
"""Returns a point checker class based on the grid supplied.
Args:
@@ -77,6 +78,8 @@ class UserObject(ABC):
and self.autotranslate
):
return SubgridUserInput(grid)
elif isinstance(grid, MPIGrid):
return MPIUserInput(grid)
else:
return MainGridUserInput(grid)

查看文件

@@ -141,6 +141,33 @@ def logo(version):
return str
def round_int(value: float) -> int:
"""Round number to nearest integer (half values are rounded down).
Args:
value: Number to round.
Returns:
rounded: Rounded value.
"""
return int(d.Decimal(value).quantize(d.Decimal("1"), rounding=d.ROUND_HALF_DOWN))
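# Example behaviour (a sketch of the ROUND_HALF_DOWN mode used above):
# round_int(2.4) -> 2, round_int(2.5) -> 2, round_int(2.6) -> 3,
# i.e. exact halves are rounded towards zero rather than up.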
def round_float(value: float, decimalplaces: int) -> float:
"""Round down to a specific number of decimal places.
Args:
value: Number to round.
decimalplaces: Number of decimal places of float to represent
rounded value.
Returns:
rounded: Rounded value.
"""
precision = f"1.{'0' * decimalplaces}"
return float(d.Decimal(value).quantize(d.Decimal(precision), rounding=d.ROUND_FLOOR))
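# Example behaviour (a sketch of the ROUND_FLOOR mode used above):
# round_float(3.14159, 2) -> 3.14 and round_float(2.999, 2) -> 2.99,
# i.e. values are always rounded downwards, never up.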
def round_value(value: float, decimalplaces: int = 0) -> Union[float, int]:
"""Rounding function.
@@ -155,12 +182,11 @@ def round_value(value: float, decimalplaces: int = 0) -> Union[float, int]:
# Rounds to nearest integer (half values are rounded downwards)
if decimalplaces == 0:
rounded = int(d.Decimal(value).quantize(d.Decimal("1"), rounding=d.ROUND_HALF_DOWN))
rounded = round_int(value)
# Rounds down to nearest float represented by number of decimal places
else:
precision = f"1.{'0' * decimalplaces}"
rounded = float(d.Decimal(value).quantize(d.Decimal(precision), rounding=d.ROUND_FLOOR))
rounded = round_float(value, decimalplaces)
return rounded

查看文件

@@ -29,7 +29,7 @@ class VtkImageData(VtkHdfFile):
def __init__(
self,
filename: Union[str, PathLike],
shape: npt.NDArray[np.intc],
shape: npt.NDArray[np.int32],
origin: Optional[npt.NDArray[np.float32]] = None,
spacing: Optional[npt.NDArray[np.float32]] = None,
direction: Optional[npt.NDArray[np.float32]] = None,
@@ -68,11 +68,11 @@ class VtkImageData(VtkHdfFile):
if len(shape) > self.DIMENSIONS:
raise ValueError(f"Shape must not have more than {self.DIMENSIONS} dimensions.")
elif len(shape) < self.DIMENSIONS:
shape = np.concatenate((shape, np.ones(self.DIMENSIONS - len(shape), dtype=np.intc)))
shape = np.concatenate((shape, np.ones(self.DIMENSIONS - len(shape), dtype=np.int32)))
self.shape = shape
whole_extent = np.zeros(2 * self.DIMENSIONS, dtype=np.intc)
whole_extent = np.zeros(2 * self.DIMENSIONS, dtype=np.int32)
whole_extent[1::2] = self.shape
self._set_root_attribute(self.WHOLE_EXTENT_ATTR, whole_extent)
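# Illustrative value (shape assumed): a shape of (10, 20, 30) gives
# whole_extent = [0, 10, 0, 20, 0, 30], i.e. a zero start and the shape
# as the end of the extent along each axis.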
@@ -89,7 +89,7 @@ class VtkImageData(VtkHdfFile):
self.set_direction(direction)
@property
def whole_extent(self) -> npt.NDArray[np.intc]:
def whole_extent(self) -> npt.NDArray[np.int32]:
return self._get_root_attribute(self.WHOLE_EXTENT_ATTR)
@property
@@ -141,7 +141,7 @@ class VtkImageData(VtkHdfFile):
self._set_root_attribute(self.DIRECTION_ATTR, direction)
def add_point_data(
self, name: str, data: npt.NDArray, offset: Optional[npt.NDArray[np.intc]] = None
self, name: str, data: npt.NDArray, offset: Optional[npt.NDArray[np.int32]] = None
):
"""Add point data to the VTKHDF file.
@@ -163,7 +163,7 @@ class VtkImageData(VtkHdfFile):
return super().add_point_data(name, data, points_shape, offset)
def add_cell_data(
self, name: str, data: npt.NDArray, offset: Optional[npt.NDArray[np.intc]] = None
self, name: str, data: npt.NDArray, offset: Optional[npt.NDArray[np.int32]] = None
):
"""Add cell data to the VTKHDF file.

查看文件

@@ -85,7 +85,7 @@ class VtkUnstructuredGrid(VtkHdfFile):
raise ValueError("Connectivity array is shorter than final cell_offsets value")
elif len(connectivity) > cell_offsets[-1]:
raise logger.warning(
logger.warning(
"Connectivity array longer than final cell_offsets value."
" Some connectivity data will be ignored"
)
@@ -101,20 +101,20 @@ class VtkUnstructuredGrid(VtkHdfFile):
@property
def number_of_cells(self) -> int:
number_of_cells = self._get_root_dataset(self.Dataset.NUMBER_OF_CELLS)
return np.sum(number_of_cells, dtype=np.intc)
return np.sum(number_of_cells, dtype=np.int32)
@property
def number_of_connectivity_ids(self) -> int:
number_of_connectivity_ids = self._get_root_dataset(self.Dataset.NUMBER_OF_CONNECTIVITY_IDS)
return np.sum(number_of_connectivity_ids, dtype=np.intc)
return np.sum(number_of_connectivity_ids, dtype=np.int32)
@property
def number_of_points(self) -> int:
number_of_points = self._get_root_dataset(self.Dataset.NUMBER_OF_POINTS)
return np.sum(number_of_points, dtype=np.intc)
return np.sum(number_of_points, dtype=np.int32)
def add_point_data(
self, name: str, data: npt.NDArray, offset: Optional[npt.NDArray[np.intc]] = None
self, name: str, data: npt.NDArray, offset: Optional[npt.NDArray[np.int32]] = None
):
"""Add point data to the VTKHDF file.
@@ -143,7 +143,7 @@ class VtkUnstructuredGrid(VtkHdfFile):
return super().add_point_data(name, data, shape, offset)
def add_cell_data(
self, name: str, data: npt.NDArray, offset: Optional[npt.NDArray[np.intc]] = None
self, name: str, data: npt.NDArray, offset: Optional[npt.NDArray[np.int32]] = None
):
"""Add cell data to the VTKHDF file.

查看文件

@@ -5,7 +5,7 @@ from enum import Enum
from os import PathLike
from pathlib import Path
from types import TracebackType
from typing import Optional, Union
from typing import Optional, Tuple, Union
import h5py
import numpy as np
@@ -185,8 +185,8 @@ class VtkHdfFile(AbstractContextManager):
self,
name: "VtkHdfFile.Dataset",
data: npt.ArrayLike,
shape: Optional[npt.NDArray[np.intc]] = None,
offset: Optional[npt.NDArray[np.intc]] = None,
shape: Optional[npt.NDArray[np.int32]] = None,
offset: Optional[npt.NDArray[np.int32]] = None,
xyz_data_ordering=True,
):
"""Write specified dataset to the root group of the VTKHDF file.
@@ -211,8 +211,8 @@ class VtkHdfFile(AbstractContextManager):
self,
path: str,
data: npt.ArrayLike,
shape: Optional[npt.NDArray[np.intc]] = None,
offset: Optional[npt.NDArray[np.intc]] = None,
shape: Optional[Union[npt.NDArray[np.int32], Tuple[int, ...]]] = None,
offset: Optional[npt.NDArray[np.int32]] = None,
dtype: Optional[npt.DTypeLike] = None,
xyz_data_ordering=True,
):
@@ -228,6 +228,8 @@ class VtkHdfFile(AbstractContextManager):
Can be omitted if data provides the full dataset.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
dtype (optional): Type of the data. If omitted, the type
will be deduced from the provided data.
xyz_data_ordering (optional): If True, the data will be
transposed as VTKHDF stores datasets using ZYX ordering.
Default True.
@@ -237,17 +239,37 @@ class VtkHdfFile(AbstractContextManager):
and offset are invalid.
"""
# If dtype is a string, ensure it is ascii encoded and use
# variable length strings
if dtype is not None and h5py.check_string_dtype(dtype) is not None:
dtype = h5py.string_dtype(encoding="ascii", length=None)
# If dtype is a string and using parallel I/O, ensure using
# fixed length strings
if isinstance(dtype, np.dtype) and self.comm is not None:
string_info = h5py.check_string_dtype(dtype)
if string_info is not None and string_info.length is None:
logger.warning(
"HDF5 does not support variable length strings with parallel I/O."
" Using fixed length strings instead."
)
dtype = h5py.string_dtype(encoding="ascii", length=0)
if not isinstance(data, np.ndarray):
data = np.array(data, dtype=dtype)
if data.ndim < 1:
data = np.expand_dims(data, axis=-1)
if dtype is None:
if data.dtype.kind == "U":
if dtype is not None: # Only log warning if user specified a data type
logger.warning(
"NumPy UTF-32 ('U' dtype) is not supported by HDF5."
" Converting to bytes array ('S' dtype)."
)
data = data.astype("S")
# Explicitly define string datatype
# VTKHDF only supports ascii strings (not UTF-8)
if data.dtype.kind == "S":
dtype = h5py.string_dtype(encoding="ascii", length=data.dtype.itemsize)
data = data.astype(dtype)
elif dtype is None:
dtype = data.dtype
# VTKHDF stores datasets using ZYX ordering rather than XYZ
@@ -260,8 +282,14 @@ class VtkHdfFile(AbstractContextManager):
if offset is not None:
offset = np.flip(offset)
logger.debug(
f"Writing dataset '{path}', shape: {shape}, data.shape: {data.shape}, dtype: {dtype}"
)
if shape is None or all(shape == data.shape):
self.file_handler.create_dataset(path, data=data, dtype=dtype)
shape = data.shape if shape is None else shape
dataset = self.file_handler.create_dataset(path, shape=shape, dtype=dtype)
dataset[:] = data
elif offset is None:
raise ValueError(
"Offset must not be None as the full dataset has not been provided."
@@ -293,16 +321,57 @@ class VtkHdfFile(AbstractContextManager):
start = offset
stop = offset + data.shape
dataset_slice = (slice(start[i], stop[i]) for i in range(dimensions))
dataset_slice = tuple([slice(start[i], stop[i]) for i in range(dimensions)])
dataset[dataset_slice] = data
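# Illustrative partial write (values assumed): an offset of (0, 0, 5) with
# data of shape (10, 10, 5) gives dataset_slice = (slice(0, 10),
# slice(0, 10), slice(5, 10)), so the block lands in dataset[0:10, 0:10, 5:10].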
def _create_dataset(
self, path: str, shape: Union[npt.NDArray[np.int32], Tuple[int, ...]], dtype: npt.DTypeLike
):
"""Create dataset in the VTKHDF file without writing any data.
Args:
path: Absolute path to the dataset.
shape: Size of the full dataset being created.
dtype: Type of the data.
Raises:
TypeError: Raised if attempt to use variable length strings
with parallel I/O.
"""
dtype = np.dtype(dtype)
# If dtype is a string and using parallel I/O, ensure using
# fixed length strings
if self.comm is not None:
string_info = h5py.check_string_dtype(dtype)
if string_info is not None and string_info.length is None:
raise TypeError(
"HDF5 does not support variable length strings with parallel I/O."
" Use fixed length strings instead."
)
if dtype.kind == "U":
logger.warning(
"NumPy UTF-32 ('U' dtype) is not supported by HDF5."
" Converting to bytes array ('S' dtype)."
)
# Explicitly define string datatype
# VTKHDF only supports ascii strings (not UTF-8)
if dtype.kind == "U" or dtype.kind == "S":
dtype = h5py.string_dtype(encoding="ascii", length=dtype.itemsize)
logger.debug(f"Creating dataset '{path}', shape: {shape}, dtype: {dtype}")
self.file_handler.create_dataset(path, shape=shape, dtype=dtype)
def add_point_data(
self,
name: str,
data: npt.NDArray,
shape: Optional[npt.NDArray[np.intc]] = None,
offset: Optional[npt.NDArray[np.intc]] = None,
shape: Optional[Union[npt.NDArray[np.int32], Tuple[int, ...]]] = None,
offset: Optional[npt.NDArray[np.int32]] = None,
):
"""Add point data to the VTKHDF file.
@@ -321,8 +390,8 @@ class VtkHdfFile(AbstractContextManager):
self,
name: str,
data: npt.NDArray,
shape: Optional[npt.NDArray[np.intc]] = None,
offset: Optional[npt.NDArray[np.intc]] = None,
shape: Optional[Union[npt.NDArray[np.int32], Tuple[int, ...]]] = None,
offset: Optional[npt.NDArray[np.int32]] = None,
):
"""Add cell data to the VTKHDF file.
@@ -340,25 +409,41 @@ class VtkHdfFile(AbstractContextManager):
def add_field_data(
self,
name: str,
data: npt.ArrayLike,
shape: Optional[npt.NDArray[np.intc]] = None,
offset: Optional[npt.NDArray[np.intc]] = None,
data: Optional[npt.ArrayLike],
shape: Optional[Union[npt.NDArray[np.int32], Tuple[int, ...]]] = None,
offset: Optional[npt.NDArray[np.int32]] = None,
dtype: Optional[npt.DTypeLike] = None,
):
"""Add field data to the VTKHDF file.
Args:
name: Name of the dataset.
data: Data to be saved.
data: Data to be saved. Can be None if both shape and dtype
are specified. If None, the dataset will be created but
no data written. This can be useful if, for example,
not all ranks are writing the data. As long as all ranks
know the shape and dtype, ranks not writing data can
perform the collective operation of creating the
dataset, but only the rank(s) with the data need to
write data.
shape (optional): Size of the full dataset being created.
Can be omitted if data provides the full dataset.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
dtype (optional): Type of the data. If omitted, the type
will be deduced from the provided data.
"""
dataset_path = self._build_dataset_path("FieldData", name)
self._write_dataset(
dataset_path, data, shape=shape, offset=offset, dtype=dtype, xyz_data_ordering=False
)
if data is not None:
self._write_dataset(
dataset_path, data, shape=shape, offset=offset, dtype=dtype, xyz_data_ordering=False
)
elif shape is not None and dtype is not None:
self._create_dataset(dataset_path, shape, dtype)
else:
raise ValueError(
"If data is None, shape and dtype must be provided. I.e. they must not be None"
)
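# Minimal usage sketch (names and values assumed, not from the source): with
# parallel I/O every rank must take part in creating the dataset, but only
# the rank holding the data needs to write it, e.g.
#   if rank == 0:
#       vtk_file.add_field_data("origin", np.zeros(3), shape=(3,), dtype=np.float64)
#   else:
#       vtk_file.add_field_data("origin", None, shape=(3,), dtype=np.float64)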
class VtkCellType(np.uint8, Enum):

查看文件

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4a8134fca3e6228211e4a4bc3e504e64f888c55f22e025b482ec501e0051d253
size 75740296
oid sha256:c7c7303daffcd4debb8df4c0c5e6cd62a6fd07c6e4b33e8c47c9e190c42d75cf
size 75736481

查看文件

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2ccd95ff4ef73c224c99a8c321ad6f8014c673a1b33bbb741bff415dfbffd0aa
size 4931176
oid sha256:a0a195bfeec73f72083e6dbeb5e3713259721d2d4e9118bf019ee9905f02f5a8
size 4927361

查看文件

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:82b8ac914a9fe20b49f2512215782f01305a2e3211aaac51913f88296a0e3ac5
size 4012444
oid sha256:67c7c15c0025feac5b5899c3a691929ae1d7338c68714708c7a41e51a7f71441
size 4009216

查看文件

@@ -9,6 +9,8 @@ import os
from pathlib import Path
from typing import Literal, Optional, Union
import numpy as np
import numpy.typing as npt
import reframe.utility.sanity as sn
import reframe.utility.typecheck as typ
from reframe import RunOnlyRegressionTest, simple_test
@@ -23,7 +25,7 @@ from reframe.core.builtins import (
variable,
)
from reframe.core.exceptions import DependencyError
from reframe.utility import udeps
from reframe.utility import osext, udeps
from reframe_tests.tests.regression_checks import RegressionCheck
from reframe_tests.utilities.deferrable import path_join
@@ -43,6 +45,7 @@ class CreatePyenvTest(RunOnlyRegressionTest):
valid_systems = ["generic", "archer2:login"]
valid_prog_environs = ["builtin", "PrgEnv-gnu"]
modules = ["cray-python"]
time_limit = "20m"
prerun_cmds = [
"python -m venv --system-site-packages --prompt gprMax .venv",
@@ -122,13 +125,14 @@ class GprMaxBaseTest(RunOnlyRegressionTest):
valid_systems = ["archer2:compute"]
valid_prog_environs = ["PrgEnv-gnu"]
modules = ["cray-python"]
time_limit = "10m"
num_cpus_per_task = 16
exclusive_access = True
model = parameter()
sourcesdir = required
executable = "time -p python -m gprMax"
executable = "python -m gprMax"
regression_checks = variable(typ.List[RegressionCheck], value=[])
@@ -389,49 +393,134 @@ class GprMaxBaseTest(RunOnlyRegressionTest):
@performance_function("s", perf_key="run_time")
def extract_run_time(self):
"""Extract total runtime from the last task to complete."""
return sn.extractsingle(
r"real\s+(?P<run_time>\S+)", self.stderr, "run_time", float, self.num_tasks - 1
"""Extract total runtime from SLURM."""
sacct_command = osext.run_command(
[
"sacct",
"--format=JobID,JobName,State,Elapsed",
"-j",
self.job.jobid,
]
)
hours, minutes, seconds = sn.extractsingle_s(
self.job.jobid
+ r"\.0\s+python\s+COMPLETED\s+(?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d+)",
sacct_command.stdout,
["hours", "minutes", "seconds"],
int,
)
return hours * 3600 + minutes * 60 + seconds
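# Illustrative sacct line matched by the pattern above (values assumed):
#   123456.0     python     COMPLETED     00:05:43
# which gives a run time of 0 * 3600 + 5 * 60 + 43 = 343 seconds.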
@performance_function("s", perf_key="simulation_time")
def extract_simulation_time(self):
"""Extract simulation time reported by gprMax."""
"""Extract average simulation time reported by gprMax."""
return sn.round(self.extract_simulation_time_per_rank().sum() / self.num_tasks, 2)
# sn.extractall throws an error if a group has value None.
# Therefore have to handle the < 1 min, >= 1 min and >= 1 hour cases separately.
timeframe = sn.extractsingle(
r"=== Simulation completed in \S+ (?P<timeframe>hour|minute|second)",
# @performance_function("s", perf_key="max_simulation_time")
# def extract_max_simulation_time(self):
# """Extract maximum simulation time reported by gprMax."""
# return sn.round(self.extract_simulation_time_per_rank().max(), 2)
# @performance_function("s", perf_key="min_simulation_time")
# def extract_min_simulation_time(self):
# """Extract minimum simulation time reported by gprMax."""
# return sn.round(self.extract_simulation_time_per_rank().min(), 2)
# @performance_function("s", perf_key="wall_time")
# def extract_wall_time(self):
# """Extract total simulation time reported by gprMax."""
# return sn.round(self.extract_simulation_time_per_rank().sum(), 2)
def extract_simulation_time_per_rank(self) -> npt.NDArray[np.float64]:
"""Extract simulation time reported by gprMax from each rank.
Raises:
ValueError: Raised if not all ranks report the simulation
time.
Returns:
simulation_times: Simulation time for each rank in seconds.
"""
simulation_time = sn.extractall(
r"=== Simulation completed in "
r"((?<= )(?P<hours>\d+) hours?)?\D*"
r"((?<= )(?P<minutes>\d+) minutes?)?\D*"
r"((?<= )(?P<seconds>[\d\.]+) seconds?)?\D*=+",
self.stdout,
"timeframe",
["hours", "minutes", "seconds"],
lambda x: 0.0 if x is None else float(x),
)
if timeframe == "hour":
simulation_time = sn.extractall(
r"=== Simulation completed in (?P<hours>\S+) hours?, (?P<minutes>\S+) minutes? and (?P<seconds>\S+) seconds? =*",
self.stdout,
["hours", "minutes", "seconds"],
float,
# Check simulation time was reported by all ranks
if sn.len(simulation_time) != self.num_tasks:
raise ValueError(
f"Simulation time not reported for all ranks. Found {sn.len(simulation_time)}, expected {self.num_tasks}"
)
hours = simulation_time[0][0]
minutes = simulation_time[0][1]
seconds = simulation_time[0][2]
elif timeframe == "minute":
hours = 0
simulation_time = sn.extractall(
r"=== Simulation completed in (?P<minutes>\S+) minutes? and (?P<seconds>\S+) seconds? =*",
self.stdout,
["minutes", "seconds"],
float,
# Convert hour and minute values to seconds
simulation_time = np.array(simulation_time.evaluate())
simulation_time[:, 0] *= 3600
simulation_time[:, 1] *= 60
# Return simulation time in seconds for each rank
return simulation_time.sum(axis=1)
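# Worked example (report text assumed): a rank printing
# "=== Simulation completed in 1 hour, 2 minutes and 3.5 seconds ===..."
# is captured as (1, 2, 3.5) and converted above to
# 1 * 3600 + 2 * 60 + 3.5 = 3723.5 seconds for that rank.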
@performance_function("GB", perf_key="total_memory_use")
def extract_total_memory_use(self):
"""Extract total memory use across all ranks."""
return sn.round(self.extract_memory_use_per_rank().sum(), 2)
@performance_function("GB", perf_key="average_memory_use")
def extract_average_memory_use(self):
"""Extract average memory use for each rank."""
return sn.round(self.extract_memory_use_per_rank().sum() / self.num_tasks, 2)
# @performance_function("GB", perf_key="min_memory_use")
# def extract_min_memory_use(self):
# """Extract minimum memory use by a single rank."""
# return sn.round(self.extract_memory_use_per_rank().min(), 2)
# @performance_function("GB", perf_key="max_memory_use")
# def extract_max_memory_use(self):
# """Extract maximum memory use by a single rank."""
# return sn.round(self.extract_memory_use_per_rank().max(), 2)
def extract_memory_use_per_rank(self) -> npt.NDArray[np.float64]:
"""Extract gprMax report of the estimated memory use per rank.
Raises:
ValueError: Raised if not all ranks report their estimated
memory usage.
Returns:
usages: Estimated memory usage for each rank in GB.
"""
memory_report = sn.extractall(
r"Memory used \(estimated\): ~(?P<memory_usage>\S+) (?P<units>\S+)",
self.stdout,
["memory_usage", "units"],
[float, str],
)
# Check all ranks reported their estimated memory usage
if sn.len(memory_report) != self.num_tasks:
raise ValueError(
f"Memory usage not reported for all ranks. Found {sn.len(memory_report)}, expected {self.num_tasks}"
)
minutes = simulation_time[0][0]
seconds = simulation_time[0][1]
else:
hours = 0
minutes = 0
seconds = sn.extractsingle(
r"=== Simulation completed in (?P<seconds>\S+) seconds? =*",
self.stdout,
"seconds",
float,
)
return hours * 3600 + minutes * 60 + seconds
usages = np.zeros(self.num_tasks)
# Convert all values into GB
for index, (value, unit) in enumerate(memory_report):
if unit == "MB":
value /= 1024
elif unit == "KB":
value /= 1048576
elif unit != "GB":
raise ValueError(f"Unknown unit of memory '{unit}'")
usages[index] = value
return usages
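# Worked example (values assumed): reports of "~512 MB" and "~2.0 GB" from
# two ranks give usages = [512 / 1024, 2.0] = [0.5, 2.0] GB.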

查看文件

@@ -9,3 +9,5 @@
#material: 8 0 1 0 half_space
#box: 0 0 0 0.100 0.100 0.100 half_space
#geometry_objects_write: 0 0 0 0.1 0.1 0.1 full_volume

查看文件

@@ -9,3 +9,5 @@
#material: 8 0 1 0 half_space
#box: 0 0 0 0.100 0.100 0.050 half_space
#geometry_objects_write: 0 0 0 0.1 0.1 0.1 full_volume

查看文件

@@ -9,3 +9,5 @@
#material: 8 0 1 0 half_space
#box: 0.020 0.020 0.020 0.080 0.080 0.050 half_space
#geometry_objects_write: 0 0 0 0.1 0.1 0.1 full_volume

查看文件

@@ -0,0 +1,13 @@
#title: Hertzian dipole over a half-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.050 0.050 0.050 myWave
#rx: 0.070 0.070 0.070
#material: 8 0 1 0 half_space
#box: 0.05 0.05 0.05 0.100 0.100 0.100 half_space n
#geometry_objects_write: 0 0 0 0.1 0.1 0.1 full_volume

查看文件

@@ -9,3 +9,5 @@
#material: 8 0 1 0 half_space
#box: 0 0 0 0.020 0.020 0.020 half_space
#geometry_objects_write: 0 0 0 0.1 0.1 0.1 full_volume

查看文件

@@ -9,3 +9,5 @@
#material: 8 0 1 0 half_space
#box: 0.030 0.030 0.030 0.045 0.045 0.045 half_space
#geometry_objects_write: 0 0 0 0.1 0.1 0.1 full_volume

查看文件

@@ -0,0 +1,11 @@
#title: Hertzian dipole over a half-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.020 0.020 0.020 myWave
#rx: 0.080 0.080 0.080
#material: 8 0 1 0 half_space
#cone: 0 0.05 0.05 0.100 0.05 0.05 0.05 0 half_space

查看文件

@@ -0,0 +1,11 @@
#title: Hertzian dipole over a half-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.020 0.020 0.020 myWave
#rx: 0.080 0.080 0.080
#material: 8 0 1 0 half_space
#cone: 0.01 0.05 0.04 0.150 0.08 0.05 0.07 0.01 half_space

查看文件

@@ -0,0 +1,11 @@
#title: Hertzian dipole over a half-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.020 0.020 0.020 myWave
#rx: 0.080 0.080 0.080
#material: 8 0 1 0 half_space
#cone: 0 0.05 0.05 0.150 0.05 0.05 0.05 0 half_space

查看文件

@@ -0,0 +1,11 @@
#title: Hertzian dipole over a half-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.020 0.020 0.020 myWave
#rx: 0.080 0.080 0.080
#material: 8 0 1 0 half_space
#cone: 0 0.05 0.05 0.100 0.05 0.05 0.05 0 half_space n

查看文件

@@ -0,0 +1,11 @@
#title: Hertzian dipole over a half-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.020 0.020 0.020 myWave
#rx: 0.080 0.080 0.080
#material: 8 0 1 0 half_space
#cone: 0.07 0.06 0.07 0.07 0.08 0.07 0.015 0.005 half_space

查看文件

@@ -0,0 +1,13 @@
#title: Hertzian dipole over a half-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.020 0.020 0.020 myWave
#rx: 0.080 0.080 0.080
#material: 8 0 1 0 half_space
#cylinder: 0 0.05 0.05 0.100 0.05 0.05 0.05 half_space
#geometry_objects_write: 0 0 0 0.1 0.1 0.1 full_volume

查看文件

@@ -0,0 +1,13 @@
#title: Hertzian dipole over a half-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.020 0.020 0.020 myWave
#rx: 0.080 0.080 0.080
#material: 8 0 1 0 half_space
#cylinder: 0.01 0.05 0.04 0.150 0.08 0.05 0.04 half_space
#geometry_objects_write: 0 0 0 0.1 0.1 0.1 full_volume

查看文件

@@ -0,0 +1,13 @@
#title: Hertzian dipole over a half-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.020 0.020 0.020 myWave
#rx: 0.080 0.080 0.080
#material: 8 0 1 0 half_space
#cylinder: 0 0.05 0.05 0.150 0.05 0.05 0.05 half_space
#geometry_objects_write: 0 0 0 0.1 0.1 0.1 full_volume

查看文件

@@ -0,0 +1,13 @@
#title: Hertzian dipole over a half-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.020 0.020 0.020 myWave
#rx: 0.080 0.080 0.080
#material: 8 0 1 0 half_space
#cylinder: 0 0.05 0.05 0.100 0.05 0.05 0.05 half_space n
#geometry_objects_write: 0 0 0 0.1 0.1 0.1 full_volume

查看文件

@@ -0,0 +1,13 @@
#title: Hertzian dipole over a half-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.020 0.020 0.020 myWave
#rx: 0.080 0.080 0.080
#material: 8 0 1 0 half_space
#cylinder: 0.07 0.06 0.07 0.07 0.08 0.07 0.015 half_space
#geometry_objects_write: 0 0 0 0.1 0.1 0.1 full_volume

查看文件

@@ -0,0 +1,13 @@
#title: Hertzian dipole over a half-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.020 0.020 0.020 myWave
#rx: 0.080 0.080 0.080
#material: 8 0 1 0 half_space
#cylindrical_sector: x 0.05 0.05 0 0.1 0.05 60 330 half_space
#geometry_objects_write: 0 0 0 0.1 0.1 0.1 full_volume

查看文件

@@ -0,0 +1,13 @@
#title: Hertzian dipole over a half-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.020 0.020 0.020 myWave
#rx: 0.080 0.080 0.080
#material: 8 0 1 0 half_space
#cylindrical_sector: x 0.05 0.05 0.05 0.1 0.05 60 330 half_space
#geometry_objects_write: 0 0 0 0.1 0.1 0.1 full_volume

查看文件

@@ -0,0 +1,13 @@
#title: Hertzian dipole over a half-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.020 0.020 0.020 myWave
#rx: 0.080 0.080 0.080
#material: 8 0 1 0 half_space
#cylindrical_sector: y 0.07 0.075 0.01 0.04 0.02 310 120 half_space
#geometry_objects_write: 0 0 0 0.1 0.1 0.1 full_volume

查看文件

@@ -0,0 +1,13 @@
#title: Hertzian dipole over a half-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.020 0.020 0.020 myWave
#rx: 0.080 0.080 0.080
#material: 8 0 1 0 half_space
#cylindrical_sector: z 0.07 0.06 0.04 0.1 0.05 30 240 half_space
#geometry_objects_write: 0 0 0 0.1 0.1 0.1 full_volume

查看文件

@@ -0,0 +1,13 @@
#title: Hertzian dipole over a half-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.020 0.020 0.020 myWave
#rx: 0.080 0.080 0.080
#material: 8 0 1 0 half_space
#cylindrical_sector: z 0.05 0.05 0 0.1 0.05 0 180 half_space n
#geometry_objects_write: 0 0 0 0.1 0.1 0.1 full_volume

查看文件

@@ -12,4 +12,4 @@
## 1mm gap at centre of dipole
#edge: 0.025 0.025 0.100 0.025 0.025 0.101 free_space
#geometry_view: 0.020 0.020 0.020 0.030 0.030 0.180 0.001 0.001 0.001 antenna_wire_dipole_fs f
#geometry_objects_write: 0 0 0 0.05 0.05 0.2 full_volume

Some files were not shown because too many files changed in this diff.