Merge branch '26-mpi-geometry-objects' into mpi

This commit is contained in:
nmannall
2024-12-06 14:38:21 +00:00
Current commit 35f2b34b3e
Showing 41 changed files with 1987 additions and 429 deletions

.gitattributes (vendored): 5 additions

@@ -1 +1,6 @@
tools/Jupyter_notebooks/* linguist-vendored
reframe_tests/regression_checks/TestGeometryView_5176823e/partial_volume.vtkhdf filter=lfs diff=lfs merge=lfs -text
reframe_tests/regression_checks/TestGeometryView_5176823e/full_volume.vtkhdf filter=lfs diff=lfs merge=lfs -text
reframe_tests/regression_checks/TestGeometryView_77980202/full_volume.vtkhdf filter=lfs diff=lfs merge=lfs -text
reframe_tests/regression_checks/TestGeometryObject_a6a096cb/full_volume.h5 filter=lfs diff=lfs merge=lfs -text
reframe_tests/regression_checks/TestGeometryObject_a6a096cb/partial_volume.h5 filter=lfs diff=lfs merge=lfs -text


@@ -117,7 +117,7 @@ class Box(UserObjectGeometry):
numIDx = materials[0].numID
numIDy = materials[1].numID
numIDz = materials[2].numID
requiredID = materials[0].ID + "+" + materials[1].ID + "+" + materials[2].ID
requiredID = Material.create_compound_id(materials[0], materials[1], materials[2])
averagedmaterial = [x for x in grid.materials if x.ID == requiredID]
if averagedmaterial:
numID = averagedmaterial[0].numID


@@ -119,7 +119,7 @@ class Cone(UserObjectGeometry):
numIDx = materials[0].numID
numIDy = materials[1].numID
numIDz = materials[2].numID
requiredID = materials[0].ID + "+" + materials[1].ID + "+" + materials[2].ID
requiredID = Material.create_compound_id(materials[0], materials[1], materials[2])
averagedmaterial = [x for x in grid.materials if x.ID == requiredID]
if averagedmaterial:
numID = averagedmaterial[0].numID


@@ -104,7 +104,7 @@ class Cylinder(UserObjectGeometry):
numIDx = materials[0].numID
numIDy = materials[1].numID
numIDz = materials[2].numID
requiredID = materials[0].ID + "+" + materials[1].ID + "+" + materials[2].ID
requiredID = Material.create_compound_id(materials[0], materials[1], materials[2])
averagedmaterial = [x for x in grid.materials if x.ID == requiredID]
if averagedmaterial:
numID = averagedmaterial[0].numID


@@ -128,7 +128,7 @@ class CylindricalSector(UserObjectGeometry):
numIDx = materials[0].numID
numIDy = materials[1].numID
numIDz = materials[2].numID
requiredID = f"{materials[0].ID}+{materials[1].ID}+{materials[2].ID}"
requiredID = Material.create_compound_id(materials[0], materials[1], materials[2])
averagedmaterial = [x for x in grid.materials if x.ID == requiredID]
if averagedmaterial:
numID = averagedmaterial[0].numID


@@ -99,7 +99,7 @@ class Ellipsoid(UserObjectGeometry):
numIDx = materials[0].numID
numIDy = materials[1].numID
numIDz = materials[2].numID
requiredID = materials[0].ID + "+" + materials[1].ID + "+" + materials[2].ID
requiredID = Material.create_compound_id(materials[0], materials[1], materials[2])
averagedmaterial = [x for x in grid.materials if x.ID == requiredID]
if averagedmaterial:
numID = averagedmaterial[0].numID


@@ -94,7 +94,7 @@ class Sphere(UserObjectGeometry):
numIDx = materials[0].numID
numIDy = materials[1].numID
numIDz = materials[2].numID
requiredID = materials[0].ID + "+" + materials[1].ID + "+" + materials[2].ID
requiredID = Material.create_compound_id(materials[0], materials[1], materials[2])
averagedmaterial = [x for x in grid.materials if x.ID == requiredID]
if averagedmaterial:
numID = averagedmaterial[0].numID


@@ -145,7 +145,7 @@ class Triangle(UserObjectGeometry):
numIDx = materials[0].numID
numIDy = materials[1].numID
numIDz = materials[2].numID
requiredID = materials[0].ID + "+" + materials[1].ID + "+" + materials[2].ID
requiredID = Material.create_compound_id(materials[0], materials[1], materials[2])
averagedmaterial = [x for x in grid.materials if x.ID == requiredID]
if averagedmaterial:
numID = averagedmaterial[0].numID


@@ -37,6 +37,7 @@ from .cmds_geometry.cmds_geometry import (
rotate_polarisation,
)
from .geometry_outputs import GeometryObjects as GeometryObjectsUser
from .geometry_outputs import MPIGeometryObjects as MPIGeometryObjectsUser
from .materials import DispersiveMaterial as DispersiveMaterialUser
from .materials import ListMaterial as ListMaterialUser
from .materials import Material as MaterialUser
@@ -1996,7 +1997,12 @@ class GeometryObjectsWrite(UserObjectMulti):
x0, y0, z0 = p1
x1, y1, z1 = p2
g = GeometryObjectsUser(x0, y0, z0, x1, y1, z1, basefilename)
if isinstance(grid, MPIGrid):
geometry_object_type = MPIGeometryObjectsUser
else:
geometry_object_type = GeometryObjectsUser
g = geometry_object_type(x0, y0, z0, x1, y1, z1, basefilename)
logger.info(
f"Geometry objects in the volume from {p1[0] * grid.dx:g}m, "


@@ -92,7 +92,7 @@ class Discretisation(UserObjectSingle):
def build(self, model, uip):
try:
model.dl = np.array(self.kwargs["p1"], dtype=float)
model.dl = np.array(self.kwargs["p1"], dtype=np.float64)
except KeyError:
logger.exception(f"{self.__str__()} discretisation requires a point")
raise


@@ -17,9 +17,81 @@
# along with gprMax. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
cimport numpy as np
cpdef get_line_properties(
int number_of_lines,
int nx,
int ny,
int nz,
np.uint32_t[:, :, :, :] ID
):
"""Generate connectivity array and get material ID for each line.
Args:
number_of_lines: Number of lines.
nx: Number of points in the x dimension.
ny: Number of points in the y dimension.
nz: Number of points in the z dimension.
ID: memoryview of sampled ID array according to geometry view
spatial discretisation.
Returns:
connectivity: NDArray of shape (2 * number_of_lines,) listing
the start and end point IDs of each line.
material_data: NDArray of shape (number_of_lines,) listing
material IDs for each line.
"""
cdef np.ndarray material_data = np.zeros(number_of_lines, dtype=np.uint32)
cdef np.ndarray connectivity = np.zeros(2 * number_of_lines, dtype=np.intc)
cdef int line_index = 0
cdef int connectivity_index = 0
cdef int point_id = 0
cdef int z_step = 1
cdef int y_step = nz + 1
cdef int x_step = y_step * (ny + 1)
cdef int i, j, k
for i in range(nx):
for j in range(ny):
for k in range(nz):
# x-direction cell edge
material_data[line_index] = ID[0, i, j, k]
connectivity[connectivity_index] = point_id
connectivity[connectivity_index + 1] = point_id + x_step
line_index += 1
connectivity_index += 2
# y-direction cell edge
material_data[line_index] = ID[1, i, j, k]
connectivity[connectivity_index] = point_id
connectivity[connectivity_index + 1] = point_id + y_step
line_index += 1
connectivity_index += 2
# z-direction cell edge
material_data[line_index] = ID[2, i, j, k]
connectivity[connectivity_index] = point_id
connectivity[connectivity_index + 1] = point_id + z_step
line_index += 1
connectivity_index += 2
# Next point
point_id += 1
# Skip point at (i, j, nz)
point_id += 1
# Skip points in line (i, ny, t) where 0 <= t <= nz
point_id += nz + 1
return connectivity, material_data
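Note: the index arithmetic above assumes points are numbered over the (nx + 1) x (ny + 1) x (nz + 1) point lattice with k varying fastest, which is why the two skips are needed. A minimal standalone sketch of the same layout (the function name is illustrative, not part of the module):

def flat_point_id(i: int, j: int, k: int, ny: int, nz: int) -> int:
    # Matches z_step = 1, y_step = nz + 1, x_step = y_step * (ny + 1) above
    y_step = nz + 1
    x_step = y_step * (ny + 1)
    return i * x_step + j * y_step + k

# The loop only visits cell corners (i < nx, j < ny, k < nz), so after each
# (i, j) run it skips the point at k == nz, and after each i it skips the
# whole (i, ny, :) line of nz + 1 points.
assert flat_point_id(0, 0, 3, ny=2, nz=3) == 3
assert flat_point_id(0, 1, 0, ny=2, nz=3) == 4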
cpdef write_lines(
float xs,
float ys,


@@ -53,17 +53,13 @@ cpdef void create_electric_average(
G: FDTDGrid class describing a grid in a model.
"""
# Make an ID composed of the names of the four materials that will be averaged
requiredID = (G.materials[numID1].ID + '+' + G.materials[numID2].ID + '+' +
G.materials[numID3].ID + '+' + G.materials[numID4].ID)
# Make an ID composed of the names of the four materials that will
# be averaged. Sort the names to ensure the same four component
# materials always form the same ID.
requiredID = Material.create_compound_id(G.materials[numID1], G.materials[numID2], G.materials[numID3], G.materials[numID4])
# Check if this material already exists
tmp = requiredID.split('+')
material = [x for x in G.materials if
x.ID.count(tmp[0]) == requiredID.count(tmp[0]) and
x.ID.count(tmp[1]) == requiredID.count(tmp[1]) and
x.ID.count(tmp[2]) == requiredID.count(tmp[2]) and
x.ID.count(tmp[3]) == requiredID.count(tmp[3])]
material = [x for x in G.materials if x.ID == requiredID]
if material:
G.ID[componentID, i, j, k] = material[0].numID
@@ -108,14 +104,10 @@ cpdef void create_magnetic_average(
"""
# Make an ID composed of the names of the two materials that will be averaged
requiredID = G.materials[numID1].ID + '+' + G.materials[numID2].ID
requiredID = Material.create_compound_id(G.materials[numID1], G.materials[numID2])
# Check if this material already exists
tmp = requiredID.split('+')
material = [x for x in G.materials if
(x.ID.count(tmp[0]) == requiredID.count(tmp[0]) and
x.ID.count(tmp[1]) == requiredID.count(tmp[1])) or
(x.ID.count(tmp[0]) % 2 == 0 and x.ID.count(tmp[1]) % 2 == 0)]
material = [x for x in G.materials if x.ID == requiredID]
if material:
G.ID[componentID, i, j, k] = material[0].numID


@@ -16,29 +16,38 @@
# You should have received a copy of the GNU General Public License
# along with gprMax. If not, see <http://www.gnu.org/licenses/>.
import json
import logging
import sys
from abc import ABC, abstractmethod
from io import TextIOWrapper
from itertools import chain
from pathlib import Path
from typing import Dict, List, Sequence, Tuple, Union
import h5py
import numpy as np
from evtk.hl import imageToVTK, linesToVTK
from evtk.vtk import VtkGroup, VtkImageData, VtkUnstructuredGrid
import numpy.typing as npt
from tqdm import tqdm
import gprMax.config as config
from gprMax.grid.fdtd_grid import FDTDGrid
from gprMax.grid.mpi_grid import MPIGrid
from gprMax.materials import Material
from gprMax.receivers import Rx
from gprMax.sources import Source
from gprMax.vtkhdf_filehandlers.vtk_image_data import VtkImageData
from gprMax.vtkhdf_filehandlers.vtk_unstructured_grid import VtkUnstructuredGrid
from gprMax.vtkhdf_filehandlers.vtkhdf import VtkCellType, VtkHdfFile
from ._version import __version__
from .cython.geometry_outputs import write_lines
from .cython.geometry_outputs import get_line_properties
from .subgrids.grid import SubGridBaseGrid
from .utilities.utilities import get_terminal_width
logger = logging.getLogger(__name__)
def save_geometry_views(gvs):
def save_geometry_views(gvs: "List[GeometryView]"):
"""Creates and saves geometryviews.
Args:
@@ -48,28 +57,42 @@ def save_geometry_views(gvs):
logger.info("")
for i, gv in enumerate(gvs):
gv.set_filename()
vtk_data = gv.prep_vtk()
gv.prep_vtk()
pbar = tqdm(
total=gv.nbytes,
unit="byte",
unit_scale=True,
desc=f"Writing geometry view file {i + 1}/{len(gvs)}, "
f"{gv.filename.name}{gv.vtkfiletype.ext}",
desc=f"Writing geometry view file {i + 1}/{len(gvs)}, {gv.filename.name}",
ncols=get_terminal_width() - 1,
file=sys.stdout,
disable=not config.sim_config.general["progressbars"],
)
gv.write_vtk(vtk_data)
gv.write_vtk()
pbar.update(gv.nbytes)
pbar.close()
logger.info("")
class GeometryView:
class GeometryView(ABC):
"""Base class for Geometry Views."""
def __init__(self, xs, ys, zs, xf, yf, zf, dx, dy, dz, filename, grid):
FILE_EXTENSION = ".vtkhdf"
def __init__(
self,
xs: int,
ys: int,
zs: int,
xf: int,
yf: int,
zf: int,
dx: int,
dy: int,
dz: int,
filename: str,
grid: FDTDGrid,
):
"""
Args:
xs, xf, ys, yf, zs, zf: ints for extent of geometry view in cells.
@@ -78,44 +101,89 @@ class GeometryView:
grid: FDTDGrid class describing a grid in a model.
"""
self.xs = xs
self.ys = ys
self.zs = zs
self.xf = xf
self.yf = yf
self.zf = zf
self.nx = self.xf - self.xs
self.ny = self.yf - self.ys
self.nz = self.zf - self.zs
self.dx = dx
self.dy = dy
self.dz = dz
self.filename = filename
self.start = np.array([xs, ys, zs], dtype=np.intc)
self.stop = np.array([xf, yf, zf], dtype=np.intc)
self.step = np.array([dx, dy, dz], dtype=np.intc)
self.size = (self.stop - self.start) // self.step
self.filename = Path(filename)
self.filenamebase = filename
self.grid = grid
self.nbytes = None
self.material_data = None
self.materials = None
# Properties for backwards compatibility
@property
def xs(self) -> int:
return self.start[0]
@property
def ys(self) -> int:
return self.start[1]
@property
def zs(self) -> int:
return self.start[2]
@property
def xf(self) -> int:
return self.stop[0]
@property
def yf(self) -> int:
return self.stop[1]
@property
def zf(self) -> int:
return self.stop[2]
@property
def dx(self) -> int:
return self.step[0]
@property
def dy(self) -> int:
return self.step[1]
@property
def dz(self) -> int:
return self.step[2]
@property
def nx(self) -> int:
return self.size[0]
@property
def ny(self) -> int:
return self.size[1]
@property
def nz(self) -> int:
return self.size[2]
def set_filename(self):
"""Constructs filename from user-supplied name and model run number."""
"""Construct filename from user-supplied name and model number."""
parts = config.get_model_config().output_file_path.parts
self.filename = Path(
*parts[:-1], self.filenamebase + config.get_model_config().appendmodelnumber
)
).with_suffix(self.FILE_EXTENSION)
@abstractmethod
def prep_vtk(self):
pass
@abstractmethod
def write_vtk(self):
pass
class GeometryViewLines(GeometryView):
"""Unstructured grid (.vtu) for a per-cell-edge geometry view."""
def __init__(self, *args):
super().__init__(*args)
self.vtkfiletype = VtkUnstructuredGrid
"""Unstructured grid for a per-cell-edge geometry view."""
def prep_vtk(self):
"""Prepares data for writing to VTK file.
Returns:
vtk_data: dict of coordinates, data, and comments for VTK file.
"""
"""Prepares data for writing to VTKHDF file."""
# Sample ID array according to geometry view spatial discretisation
# Only create a new array if subsampling is required
@@ -124,7 +192,7 @@ class GeometryViewLines(GeometryView):
or (self.dx, self.dy, self.dz) != (1, 1, 1)
or (self.xs, self.ys, self.zs) != (0, 0, 0)
):
# Require contiguous for evtk library
# Require contiguous array
ID = np.ascontiguousarray(
self.grid.ID[
:,
@@ -137,81 +205,66 @@ class GeometryViewLines(GeometryView):
# This array is contiguous by design
ID = self.grid.ID
x, y, z, lines = write_lines(
(self.xs * self.grid.dx),
(self.ys * self.grid.dy),
(self.zs * self.grid.dz),
self.nx,
self.ny,
self.nz,
(self.dx * self.grid.dx),
(self.dy * self.grid.dy),
(self.dz * self.grid.dz),
ID,
)
x = np.arange(self.nx + 1, dtype=np.float64)
y = np.arange(self.ny + 1, dtype=np.float64)
z = np.arange(self.nz + 1, dtype=np.float64)
coords = np.meshgrid(x, y, z, indexing="ij")
self.points = np.vstack(list(map(np.ravel, coords))).T
self.points += self.start
self.points *= self.step * self.grid.dl
# Add offset to subgrid geometry to correctly locate within main grid
if isinstance(self.grid, SubGridBaseGrid):
x += self.grid.i0 * self.grid.dx * self.grid.ratio
y += self.grid.j0 * self.grid.dy * self.grid.ratio
z += self.grid.k0 * self.grid.dz * self.grid.ratio
offset = [self.grid.i0, self.grid.j0, self.grid.k0]
self.points += offset * self.grid.dl * self.grid.ratio
# Each point is the 'source' for 3 lines.
# NB: Excluding points at the far edge of the geometry as those
# are the 'source' for no lines
n_lines = 3 * self.nx * self.ny * self.nz
self.cell_types = np.full(n_lines, VtkCellType.LINE)
self.cell_offsets = np.arange(0, 2 * n_lines + 2, 2, dtype=np.intc)
self.connectivity, self.material_data = get_line_properties(
n_lines, self.nx, self.ny, self.nz, ID
)
assert isinstance(self.connectivity, np.ndarray)
assert isinstance(self.material_data, np.ndarray)
# Write information about any PMLs, sources, receivers
comments = Comments(self.grid, self)
comments.averaged_materials = True
comments.materials_only = True
info = comments.get_gprmax_info()
comments = json.dumps(info)
self.metadata = Metadata(self.grid, self, averaged_materials=True, materials_only=True)
# Number of bytes of data to be written to file
offsets_size = np.arange(start=2, step=2, stop=len(x) + 1, dtype="int32").nbytes
connect_size = len(x) * np.dtype("int32").itemsize
cell_type_size = len(x) * np.dtype("uint8").itemsize
self.nbytes = (
    x.nbytes
    + y.nbytes
    + z.nbytes
    + lines.nbytes
    + offsets_size
    + connect_size
    + cell_type_size
)
self.nbytes = (
    self.points.nbytes
    + self.cell_types.nbytes
    + self.connectivity.nbytes
    + self.cell_offsets.nbytes
    + self.material_data.nbytes
)
vtk_data = {"x": x, "y": y, "z": z, "data": lines, "comments": comments}
def write_vtk(self):
"""Writes geometry information to a VTKHDF file."""
return vtk_data
def write_vtk(self, vtk_data):
"""Writes geometry information to a VTK file.
Args:
vtk_data: dict of coordinates, data, and comments for VTK file.
"""
# Write the VTK file .vtu
linesToVTK(
str(self.filename),
vtk_data["x"],
vtk_data["y"],
vtk_data["z"],
cellData={"Material": vtk_data["data"]},
comments=[vtk_data["comments"]],
)
# Write the VTK file
with VtkUnstructuredGrid(
self.filename,
self.points,
self.cell_types,
self.connectivity,
self.cell_offsets,
) as f:
f.add_cell_data("Material", self.material_data)
self.metadata.write_to_vtkhdf(f)
class GeometryViewVoxels(GeometryView):
"""Imagedata (.vti) for a per-cell geometry view."""
def __init__(self, *args):
super().__init__(*args)
self.vtkfiletype = VtkImageData
"""Image data for a per-cell geometry view."""
def prep_vtk(self):
"""Prepares data for writing to VTK file.
Returns:
vtk_data: dict of data and comments for VTK file.
"""
"""Prepares data for writing to VTKHDF file."""
# Sample solid array according to geometry view spatial discretisation
# Only create a new array if subsampling is required
@@ -220,8 +273,8 @@ class GeometryViewVoxels(GeometryView):
or (self.dx, self.dy, self.dz) != (1, 1, 1)
or (self.xs, self.ys, self.zs) != (0, 0, 0)
):
# Require contiguous for evtk library
solid = np.ascontiguousarray(
# Require contiguous array
self.material_data = np.ascontiguousarray(
self.grid.solid[
self.xs : self.xf : self.dx,
self.ys : self.yf : self.dy,
@@ -230,74 +283,64 @@ class GeometryViewVoxels(GeometryView):
)
else:
    # This array is contiguous by design
    solid = self.grid.solid
# Write information about any PMLs, sources, receivers
comments = Comments(self.grid, self)
info = comments.get_gprmax_info()
comments = json.dumps(info)
self.nbytes = solid.nbytes
vtk_data = {"data": solid, "comments": comments}
return vtk_data
def write_vtk(self, vtk_data):
    """Writes geometry information to a VTK file.
    Args:
        vtk_data: dict of data and comments for VTK file.
    """
    if isinstance(self.grid, SubGridBaseGrid):
        origin = (
            (self.grid.i0 * self.grid.dx * self.grid.ratio),
            (self.grid.j0 * self.grid.dy * self.grid.ratio),
            (self.grid.k0 * self.grid.dz * self.grid.ratio),
        )
    else:
        origin = ((self.xs * self.grid.dx), (self.ys * self.grid.dy), (self.zs * self.grid.dz))
    # Write the VTK file .vti
    imageToVTK(
        str(self.filename),
        origin=origin,
        spacing=((self.dx * self.grid.dx), (self.dy * self.grid.dy), (self.dz * self.grid.dz)),
        cellData={"Material": vtk_data["data"]},
        comments=[vtk_data["comments"]],
    )
else:
    # This array is contiguous by design
    self.material_data = self.grid.solid
if isinstance(self.grid, SubGridBaseGrid):
    self.origin = np.array(
        [
            (self.grid.i0 * self.grid.dx * self.grid.ratio),
            (self.grid.j0 * self.grid.dy * self.grid.ratio),
            (self.grid.k0 * self.grid.dz * self.grid.ratio),
        ]
    )
else:
    self.origin = self.start * self.grid.dl
self.spacing = self.step * self.grid.dl
# Write information about any PMLs, sources, receivers
self.metadata = Metadata(self.grid, self)
self.nbytes = self.material_data.nbytes
def write_vtk(self):
    """Writes geometry information to a VTKHDF file."""
    with VtkImageData(self.filename, self.size, self.origin, self.spacing) as f:
        f.add_cell_data("Material", self.material_data)
        self.metadata.write_to_vtkhdf(f)
class Comments:
class Metadata:
"""Comments can be strings included in the header of XML VTK file, and are
used to hold extra (gprMax) information about the VTK data.
"""
def __init__(self, grid, gv):
def __init__(
self,
grid: FDTDGrid,
gv: GeometryView,
averaged_materials: bool = False,
materials_only: bool = False,
):
self.grid = grid
self.gv = gv
self.averaged_materials = False
self.materials_only = False
self.averaged_materials = averaged_materials
self.materials_only = materials_only
def get_gprmax_info(self):
"""Returns gprMax specific information relating material, source,
and receiver names to numeric identifiers.
"""
self.gprmax_version = __version__
self.dx_dy_dz = self.grid.dl
self.nx_ny_nz = np.array([self.grid.nx, self.grid.ny, self.grid.nz], dtype=np.intc)
# Comments for Paraview macro
comments = {
"gprMax_version": __version__,
"dx_dy_dz": self.dx_dy_dz_comment(),
"nx_ny_nz": self.nx_ny_nz_comment(),
"Materials": self.materials_comment(),
}
# Write the name and numeric ID for each material
self.materials = self.materials_comment()
# Write information on PMLs, sources, and receivers
if not self.materials_only:
# Information on PML thickness
if self.grid.pmls["slabs"]:
comments["PMLthickness"] = self.pml_gv_comment()
self.pml_thickness = self.pml_gv_comment()
else:
self.pml_thickness = None
srcs = (
self.grid.hertziandipoles
+ self.grid.magneticdipoles
@@ -305,53 +348,89 @@ class Comments:
+ self.grid.transmissionlines
)
if srcs:
comments["Sources"] = self.srcs_rx_gv_comment(srcs)
self.source_ids, self.source_positions = self.srcs_rx_gv_comment(srcs)
else:
self.source_ids = None
self.source_positions = None
if self.grid.rxs:
comments["Receivers"] = self.srcs_rx_gv_comment(self.grid.rxs)
self.receiver_ids, self.receiver_positions = self.srcs_rx_gv_comment(self.grid.rxs)
else:
self.receiver_ids = None
self.receiver_positions = None
return comments
def write_to_vtkhdf(self, file_handler: VtkHdfFile):
file_handler.add_field_data(
"gprMax_version",
self.gprmax_version,
dtype=h5py.string_dtype(),
)
file_handler.add_field_data("dx_dy_dz", self.dx_dy_dz)
file_handler.add_field_data("nx_ny_nz", self.nx_ny_nz)
file_handler.add_field_data(
"material_ids",
self.materials,
dtype=h5py.string_dtype(),
)
if not self.materials_only:
if self.pml_thickness is not None:
file_handler.add_field_data("pml_thickness", self.pml_thickness)
if self.source_ids is not None and self.source_positions is not None:
file_handler.add_field_data(
"source_ids", self.source_ids, dtype=h5py.string_dtype()
)
file_handler.add_field_data("sources", self.source_positions)
if self.receiver_ids is not None and self.receiver_positions is not None:
file_handler.add_field_data(
"receiver_ids", self.receiver_ids, dtype=h5py.string_dtype()
)
file_handler.add_field_data("receivers", self.receiver_positions)
def pml_gv_comment(self):
def pml_gv_comment(self) -> List[int]:
grid = self.grid
# Only render PMLs if they are in the geometry view
pmlstorender = dict.fromkeys(grid.pmls["thickness"], 0)
thickness: Dict[str, int] = grid.pmls["thickness"]
gv_pml_depth = dict.fromkeys(thickness, 0)
# Casting to int required as json does not handle numpy types
if grid.pmls["thickness"]["x0"] - self.gv.xs > 0:
pmlstorender["x0"] = int(grid.pmls["thickness"]["x0"] - self.gv.xs)
if grid.pmls["thickness"]["y0"] - self.gv.ys > 0:
pmlstorender["y0"] = int(grid.pmls["thickness"]["y0"] - self.gv.ys)
if grid.pmls["thickness"]["z0"] - self.gv.zs > 0:
pmlstorender["z0"] = int(grid.pmls["thickness"]["z0"] - self.gv.zs)
if self.gv.xf > grid.nx - grid.pmls["thickness"]["xmax"]:
pmlstorender["xmax"] = int(self.gv.xf - (grid.nx - grid.pmls["thickness"]["xmax"]))
if self.gv.yf > grid.ny - grid.pmls["thickness"]["ymax"]:
pmlstorender["ymax"] = int(self.gv.yf - (grid.ny - grid.pmls["thickness"]["ymax"]))
if self.gv.zf > grid.nz - grid.pmls["thickness"]["zmax"]:
pmlstorender["zmax"] = int(self.gv.zf - (grid.nz - grid.pmls["thickness"]["zmax"]))
if self.gv.xs < thickness["x0"]:
gv_pml_depth["x0"] = thickness["x0"] - self.gv.xs
if self.gv.ys < thickness["y0"]:
gv_pml_depth["y0"] = thickness["y0"] - self.gv.ys
if thickness["z0"] - self.gv.zs > 0:
gv_pml_depth["z0"] = thickness["z0"] - self.gv.zs
if self.gv.xf > grid.nx - thickness["xmax"]:
gv_pml_depth["xmax"] = self.gv.xf - (grid.nx - thickness["xmax"])
if self.gv.yf > grid.ny - thickness["ymax"]:
gv_pml_depth["ymax"] = self.gv.yf - (grid.ny - thickness["ymax"])
if self.gv.zf > grid.nz - thickness["zmax"]:
gv_pml_depth["zmax"] = self.gv.zf - (grid.nz - thickness["zmax"])
return list(pmlstorender.values())
return list(gv_pml_depth.values())
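Note: a worked example of the clipping above for one axis, with invented numbers (not taken from this diff): a 100-cell grid, 10-cell PMLs, and a view spanning xs = 5 to xf = 95.

nx, thickness_x0, thickness_xmax = 100, 10, 10
xs, xf = 5, 95

gv_pml_depth = {"x0": 0, "xmax": 0}
if xs < thickness_x0:
    # View starts inside the lower PML; 5 of its 10 cells are visible
    gv_pml_depth["x0"] = thickness_x0 - xs  # -> 5
if xf > nx - thickness_xmax:
    # View ends inside the upper PML; again 5 cells are visible
    gv_pml_depth["xmax"] = xf - (nx - thickness_xmax)  # -> 5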
def srcs_rx_gv_comment(self, srcs):
def srcs_rx_gv_comment(
self, srcs: Union[Sequence[Source], List[Rx]]
) -> Tuple[List[str], npt.NDArray[np.float32]]:
"""Used to name sources and/or receivers."""
sc = []
for src in srcs:
    p = (src.xcoord * self.grid.dx, src.ycoord * self.grid.dy, src.zcoord * self.grid.dz)
    p = list(map(float, p))
    s = {"name": src.ID, "position": p}
    sc.append(s)
return sc
names: List[str] = []
positions: npt.NDArray[np.float32] = np.empty((len(srcs), 3))
for index, src in enumerate(srcs):
    position = src.coord * self.grid.dl
    names.append(src.ID)
    positions[index] = position
return names, positions
def dx_dy_dz_comment(self) -> npt.NDArray[np.float64]:
return self.grid.dl
def dx_dy_dz_comment(self):
return list(map(float, [self.grid.dx, self.grid.dy, self.grid.dz]))
def nx_ny_nz_comment(self) -> npt.NDArray[np.intc]:
return np.array([self.grid.nx, self.grid.ny, self.grid.nz], dtype=np.intc)
def nx_ny_nz_comment(self):
return list(map(int, [self.grid.nx, self.grid.ny, self.grid.nz]))
def materials_comment(self):
def materials_comment(self) -> List[str]:
if not self.averaged_materials:
return [m.ID for m in self.grid.materials if m.type != "dielectric-smoothed"]
else:
@@ -406,68 +485,173 @@ class GeometryObjects:
pbar: Progress bar class instance.
"""
ID = G.ID[:, self.xs : self.xf + 1, self.ys : self.yf + 1, self.zs : self.zf + 1]
# Get materials present in subset of ID array and sort by material ID
material_ids, inverse_map = np.unique(ID, return_inverse=True)
get_material = np.vectorize(lambda id: G.materials[id])
materials = sorted(get_material(material_ids))
# Create map from material ID to 0 - number of materials
materials_map = {material.numID: index for index, material in enumerate(materials)}
map_materials = np.vectorize(lambda id: materials_map[id])
# Remap ID array to the reduced list of materials
ID = np.array(map_materials(material_ids))[inverse_map].reshape(ID.shape)
data = G.solid[self.xs : self.xf, self.ys : self.yf, self.zs : self.zf].astype("int16")
data = map_materials(data)
rigidE = G.rigidE[:, self.xs : self.xf, self.ys : self.yf, self.zs : self.zf]
rigidH = G.rigidH[:, self.xs : self.xf, self.ys : self.yf, self.zs : self.zf]
with h5py.File(self.filename_hdf5, "w") as fdata:
fdata.attrs["gprMax"] = __version__
fdata.attrs["Title"] = title
fdata.attrs["dx_dy_dz"] = (G.dx, G.dy, G.dz)
# Get minimum and maximum integers of materials in geometry objects volume
minmat = np.amin(
G.ID[:, self.xs : self.xf + 1, self.ys : self.yf + 1, self.zs : self.zf + 1]
)
maxmat = np.amax(
G.ID[:, self.xs : self.xf + 1, self.ys : self.yf + 1, self.zs : self.zf + 1]
)
fdata["/data"] = (
G.solid[self.xs : self.xf + 1, self.ys : self.yf + 1, self.zs : self.zf + 1].astype(
"int16"
)
- minmat
)
fdata["/data"] = data
pbar.update(self.solidsize)
fdata["/rigidE"] = G.rigidE[
:, self.xs : self.xf + 1, self.ys : self.yf + 1, self.zs : self.zf + 1
]
fdata["/rigidH"] = G.rigidH[
:, self.xs : self.xf + 1, self.ys : self.yf + 1, self.zs : self.zf + 1
]
fdata["/rigidE"] = rigidE
fdata["/rigidH"] = rigidH
pbar.update(self.rigidsize)
fdata["/ID"] = (
G.ID[:, self.xs : self.xf + 1, self.ys : self.yf + 1, self.zs : self.zf + 1]
- minmat
)
fdata["/ID"] = ID
pbar.update(self.IDsize)
# Write materials list to a text file
# This includes all materials in range whether used in volume or not
with open(self.filename_materials, "w") as fmaterials:
for numID in range(minmat, maxmat + 1):
for material in G.materials:
if material.numID == numID:
fmaterials.write(
f"#material: {material.er:g} {material.se:g} "
f"{material.mr:g} {material.sm:g} {material.ID}\n"
)
if hasattr(material, "poles"):
if "debye" in material.type:
dispersionstr = "#add_dispersion_debye: " f"{material.poles:g} "
for pole in range(material.poles):
dispersionstr += (
f"{material.deltaer[pole]:g} " f"{material.tau[pole]:g} "
)
elif "lorenz" in material.type:
dispersionstr = f"#add_dispersion_lorenz: " f"{material.poles:g} "
for pole in range(material.poles):
dispersionstr += (
f"{material.deltaer[pole]:g} "
f"{material.tau[pole]:g} "
f"{material.alpha[pole]:g} "
)
elif "drude" in material.type:
dispersionstr = f"#add_dispersion_drude: " f"{material.poles:g} "
for pole in range(material.poles):
dispersionstr += (
f"{material.tau[pole]:g} " f"{material.alpha[pole]:g} "
)
dispersionstr += material.ID
fmaterials.write(dispersionstr + "\n")
for material in materials:
self.output_material(material, fmaterials)
def output_material(self, material: Material, file: TextIOWrapper):
file.write(
f"#material: {material.er:g} {material.se:g} "
f"{material.mr:g} {material.sm:g} {material.ID}\n"
)
if hasattr(material, "poles"):
if "debye" in material.type:
dispersionstr = "#add_dispersion_debye: " f"{material.poles:g} "
for pole in range(material.poles):
dispersionstr += f"{material.deltaer[pole]:g} " f"{material.tau[pole]:g} "
elif "lorenz" in material.type:
dispersionstr = f"#add_dispersion_lorenz: " f"{material.poles:g} "
for pole in range(material.poles):
dispersionstr += (
f"{material.deltaer[pole]:g} "
f"{material.tau[pole]:g} "
f"{material.alpha[pole]:g} "
)
elif "drude" in material.type:
dispersionstr = f"#add_dispersion_drude: " f"{material.poles:g} "
for pole in range(material.poles):
dispersionstr += f"{material.tau[pole]:g} " f"{material.alpha[pole]:g} "
dispersionstr += material.ID
file.write(dispersionstr + "\n")
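Note: for reference, output_material emits one #material line per material (er, se, mr, sm, ID), plus an #add_dispersion_* line for dispersive materials. Illustrative output for a hypothetical two-pole Debye material (values invented):

#material: 6 0.01 1 0 wet_sand
#add_dispersion_debye: 2 3.5 1.2e-09 1.8 8e-10 wet_sand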
class MPIGeometryObjects(GeometryObjects):
# def __init__(self, start, stop, global_size, comm):
# super().__init__(...)
def write_hdf5(self, title: str, G: MPIGrid, pbar: tqdm):
"""Writes a geometry objects file in HDF5 format.
Args:
G: FDTDGrid class describing a grid in a model.
pbar: Progress bar class instance.
"""
# Get neighbours
self.neighbours = np.full((3, 2), -1, dtype=int)
self.neighbours[0] = self.comm.Shift(direction=0, disp=1)
self.neighbours[1] = self.comm.Shift(direction=1, disp=1)
self.neighbours[2] = self.comm.Shift(direction=2, disp=1)
# Make subsection of array one larger if no positive neighbour
slice_stop = np.where(
self.neighbours[:, 1] == -1,
self.stop + 1,
self.stop,
)
array_slice = list(map(slice, self.start, slice_stop))
ID = G.ID[:, array_slice[0], array_slice[1], array_slice[2]]
# Get materials present in subset of ID
local_material_num_ids, inverse_map = np.unique(ID, return_inverse=True)
get_material = np.vectorize(lambda id: G.materials[id])
local_materials = get_material(local_material_num_ids)
# Send all materials to the coordinating rank
materials = self.comm.gather(local_materials, root=0)
if self.comm.rank == 0:
# Filter out duplicate materials and sort by material ID
materials = np.fromiter(chain.from_iterable(materials), dtype=Material)
global_materials = np.unique(materials)
global_material_ids = [m.ID for m in global_materials]
else:
global_material_ids = None
global_material_ids = self.comm.bcast(global_material_ids, root=0)
# Create map from material ID (str) to global material numID
materials_map = {
material_id: index for index, material_id in enumerate(global_material_ids)
}
# Remap ID array to the global material numID
ID = np.array([materials_map[m.ID] for m in local_materials])[inverse_map].reshape(ID.shape)
# Other geometry arrays do not have halos, so adjustment to
# 'stop' is not necessary
array_slice = list(map(slice, self.start, self.stop))
data = G.solid[array_slice[0], array_slice[1], array_slice[2]]
map_local_materials = np.vectorize(lambda id: materials_map[G.materials[id].ID])
data = map_local_materials(data)
rigidE = G.rigidE[:, array_slice[0], array_slice[1], array_slice[2]]
rigidH = G.rigidH[:, array_slice[0], array_slice[1], array_slice[2]]
start = self.offset
stop = start + self.size
with h5py.File(self.filename_hdf5, "w", driver="mpio", comm=self.comm) as fdata:
fdata.attrs["gprMax"] = __version__
fdata.attrs["Title"] = title
fdata.attrs["dx_dy_dz"] = (G.dx, G.dy, G.dz)
dset = fdata.create_dataset("/data", self.global_size, dtype=data.dtype)
dset[start[0] : stop[0], start[1] : stop[1], start[2] : stop[2]] = data
pbar.update(self.solidsize)
rigid_E_dataset = fdata.create_dataset(
"/rigidE", (12, *self.global_size), dtype=rigidE.dtype
)
rigid_E_dataset[:, start[0] : stop[0], start[1] : stop[1], start[2] : stop[2]] = rigidE
rigid_H_dataset = fdata.create_dataset(
"/rigidH", (6, *self.global_size), dtype=rigidH.dtype
)
rigid_H_dataset[:, start[0] : stop[0], start[1] : stop[1], start[2] : stop[2]] = rigidH
pbar.update(self.rigidsize)
stop = np.where(
self.neighbours[:, 1] == -1,
stop + 1,
stop,
)
dset = fdata.create_dataset("/ID", (6, *(self.global_size + 1)), dtype=ID.dtype)
dset[:, start[0] : stop[0], start[1] : stop[1], start[2] : stop[2]] = ID
pbar.update(self.IDsize)
# Write materials list to a text file
if self.comm.rank == 0:
with open(self.filename_materials, "w") as materials_file:
for material in global_materials:
self.output_material(material, materials_file)
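Note: both write_hdf5 variants lean on the np.unique(..., return_inverse=True) idiom to remap raw material numIDs onto a compact (or rank-global) numbering. A self-contained sketch of that idiom, with arbitrary values:

import numpy as np

local_ids = np.array([[7, 2], [7, 5]])  # raw numIDs as stored in the grid
unique_ids, inverse = np.unique(local_ids, return_inverse=True)
# unique_ids == [2, 5, 7]; inverse indexes into unique_ids
new_ids = np.array([10, 11, 12])  # e.g. a global numbering for 2, 5, 7
remapped = new_ids[inverse].reshape(local_ids.shape)
assert (remapped == [[12, 10], [12, 11]]).all()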


@@ -58,21 +58,21 @@ class FDTDGrid:
self.nx = 0
self.ny = 0
self.nz = 0
self.dl = np.ones(3, dtype=float)
self.dl = np.ones(3, dtype=np.float64)
self.dt = 0.0
# Field Arrays
self.Ex: npt.NDArray[np.single]
self.Ey: npt.NDArray[np.single]
self.Ez: npt.NDArray[np.single]
self.Hx: npt.NDArray[np.single]
self.Hy: npt.NDArray[np.single]
self.Hz: npt.NDArray[np.single]
self.Ex: npt.NDArray[np.float32]
self.Ey: npt.NDArray[np.float32]
self.Ez: npt.NDArray[np.float32]
self.Hx: npt.NDArray[np.float32]
self.Hy: npt.NDArray[np.float32]
self.Hz: npt.NDArray[np.float32]
# Dispersive Arrays
self.Tx: npt.NDArray[np.single]
self.Ty: npt.NDArray[np.single]
self.Tz: npt.NDArray[np.single]
self.Tx: npt.NDArray[np.float32]
self.Ty: npt.NDArray[np.float32]
self.Tz: npt.NDArray[np.float32]
# Geometry Arrays
self.solid: npt.NDArray[np.uint32]
@@ -81,9 +81,9 @@ class FDTDGrid:
self.ID: npt.NDArray[np.uint32]
# Update Coefficient Arrays
self.updatecoeffsE: npt.NDArray
self.updatecoeffsH: npt.NDArray
self.updatecoeffsdispersive: npt.NDArray
self.updatecoeffsE: npt.NDArray[np.float32]
self.updatecoeffsH: npt.NDArray[np.float32]
self.updatecoeffsdispersive: npt.NDArray[np.float32]
# PML parameters - set some defaults to use if not user provided
self.pmls = {}


@@ -195,7 +195,7 @@ class MPIGrid(FDTDGrid):
global_coord: Global grid coordinate.
Returns:
local_coord: Local grid coordinate
local_coord: Local grid coordinate.
"""
return global_coord - self.lower_extent
@@ -203,13 +203,80 @@ class MPIGrid(FDTDGrid):
"""Convert a local grid coordinate to a global grid coordinate.
Args:
local_coord: Local grid coordinate
local_coord: Local grid coordinate.
Returns:
global_coord: Global grid coordinate
global_coord: Global grid coordinate.
"""
return local_coord + self.lower_extent
def global_coord_inside_grid(
self, global_coord: npt.NDArray[np.intc], allow_inside_halo: bool = False
) -> bool:
"""Check if a global coordinate falls with in the local grid.
Args:
global_coord: Global grid coordinate.
allow_inside_halo: If True, coordinates inside the grid halo
count as inside the grid. If False (the default), they do
not.
Returns:
is_inside_grid: True if the global coordinate falls inside
the local grid bounds.
"""
if allow_inside_halo:
lower_bound = self.lower_extent
upper_bound = self.upper_extent + 1
else:
lower_bound = self.lower_extent + self.negative_halo_offset
upper_bound = self.upper_extent
return all(global_coord >= lower_bound) and all(global_coord <= upper_bound)
def global_bounds_overlap_local_grid(
self, start: npt.NDArray[np.intc], stop: npt.NDArray[np.intc]
) -> bool:
local_start = self.global_to_local_coordinate(start)
local_stop = self.global_to_local_coordinate(stop)
return all(local_start < self.size) and all(local_stop > self.negative_halo_offset)
def limit_global_bounds_to_within_local_grid(
self,
start: npt.NDArray[np.intc],
stop: npt.NDArray[np.intc],
step: npt.NDArray[np.intc] = np.ones(3, dtype=np.intc),
) -> Tuple[npt.NDArray[np.intc], npt.NDArray[np.intc], npt.NDArray[np.intc]]:
local_start = self.global_to_local_coordinate(start)
# Bring start into the local grid (and not in the negative halo)
# local_start must still be aligned with the provided step.
local_start = np.where(
local_start < self.negative_halo_offset,
self.negative_halo_offset + ((local_start - self.negative_halo_offset) % step),
local_start,
)
local_stop = self.global_to_local_coordinate(stop)
# Limit local_stop such that it is at most one step beyond the
# max index of the grid. As local_stop is the upper bound, it is
# exclusive, meaning when used to slice an array (with the
# provided step), the last element accessed will be one step below
# local_stop.
# Note: using self.size as an index in any dimension would fall
# in the positive halo (this counts as outside the local grid).
local_stop = np.where(
local_stop > self.size,
self.size + ((local_stop - self.size) % step),
local_stop,
)
offset = self.local_to_global_coordinate(local_start) - start
return local_start, local_stop, offset
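Note: a numeric sketch of the start clamp above, with invented grid numbers. With a one-cell negative halo and step 2, starts inside the halo are pushed to the first in-grid index that keeps the stepped slice aligned with the global start:

import numpy as np

negative_halo_offset = np.array([1, 1, 1])
step = np.array([2, 2, 2])
local_start = np.array([-3, 0, 5])

local_start = np.where(
    local_start < negative_halo_offset,
    negative_halo_offset + ((local_start - negative_halo_offset) % step),
    local_start,
)
# -3 -> 1 (stride-2 aligned: -3, -1, 1, ...), 0 -> 2, 5 is already inside
assert (local_start == [1, 2, 5]).all()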
def scatter_coord_objects(self, objects: List[CoordType]) -> List[CoordType]:
"""Scatter coord objects to the correct MPI rank.


@@ -48,7 +48,9 @@ def process_python_include_code(inputfile, usernamespace):
"""
# Strip out any newline characters and comments that must begin with double hashes
inputlines = [line.rstrip() for line in inputfile if (not line.startswith("##") and line.rstrip("\n"))]
inputlines = [
line.rstrip() for line in inputfile if (not line.startswith("##") and line.rstrip("\n"))
]
# Rewind input file in preparation for any subsequent reading function
inputfile.seek(0)
@@ -74,7 +76,8 @@ def process_python_include_code(inputfile, usernamespace):
x += 1
if x == len(inputlines):
logger.exception(
"Cannot find the end of the Python code " + "block, i.e. missing #end_python: command."
"Cannot find the end of the Python code "
+ "block, i.e. missing #end_python: command."
)
raise SyntaxError
# Compile code for faster execution
@@ -281,6 +284,15 @@ def check_cmd_names(processedlines, checkessential=True):
lindex = 0
while lindex < len(processedlines):
cmd = processedlines[lindex].split(":")
# Check the command name and parameters were both found
if len(cmd) < 2:
logger.error(
f"Unable to identify command and parameters in '{processedlines[lindex].strip()}'."
" There must be a colon ':' between the command name and parameters."
)
exit(1)
cmdname = cmd[0]
cmdparams = cmd[1]
@@ -289,7 +301,9 @@ def check_cmd_names(processedlines, checkessential=True):
# are no parameters for a command, e.g. for #taguchi:
if " " not in cmdparams[0] and len(cmdparams.strip("\n")) != 0:
logger.exception(
"There must be a space between the command name " + "and parameters in " + processedlines[lindex]
"There must be a space between the command name "
+ "and parameters in "
+ processedlines[lindex]
)
raise SyntaxError
@@ -312,7 +326,9 @@ def check_cmd_names(processedlines, checkessential=True):
if singlecmds[cmdname] is None:
singlecmds[cmdname] = cmd[1].strip(" \t\n")
else:
logger.exception("You can only have a single instance of " + cmdname + " in your model")
logger.exception(
"You can only have a single instance of " + cmdname + " in your model"
)
raise SyntaxError
elif cmdname in multiplecmds:
@@ -384,7 +400,9 @@ def parse_hash_commands(scene):
for key, value in sorted(usernamespace.items()):
if key != "__builtins__":
uservars += f"{key}: {value}, "
logger.info(f"Constants/variables used/available for Python scripting: " + f"{{{uservars[:-2]}}}\n")
logger.info(
f"Constants/variables used/available for Python scripting: " + f"{{{uservars[:-2]}}}\n"
)
# Write a file containing the input commands after Python or include
# file commands have been processed


@@ -30,7 +30,7 @@ class Material:
their properties and update coefficients.
"""
def __init__(self, numID, ID):
def __init__(self, numID: int, ID: str):
"""
Args:
numID: int for numeric ID of the material.
@@ -49,6 +49,82 @@ class Material:
self.mr = 1.0
self.sm = 0.0
def __eq__(self, value: object) -> bool:
if isinstance(value, Material):
return self.ID == value.ID
else:
raise TypeError(
f"'==' not supported between instances of 'Material' and '{type(value)}'"
)
def __lt__(self, value: "Material") -> bool:
"""Less than comparator for two Materials.
Only non-compound materials (i.e. default or user added
materials) are guaranteed to have the same numID for the same
material across MPI ranks. Therefore compound materials are
sorted by ID and non-compound materials are always less than
compound materials.
"""
if not isinstance(value, Material):
raise TypeError(
f"'<' not supported between instances of 'Material' and '{type(value)}'"
)
elif self.is_compound_material() and value.is_compound_material():
return self.ID < value.ID
else:
return value.is_compound_material() or self.numID < value.numID
def __gt__(self, value: "Material") -> bool:
"""Greater than comparator for two Materials.
Only non-compound materials (i.e. default or user added
materials) are guaranteed to have the same numID for the same
material across MPI ranks. Therefore compound materials are
sorted by ID and are always greater than non-compound materials.
"""
if not isinstance(value, Material):
raise TypeError(
f"'>' not supported between instances of 'Material' and '{type(value)}'"
)
elif self.is_compound_material() and value.is_compound_material():
return self.ID > value.ID
else:
return self.is_compound_material() or self.numID > value.numID
def is_compound_material(self) -> bool:
"""Check if a material is a compound material.
The ID of a compound material consists of the component
material IDs joined by a '+' symbol. Therefore we check for a
compound material by looking for a '+' symbol in the material
ID.
Returns:
is_compound_material: True if material is a compound
material. False otherwise.
"""
return self.ID.count("+") > 0
@staticmethod
def create_compound_id(*materials: "Material") -> str:
"""Create a compound ID from existing materials.
The new ID will be the IDs of the existing materials joined by a
'+' symbol. The component IDs will be sorted alphabetically and
if two materials are provided, the compound ID will contain each
material twice.
Args:
*materials: Materials to use to create the compound ID.
Returns:
compound_id: New compound id.
"""
if len(materials) == 2:
materials += materials
return "+".join(sorted([material.ID for material in materials]))
def calculate_update_coeffsH(self, G):
"""Calculates the magnetic update coefficients of the material.
@@ -239,7 +315,9 @@ class PeplinskiSoil:
by Peplinski (http://dx.doi.org/10.1109/36.387598).
"""
def __init__(self, ID, sandfraction, clayfraction, bulkdensity, sandpartdensity, watervolfraction):
def __init__(
self, ID, sandfraction, clayfraction, bulkdensity, sandpartdensity, watervolfraction
):
"""
Args:
ID: string for name of the soil.
@@ -304,7 +382,12 @@ class PeplinskiSoil:
muiter = np.nditer(mumaterials, flags=["c_index"])
while not muiter.finished:
# Real part for frequencies in the range 1.4GHz to 18GHz
er = (1 + (self.rb / self.rs) * ((es**a) - 1) + (muiter[0] ** b1 * erealw**a) - muiter[0]) ** (1 / a)
er = (
1
+ (self.rb / self.rs) * ((es**a) - 1)
+ (muiter[0] ** b1 * erealw**a)
- muiter[0]
) ** (1 / a)
# Real part for frequencies in the range 0.3GHz to 1.3GHz (linear
# correction to 1.4-18GHz value)
er = 1.15 * er - 0.68
@@ -499,7 +582,10 @@ def calculate_water_properties(T=25, S=0):
delta = 25 - T
beta = (
2.033e-2 + 1.266e-4 * delta + 2.464e-6 * delta**2 - S * (1.849e-5 - 2.551e-7 * delta + 2.551e-8 * delta**2)
2.033e-2
+ 1.266e-4 * delta
+ 2.464e-6 * delta**2
- S * (1.849e-5 - 2.551e-7 * delta + 2.551e-8 * delta**2)
)
sig_25s = S * (0.182521 - 1.46192e-3 * S + 2.09324e-5 * S**2 - 1.28205e-7 * S**3)
sig = sig_25s * np.exp(-delta * beta)
@@ -608,8 +694,20 @@ def process_materials(G):
material.calculate_update_coeffsH(G)
# Add update coefficients to overall storage for all materials
G.updatecoeffsE[material.numID, :] = material.CA, material.CBx, material.CBy, material.CBz, material.srce
G.updatecoeffsH[material.numID, :] = material.DA, material.DBx, material.DBy, material.DBz, material.srcm
G.updatecoeffsE[material.numID, :] = (
material.CA,
material.CBx,
material.CBy,
material.CBz,
material.srce,
)
G.updatecoeffsH[material.numID, :] = (
material.DA,
material.DBx,
material.DBy,
material.DBz,
material.srcm,
)
# Add update coefficients to overall storage for dispersive materials
if hasattr(material, "poles"):


@@ -23,6 +23,7 @@ from typing import List, Sequence
import humanize
import numpy as np
import numpy.typing as npt
import psutil
from colorama import Fore, Style, init
@@ -125,11 +126,11 @@ class Model:
self.G.dl[2] = value
@property
def dl(self) -> np.ndarray:
def dl(self) -> npt.NDArray[np.float64]:
return self.G.dl
@dl.setter
def dl(self, value: np.ndarray):
def dl(self, value: npt.NDArray[np.float64]):
self.G.dl = value
@property
@@ -164,7 +165,10 @@ class Model:
self.p = psutil.Process()
# Normal model reading/building process; bypassed if geometry information to be reused
self.reuse_geometry() if config.get_model_config().reuse_geometry() else self.build_geometry()
if config.get_model_config().reuse_geometry():
self.reuse_geometry()
else:
self.build_geometry()
logger.info(
f"Output directory: {config.get_model_config().output_file_path.parent.resolve()}\n"


@@ -1,6 +1,7 @@
import logging
from typing import Optional
import numpy as np
from mpi4py import MPI
from gprMax import config
@@ -55,7 +56,9 @@ class MPIModel(Model):
def build_geometry(self):
self._broadcast_model()
return super().build_geometry()
super().build_geometry()
self._filter_geometry_objects()
def _broadcast_model(self):
self.title = self.comm.bcast(self.title)
@@ -76,6 +79,30 @@ class MPIModel(Model):
model_config.materials["maxpoles"] = self.comm.bcast(model_config.materials["maxpoles"])
model_config.ompthreads = self.comm.bcast(model_config.ompthreads)
def _filter_geometry_objects(self):
objects = self.comm.bcast(self.geometryobjects)
self.geometryobjects = []
for go in objects:
start = np.array([go.xs, go.ys, go.zs], dtype=np.intc)
stop = np.array([go.xf, go.yf, go.zf], dtype=np.intc)
if self.G.global_bounds_overlap_local_grid(start, stop):
comm = self.comm.Split()
assert isinstance(comm, MPI.Intracomm)
start_grid_coord = self.G.get_grid_coord_from_coordinate(start)
stop_grid_coord = self.G.get_grid_coord_from_coordinate(stop) + 1
go.comm = comm.Create_cart((stop_grid_coord - start_grid_coord).tolist())
go.global_size = np.array([go.nx, go.ny, go.nz], dtype=np.intc)
start, stop, offset = self.G.limit_global_bounds_to_within_local_grid(start, stop)
go.size = stop - start
go.start = start
go.stop = stop
go.offset = offset
self.geometryobjects.append(go)
else:
self.comm.Split(MPI.UNDEFINED)
def write_output_data(self):
"""Writes output data, i.e. field data for receivers and snapshots to
file(s).
@@ -91,9 +118,6 @@ class MPIModel(Model):
self.G.size = self.G.global_size
write_hdf5_outputfile(config.get_model_config().output_file_path_ext, self.title, self)
def _output_geometry(self):
logger.warn("Geometry views and geometry objects are not currently supported with MPI\n")
def _create_grid(self) -> MPIGrid:
cart_comm = MPI.COMM_WORLD.Create_cart(config.sim_config.mpi)
return MPIGrid(cart_comm)


@@ -110,7 +110,7 @@ class Snapshot:
filename: str,
fileext: str,
outputs: Dict[str, bool],
grid_dl: npt.NDArray[np.single],
grid_dl: npt.NDArray[np.float32],
grid_dt: float,
):
"""
@@ -358,7 +358,7 @@ class MPISnapshot(Snapshot):
filename: str,
fileext: str,
outputs: Dict[str, bool],
grid_dl: npt.NDArray[np.single],
grid_dl: npt.NDArray[np.float32],
grid_dt: float,
):
super().__init__(
@@ -375,7 +375,7 @@ class MPISnapshot(Snapshot):
self.size = np.ceil((self.stop - self.start) / self.step).astype(np.intc)
return super().initialise_snapfields()
def has_neighbour(self, dimension: Dim, direction: Dir):
def has_neighbour(self, dimension: Dim, direction: Dir) -> bool:
return self.neighbours[dimension][direction] != -1
def store(self, G):
@@ -393,13 +393,13 @@ class MPISnapshot(Snapshot):
self.neighbours[Dim.Y] = self.comm.Shift(direction=Dim.Y, disp=1)
self.neighbours[Dim.Z] = self.comm.Shift(direction=Dim.Z, disp=1)
# If we have a positive neighbour, an extra step may land in our
# local halo. The numbers here will be correct (except the top
# corner), but using it would require extra checks below.
slice_stop = np.where(
    self.neighbours[:, Dir.POS] != -1,
    self.stop,
    self.stop + self.step,
)
# If we do not have a positive neighbour, add an extra step to
# make the upper bound inclusive. Otherwise the additional step
# will be provided by the received halo.
slice_stop = np.where(
    self.neighbours[:, Dir.POS] == -1,
    self.stop + self.step,
    self.stop,
)
self.slice = list(map(slice, self.start, slice_stop, self.step))


@@ -0,0 +1,184 @@
from os import PathLike
from typing import Literal, Optional, Union
import numpy as np
import numpy.typing as npt
from mpi4py import MPI
from gprMax.vtkhdf_filehandlers.vtkhdf import VtkHdfFile
class VtkImageData(VtkHdfFile):
"""File handler for creating a VTKHDF Image Data file.
File format information is available here:
https://docs.vtk.org/en/latest/design_documents/VTKFileFormats.html#image-data
"""
DIRECTION_ATTR = "Direction"
ORIGIN_ATTR = "Origin"
SPACING_ATTR = "Spacing"
WHOLE_EXTENT_ATTR = "WholeExtent"
DIMENSIONS = 3
@property
def TYPE(self) -> Literal["ImageData"]:
return "ImageData"
def __init__(
self,
filename: Union[str, PathLike],
shape: npt.NDArray[np.intc],
origin: Optional[npt.NDArray[np.float32]] = None,
spacing: Optional[npt.NDArray[np.float32]] = None,
direction: Optional[npt.NDArray[np.float32]] = None,
comm: Optional[MPI.Comm] = None,
):
"""Create a new VtkImageData file.
If the file already exists, it will be overwritten. Required
attributes (Type and Version) will be written to the file.
The file will be opened using the 'mpio' h5py driver if an MPI
communicator is provided.
Args:
filename: Name of the file (can be a file path). The file
extension will be set to '.vtkhdf'.
shape: Shape of the image data to be stored in the file.
This specifies the number of cells. Image data can be
1D, 2D, or 3D.
origin (optional): Origin of the image data. Default
[0, 0, 0].
spacing (optional): Discretisation of the image data.
Default [1, 1, 1].
direction (optional): Array of direction vectors for each
dimension of the image data. Can be a flattened array.
I.e. [[1, 0, 0], [0, 1, 0], [0, 0, 1]] and
[1, 0, 0, 0, 1, 0, 0, 0, 1] are equivalent. Default
[[1, 0, 0], [0, 1, 0], [0, 0, 1]].
comm (optional): MPI communicator containing all ranks that
want to write to the file.
"""
super().__init__(filename, comm)
if len(shape) == 0:
raise ValueError(f"Shape must not be empty.")
if len(shape) > self.DIMENSIONS:
raise ValueError(f"Shape must not have more than {self.DIMENSIONS} dimensions.")
elif len(shape) < self.DIMENSIONS:
shape = np.concatenate((shape, np.ones(self.DIMENSIONS - len(shape), dtype=np.intc)))
self.shape = shape
whole_extent = np.zeros(2 * self.DIMENSIONS, dtype=np.intc)
whole_extent[1::2] = self.shape
self._set_root_attribute(self.WHOLE_EXTENT_ATTR, whole_extent)
if origin is None:
origin = np.zeros(self.DIMENSIONS, dtype=np.float32)
self.set_origin(origin)
if spacing is None:
spacing = np.ones(self.DIMENSIONS, dtype=np.float32)
self.set_spacing(spacing)
if direction is None:
direction = np.diag(np.ones(self.DIMENSIONS, dtype=np.float32))
self.set_direction(direction)
@property
def whole_extent(self) -> npt.NDArray[np.intc]:
return self._get_root_attribute(self.WHOLE_EXTENT_ATTR)
@property
def origin(self) -> npt.NDArray[np.float32]:
return self._get_root_attribute(self.ORIGIN_ATTR)
@property
def spacing(self) -> npt.NDArray[np.float32]:
return self._get_root_attribute(self.SPACING_ATTR)
@property
def direction(self) -> npt.NDArray[np.float32]:
return self._get_root_attribute(self.DIRECTION_ATTR)
def set_origin(self, origin: npt.NDArray[np.float32]):
"""Set the origin coordinate of the image data.
Args:
origin: x, y, z coordinates to set as the origin.
"""
if len(origin) != self.DIMENSIONS:
raise ValueError(f"Origin attribute must have {self.DIMENSIONS} dimensions.")
self._set_root_attribute(self.ORIGIN_ATTR, origin)
def set_spacing(self, spacing: npt.NDArray[np.float32]):
"""Set the discritisation of the image data.
Args:
spacing: Discritisation of the x, y, and z dimensions.
"""
if len(spacing) != self.DIMENSIONS:
raise ValueError(f"Spacing attribute must have {self.DIMENSIONS} dimensions.")
self._set_root_attribute(self.SPACING_ATTR, spacing)
def set_direction(self, direction: npt.NDArray[np.float32]):
"""Set the coordinate system of the image data.
Args:
direction: Array of direction vectors for each dimension of
the image data. Can be a flattened array. I.e.
[[1, 0, 0], [0, 1, 0], [0, 0, 1]] and
[1, 0, 0, 0, 1, 0, 0, 0, 1] are equivalent.
"""
direction = direction.flatten()
if len(direction) != self.DIMENSIONS * self.DIMENSIONS:
raise ValueError(
f"Direction array must contain {self.DIMENSIONS * self.DIMENSIONS} elements."
)
self._set_root_attribute(self.DIRECTION_ATTR, direction)
def add_point_data(
self, name: str, data: npt.NDArray, offset: Optional[npt.NDArray[np.intc]] = None
):
"""Add point data to the VTKHDF file.
Args:
name: Name of the dataset.
data: Data to be saved.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
Raises:
ValueError: Raised if data has invalid dimensions.
"""
points_shape = self.shape + 1
if offset is None and any(data.shape != points_shape): # type: ignore
raise ValueError(
"If no offset is specified, data.shape must be one larger in each dimension than"
f" this vtkImageData object. {data.shape} != {points_shape}"
)
return super().add_point_data(name, data, points_shape, offset)
def add_cell_data(
self, name: str, data: npt.NDArray, offset: Optional[npt.NDArray[np.intc]] = None
):
"""Add cell data to the VTKHDF file.
Args:
name: Name of the dataset.
data: Data to be saved.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
Raises:
ValueError: Raised if data has invalid dimensions.
"""
if offset is None and any(data.shape != self.shape): # type: ignore
raise ValueError(
"If no offset is specified, data.shape must match the dimensions of this"
f" VtkImageData object. {data.shape} != {self.shape}"
)
return super().add_cell_data(name, data, self.shape, offset)
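Note: a minimal serial usage sketch of this handler (filename and array contents invented), assuming the context manager closes the file as it is used elsewhere in this commit:

import numpy as np

shape = np.array([4, 3, 2], dtype=np.intc)  # number of cells per dimension
material = np.zeros(shape, dtype=np.uint32)  # one value per cell

with VtkImageData(
    "example_volume",  # extension is forced to .vtkhdf
    shape,
    origin=np.zeros(3, dtype=np.float32),
    spacing=np.array([0.002, 0.002, 0.002], dtype=np.float32),
) as f:
    f.add_cell_data("Material", material)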


@@ -0,0 +1,172 @@
import logging
from os import PathLike
from typing import Literal, Optional, Union
import numpy as np
import numpy.typing as npt
from mpi4py import MPI
from gprMax.vtkhdf_filehandlers.vtkhdf import VtkCellType, VtkHdfFile
logger = logging.getLogger(__name__)
class VtkUnstructuredGrid(VtkHdfFile):
"""File handler for creating a VTKHDF Unstructured Grid file.
File format information is available here:
https://docs.vtk.org/en/latest/design_documents/VTKFileFormats.html#unstructured-grid
"""
class Dataset(VtkHdfFile.Dataset):
CONNECTIVITY = "Connectivity"
NUMBER_OF_CELLS = "NumberOfCells"
NUMBER_OF_CONNECTIVITY_IDS = "NumberOfConnectivityIds"
NUMBER_OF_POINTS = "NumberOfPoints"
OFFSETS = "Offsets"
POINTS = "Points"
TYPES = "Types"
@property
def TYPE(self) -> Literal["UnstructuredGrid"]:
return "UnstructuredGrid"
def __init__(
self,
filename: Union[str, PathLike],
points: npt.NDArray,
cell_types: npt.NDArray[VtkCellType],
connectivity: npt.NDArray,
cell_offsets: npt.NDArray,
comm: Optional[MPI.Comm] = None,
) -> None:
"""Create a new VtkUnstructuredGrid file.
An unstructured grid has N points and C cells. A cell is defined
as a collection of points which is specified by the connectivity
and cell_offsets arguments along with the list of cell_types.
If the file already exists, it will be overwritten. Required
attributes (Type and Version) will be written to the file.
The file will be opened using the 'mpio' h5py driver if an MPI
communicator is provided.
Args:
filename: Name of the file (can be a file path). The file
extension will be set to '.vtkhdf'.
points: Array of point coordinates of shape (N, 3).
cell_types: Array of VTK cell types of shape (C,).
connectivity: Array of point IDs that together with
cell_offsets, defines the points that make up each cell.
Each point ID has a value between 0 and (N-1) inclusive
and corresponds to a point in the points array.
cell_offsets: Array listing where each cell starts and ends
in the connectivity array. It has shape (C + 1,).
comm (optional): MPI communicator containing all ranks that
want to write to the file.
Raises:
ValueError: Raised if argument dimensions are invalid.
"""
super().__init__(filename, comm)
if len(cell_offsets) != len(cell_types) + 1:
raise ValueError(
"cell_offsets should be one longer than cell_types."
" I.e. one longer than the number of cells"
)
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
if not is_sorted(cell_offsets):
raise ValueError("cell_offsets should be sorted in ascending order")
if len(connectivity) < cell_offsets[-1]:
raise ValueError("Connectivity array is shorter than final cell_offsets value")
elif len(connectivity) > cell_offsets[-1]:
logger.warning(
"Connectivity array longer than final cell_offsets value."
" Some connectivity data will be ignored"
)
self._write_root_dataset(self.Dataset.CONNECTIVITY, connectivity)
self._write_root_dataset(self.Dataset.NUMBER_OF_CELLS, len(cell_types))
self._write_root_dataset(self.Dataset.NUMBER_OF_CONNECTIVITY_IDS, len(connectivity))
self._write_root_dataset(self.Dataset.NUMBER_OF_POINTS, len(points))
self._write_root_dataset(self.Dataset.OFFSETS, cell_offsets)
self._write_root_dataset(self.Dataset.POINTS, points, xyz_data_ordering=False)
self._write_root_dataset(self.Dataset.TYPES, cell_types)
@property
def number_of_cells(self) -> int:
number_of_cells = self._get_root_dataset(self.Dataset.NUMBER_OF_CELLS)
return np.sum(number_of_cells, dtype=np.intc)
@property
def number_of_connectivity_ids(self) -> int:
number_of_connectivity_ids = self._get_root_dataset(self.Dataset.NUMBER_OF_CONNECTIVITY_IDS)
return np.sum(number_of_connectivity_ids, dtype=np.intc)
@property
def number_of_points(self) -> int:
number_of_points = self._get_root_dataset(self.Dataset.NUMBER_OF_POINTS)
return np.sum(number_of_points, dtype=np.intc)
def add_point_data(
self, name: str, data: npt.NDArray, offset: Optional[npt.NDArray[np.intc]] = None
):
"""Add point data to the VTKHDF file.
Args:
name: Name of the dataset.
data: Data to be saved.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
Raises:
ValueError: Raised if data has invalid dimensions.
"""
shape = np.array(data.shape)
number_of_dimensions = len(shape)
if number_of_dimensions < 1 or number_of_dimensions > 2:
raise ValueError(f"Data must have 1 or 2 dimensions, not {number_of_dimensions}")
elif len(data) != self.number_of_points:
raise ValueError(
"Length of data must match the number of points in the vtkUnstructuredGrid."
f" {len(data)} != {self.number_of_points}"
)
elif number_of_dimensions == 2 and shape[1] != 1 and shape[1] != 3:
raise ValueError(f"The second dimension should have shape 1 or 3, not {shape[1]}")
return super().add_point_data(name, data, shape, offset)
def add_cell_data(
self, name: str, data: npt.NDArray, offset: Optional[npt.NDArray[np.intc]] = None
):
"""Add cell data to the VTKHDF file.
Args:
name: Name of the dataset.
data: Data to be saved.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
Raises:
ValueError: Raised if data has invalid dimensions.
"""
shape = np.array(data.shape)
number_of_dimensions = len(shape)
if number_of_dimensions < 1 or number_of_dimensions > 2:
raise ValueError(f"Data must have 1 or 2 dimensions, not {number_of_dimensions}.")
elif len(data) != self.number_of_cells:
raise ValueError(
"Length of data must match the number of cells in the vtkUnstructuredGrid."
f" {len(data)} != {self.number_of_cells}"
)
elif number_of_dimensions == 2 and shape[1] != 1 and shape[1] != 3:
raise ValueError(f"The second dimension should have shape 1 or 3, not {shape[1]}")
return super().add_cell_data(name, data, shape, offset)
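The connectivity/cell_offsets contract is easiest to see with a concrete case. A minimal sketch, assuming the class above is in scope (file name and values purely illustrative): four points define one triangle and one line, and cell i is connectivity[cell_offsets[i]:cell_offsets[i + 1]]:

import numpy as np

from gprMax.vtkhdf_filehandlers.vtkhdf import VtkCellType

points = np.array(
    [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]]
)
cell_types = np.array([VtkCellType.TRIANGLE, VtkCellType.LINE], dtype=np.uint8)
connectivity = np.array([0, 1, 2, 2, 3])  # triangle: points 0, 1, 2; line: 2, 3
cell_offsets = np.array([0, 3, 5])  # one longer than the number of cells

with VtkUnstructuredGrid("demo_grid", points, cell_types, connectivity, cell_offsets) as f:
    f.add_cell_data("Material", np.array([1, 2], dtype=np.intc))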

View File

@@ -0,0 +1,386 @@
import logging
from abc import abstractmethod
from contextlib import AbstractContextManager
from enum import Enum
from os import PathLike
from pathlib import Path
from types import TracebackType
from typing import Optional, Union
import h5py
import numpy as np
import numpy.typing as npt
from mpi4py import MPI
logger = logging.getLogger(__name__)
class VtkHdfFile(AbstractContextManager):
VERSION = [2, 2]
FILE_EXTENSION = ".vtkhdf"
ROOT_GROUP = "VTKHDF"
# TODO: Can this be moved to using an Enum like root datasets?
# Main barrier: Can't subclass an enum with members and any base
# Enum class would need VERSION and TYPE as members.
VERSION_ATTR = "Version"
TYPE_ATTR = "Type"
class Dataset(str, Enum):
pass
@property
@abstractmethod
def TYPE(self) -> str:
pass
def __enter__(self):
return self
def __init__(self, filename: Union[str, PathLike], comm: Optional[MPI.Comm] = None) -> None:
"""Create a new VtkHdfFile.
        If the file already exists, it will be overridden. Required
attributes (Type and Version) will be written to the file.
The file will be opened using the 'mpio' h5py driver if an MPI
communicator is provided.
Args:
filename: Name of the file (can be a file path). The file
extension will be set to '.vtkhdf'.
comm (optional): MPI communicator containing all ranks that
want to write to the file.
"""
# Ensure the filename uses the correct extension
self.filename = Path(filename)
if self.filename.suffix != "" and self.filename.suffix != self.FILE_EXTENSION:
logger.warning(
f"Invalid file extension '{self.filename.suffix}' for VTKHDF file. Changing to '{self.FILE_EXTENSION}'."
)
self.filename = self.filename.with_suffix(self.FILE_EXTENSION)
self.comm = comm
# Check if the filehandler should use an MPI driver
if self.comm is None:
self.file_handler = h5py.File(self.filename, "w")
else:
self.file_handler = h5py.File(self.filename, "w", driver="mpio", comm=self.comm)
self.root_group = self.file_handler.create_group(self.ROOT_GROUP)
# Set required Version and Type root attributes
self._set_root_attribute(self.VERSION_ATTR, self.VERSION)
type_as_ascii = self.TYPE.encode("ascii")
self._set_root_attribute(
self.TYPE_ATTR, type_as_ascii, h5py.string_dtype("ascii", len(type_as_ascii))
)
def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> Optional[bool]:
"""Close the file when the context is exited.
The parameters describe the exception that caused the context to
be exited. If the context was exited without an exception, all
three arguments will be None. Any exception will be
processed normally upon exit from this method.
Returns:
suppress_exception (optional): Returns True if the exception
should be suppressed (i.e. not propagated). Otherwise,
the exception will be processed normally upon exit from
this method.
"""
self.close()
def close(self) -> None:
"""Close the file handler"""
self.file_handler.close()
def _get_root_attribute(self, attribute: str) -> npt.NDArray:
"""Get attribute from the root VTKHDF group if it exists.
Args:
attribute: Name of the attribute.
Returns:
value: Current value of the attribute if it exists.
Raises:
KeyError: Raised if the attribute is not present as a key.
"""
value = self.root_group.attrs[attribute]
if isinstance(value, h5py.Empty):
raise KeyError(f"Attribute '{attribute}' not present in /{self.ROOT_GROUP} group")
return value
def _set_root_attribute(
self, attribute: str, value: npt.ArrayLike, dtype: npt.DTypeLike = None
):
"""Set attribute in the root VTKHDF group.
Args:
attribute: Name of the new attribute.
value: An array to initialize the attribute.
dtype (optional): Data type of the attribute. Overrides
value.dtype if both are given.
"""
self.root_group.attrs.create(attribute, value, dtype=dtype)
def _build_dataset_path(self, *path: str) -> str:
"""Build an HDF5 dataset path attached to the root VTKHDF group.
Args:
*path: Components of the required path.
Returns:
path: Path to the dataset.
"""
return "/".join([self.ROOT_GROUP, *path])
def _get_root_dataset(self, name: "VtkHdfFile.Dataset") -> h5py.Dataset:
"""Get specified dataset from the root group of the VTKHDF file.
Args:
            name: Name of the dataset.
Returns:
dataset: Returns specified h5py dataset.
"""
path = self._build_dataset_path(name)
return self._get_dataset(path)
def _get_dataset(self, path: str) -> h5py.Dataset:
"""Get specified dataset.
Args:
path: Absolute path to the dataset.
Returns:
dataset: Returns specified h5py dataset.
Raises:
KeyError: Raised if the dataset does not exist, or the path
points to some other object, e.g. a Group not a Dataset.
"""
        cls = self.file_handler.get(path, getclass=True)
        if cls is None:
            raise KeyError(f"Path '{path}' does not exist")
        elif cls != h5py.Dataset:
            raise KeyError(f"Dataset not found. Found '{cls}' instead")
dataset = self.file_handler.get(path)
assert isinstance(dataset, h5py.Dataset)
return dataset
def _write_root_dataset(
self,
name: "VtkHdfFile.Dataset",
data: npt.ArrayLike,
shape: Optional[npt.NDArray[np.intc]] = None,
offset: Optional[npt.NDArray[np.intc]] = None,
xyz_data_ordering=True,
):
"""Write specified dataset to the root group of the VTKHDF file.
Args:
name: Name of the dataset.
data: Data to initialize the dataset.
shape (optional): Size of the full dataset being created.
Can be omitted if data provides the full dataset.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
xyz_data_ordering (optional): If True, the data will be
transposed as VTKHDF stores datasets using ZYX ordering.
Default True.
"""
path = self._build_dataset_path(name)
self._write_dataset(
path, data, shape=shape, offset=offset, xyz_data_ordering=xyz_data_ordering
)
def _write_dataset(
self,
path: str,
data: npt.ArrayLike,
shape: Optional[npt.NDArray[np.intc]] = None,
offset: Optional[npt.NDArray[np.intc]] = None,
dtype: Optional[npt.DTypeLike] = None,
xyz_data_ordering=True,
):
"""Write specified dataset to the VTKHDF file.
If data has shape (d1, d2, ..., dn), i.e. n dimensions, then, if
specified, shape and offset must be of length n.
Args:
path: Absolute path to the dataset.
data: Data to initialize the dataset.
shape (optional): Size of the full dataset being created.
Can be omitted if data provides the full dataset.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
            dtype (optional): Data type of the dataset. Overrides
                data.dtype if both are given.
            xyz_data_ordering (optional): If True, the data will be
transposed as VTKHDF stores datasets using ZYX ordering.
Default True.
Raises:
ValueError: Raised if the combination of data.shape, shape,
and offset are invalid.
"""
# If dtype is a string, ensure it is ascii encoded and use
# variable length strings
if dtype is not None and h5py.check_string_dtype(dtype) is not None:
dtype = h5py.string_dtype(encoding="ascii", length=None)
if not isinstance(data, np.ndarray):
data = np.array(data, dtype=dtype)
if data.ndim < 1:
data = np.expand_dims(data, axis=-1)
if dtype is None:
dtype = data.dtype
# VTKHDF stores datasets using ZYX ordering rather than XYZ
if xyz_data_ordering:
data = data.transpose()
if shape is not None:
shape = np.flip(shape)
if offset is not None:
offset = np.flip(offset)
        if shape is None or np.array_equal(shape, data.shape):
self.file_handler.create_dataset(path, data=data, dtype=dtype)
elif offset is None:
raise ValueError(
"Offset must not be None as the full dataset has not been provided."
" I.e. data.shape != shape"
)
else:
dimensions = len(data.shape)
if dimensions != len(shape):
raise ValueError(
"The data and specified shape must have the same number of dimensions."
f" {dimensions} != {len(shape)}"
)
if dimensions != len(offset):
raise ValueError(
"The data and specified offset must have the same number of dimensions."
f" {dimensions} != {len(offset)}"
)
if any(offset + data.shape > shape):
raise ValueError(
"The provided offset and data does not fit within the bounds of the dataset."
f" {offset} + {data.shape} = {offset + data.shape} > {shape}"
)
dataset = self.file_handler.create_dataset(path, shape=shape, dtype=dtype)
start = offset
stop = offset + data.shape
            dataset_slice = tuple(slice(start[i], stop[i]) for i in range(dimensions))
dataset[dataset_slice] = data
def add_point_data(
self,
name: str,
data: npt.NDArray,
shape: Optional[npt.NDArray[np.intc]] = None,
offset: Optional[npt.NDArray[np.intc]] = None,
):
"""Add point data to the VTKHDF file.
Args:
name: Name of the dataset.
data: Data to be saved.
shape (optional): Size of the full dataset being created.
Can be omitted if data provides the full dataset.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
"""
dataset_path = self._build_dataset_path("PointData", name)
self._write_dataset(dataset_path, data, shape=shape, offset=offset)
def add_cell_data(
self,
name: str,
data: npt.NDArray,
shape: Optional[npt.NDArray[np.intc]] = None,
offset: Optional[npt.NDArray[np.intc]] = None,
):
"""Add cell data to the VTKHDF file.
Args:
name: Name of the dataset.
data: Data to be saved.
shape (optional): Size of the full dataset being created.
Can be omitted if data provides the full dataset.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
"""
dataset_path = self._build_dataset_path("CellData", name)
self._write_dataset(dataset_path, data, shape=shape, offset=offset)
def add_field_data(
self,
name: str,
data: npt.ArrayLike,
shape: Optional[npt.NDArray[np.intc]] = None,
offset: Optional[npt.NDArray[np.intc]] = None,
dtype: Optional[npt.DTypeLike] = None,
):
"""Add field data to the VTKHDF file.
Args:
name: Name of the dataset.
data: Data to be saved.
shape (optional): Size of the full dataset being created.
Can be omitted if data provides the full dataset.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
"""
dataset_path = self._build_dataset_path("FieldData", name)
self._write_dataset(
dataset_path, data, shape=shape, offset=offset, dtype=dtype, xyz_data_ordering=False
)
class VtkCellType(np.uint8, Enum):
"""VTK cell types as defined here:
https://vtk.org/doc/nightly/html/vtkCellType_8h_source.html#l00019
"""
# Linear cells
EMPTY_CELL = 0
VERTEX = 1
POLY_VERTEX = 2
LINE = 3
POLY_LINE = 4
TRIANGLE = 5
TRIANGLE_STRIP = 6
POLYGON = 7
PIXEL = 8
QUAD = 9
TETRA = 10
VOXEL = 11
HEXAHEDRON = 12
WEDGE = 13
PYRAMID = 14
PENTAGONAL_PRISM = 15
HEXAGONAL_PRISM = 16
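Because TYPE is an abstract property, VtkHdfFile is never instantiated directly. A concrete subclass only has to supply the type string written to the required Type attribute; a minimal, hypothetical sketch (real grid types also write their mandatory root datasets, as VtkUnstructuredGrid does above):

from typing import Literal


class VtkPolyData(VtkHdfFile):  # hypothetical subclass for illustration only
    @property
    def TYPE(self) -> Literal["PolyData"]:
        return "PolyData"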

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a23ccc9ed81685385ff300c0c00e70329945db364dd7eb9607a511870469c720
size 75462640

View File

@@ -0,0 +1,8 @@
#material: 1 0 1 0 free_space
#material: 4.9 0 1 0 myWater
#material: 3 0 2 0 boxMaterial
#material: 2 0 1.5 0 boxMaterial+boxMaterial+free_space+free_space
#material: 1.5 0 1.25 0 boxMaterial+free_space+free_space+free_space
#material: 1.975 0 1 0 free_space+free_space+free_space+myWater
#material: 2.95 0 1 0 free_space+free_space+myWater+myWater
#material: 3.925 0 1 0 free_space+myWater+myWater+myWater
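The compound entries above (and in the identical reference file below) are consistent with a plain arithmetic mean of the constituent materials' relative permittivity and permeability; a quick spot check of the last line:

# Plain arithmetic, not gprMax code: free_space (er=1) averaged with
# three parts myWater (er=4.9).
er = (1 + 4.9 + 4.9 + 4.9) / 4  # 3.925, matching the entry above
mu = (1 + 1 + 1 + 1) / 4        # 1.0, matching the entry above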

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4b8f8c4979f4c82eb4be8a9a63cfaf19408b9f493d1bf3082a8226e97542ae4a
size 4980400

View File

@@ -0,0 +1,8 @@
#material: 1 0 1 0 free_space
#material: 4.9 0 1 0 myWater
#material: 3 0 2 0 boxMaterial
#material: 2 0 1.5 0 boxMaterial+boxMaterial+free_space+free_space
#material: 1.5 0 1.25 0 boxMaterial+free_space+free_space+free_space
#material: 1.975 0 1 0 free_space+free_space+free_space+myWater
#material: 2.95 0 1 0 free_space+free_space+myWater+myWater
#material: 3.925 0 1 0 free_space+myWater+myWater+myWater

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4a8134fca3e6228211e4a4bc3e504e64f888c55f22e025b482ec501e0051d253
size 75740296

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2ccd95ff4ef73c224c99a8c321ad6f8014c673a1b33bbb741bff415dfbffd0aa
size 4931176

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:82b8ac914a9fe20b49f2512215782f01305a2e3211aaac51913f88296a0e3ac5
size 4012444

View File

@@ -182,7 +182,7 @@ class GprMaxBaseTest(RunOnlyRegressionTest):
else:
return self.getdep(variant)
def build_reference_filepath(self, name: Union[str, os.PathLike]) -> Path:
def build_reference_filepath(self, name: Union[str, os.PathLike], suffix: str = ".h5") -> Path:
"""Build path to the specified reference file.
Reference files are saved in directories per test case. If this
@@ -192,6 +192,7 @@ class GprMaxBaseTest(RunOnlyRegressionTest):
Args:
name: Name of the file.
suffix: File extension. Default ".h5".
Returns:
filepath: Absolute path to the reference file.
@@ -202,7 +203,7 @@ class GprMaxBaseTest(RunOnlyRegressionTest):
else:
reference_dir = target.short_name
reference_file = Path("regression_checks", reference_dir, name).with_suffix(".h5")
reference_file = Path("regression_checks", reference_dir, name).with_suffix(suffix)
return reference_file.absolute()
# TODO: Change CreatePyenvTest to a fixture instead of a test dependency
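With the new suffix parameter, non-HDF5 references (such as the materials listings elsewhere in this change) resolve to the right file; hypothetical calls from inside a test:

# Defaults keep the existing behaviour:
ref_h5 = self.build_reference_filepath("full_volume")  # resolves to full_volume.h5
# Text references need an explicit suffix:
ref_txt = self.build_reference_filepath("partial_volume_materials", suffix=".txt")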

View File

@@ -9,8 +9,11 @@ from typing_extensions import TYPE_CHECKING
from reframe_tests.tests.base_tests import GprMaxBaseTest
from reframe_tests.tests.regression_checks import (
GeometryObjectMaterialsRegressionCheck,
GeometryObjectRegressionCheck,
GeometryViewRegressionCheck,
H5RegressionCheck,
ReceiverRegressionCheck,
RegressionCheck,
SnapshotRegressionCheck,
)
@@ -37,7 +40,7 @@ class ReceiverMixin(GprMaxMixin):
)
self.regression_checks.append(regression_check)
else:
regression_check = RegressionCheck(self.output_file, reference_file)
regression_check = H5RegressionCheck(self.output_file, reference_file)
self.regression_checks.append(regression_check)
@@ -77,6 +80,105 @@ class SnapshotMixin(GprMaxMixin):
self.regression_checks.append(regression_check)
class GeometryOnlyMixin(GprMaxMixin):
"""Run test with geometry only flag"""
@run_after("setup")
def add_geometry_only_flag(self):
self.executable_opts += ["--geometry-only"]
class GeometryObjectMixin(GprMaxMixin):
"""Add regression tests for geometry objects.
Attributes:
geometry_objects (list[str]): List of geometry objects to run
regression checks on.
"""
geometry_objects = variable(typ.List[str], value=[])
def build_geometry_object_filepath(self, geometry_object: str) -> Path:
"""Build filepath to the specified geometry object.
Args:
geometry_object: Name of the geometry object.
"""
return Path(geometry_object).with_suffix(".h5")
def build_materials_filepath(self, geometry_object: str) -> Path:
"""Build filepath to the materials output by the geometry object.
Args:
geometry_object: Name of the geometry object.
"""
return Path(f"{geometry_object}_materials").with_suffix(".txt")
@run_after("setup")
def add_geometry_object_regression_checks(self):
"""Add a regression check for each geometry object.
The test will be skipped if no geometry objects have been specified.
"""
self.skip_if(
            len(self.geometry_objects) == 0,
            "Must provide a list of geometry objects.",
)
for geometry_object in self.geometry_objects:
# Add materials regression check first as if this fails,
# checking the .h5 file will almost definitely fail.
materials_file = self.build_materials_filepath(geometry_object)
materials_reference_file = self.build_reference_filepath(
materials_file.name, suffix=materials_file.suffix
)
materials_regression_check = GeometryObjectMaterialsRegressionCheck(
materials_file, materials_reference_file
)
self.regression_checks.append(materials_regression_check)
geometry_object_file = self.build_geometry_object_filepath(geometry_object)
reference_file = self.build_reference_filepath(geometry_object)
regression_check = GeometryObjectRegressionCheck(geometry_object_file, reference_file)
self.regression_checks.append(regression_check)
class GeometryViewMixin(GprMaxMixin):
"""Add regression tests for geometry views.
Attributes:
geometry_views (list[str]): List of geometry views to run
regression checks on.
"""
geometry_views = variable(typ.List[str], value=[])
def build_geometry_view_filepath(self, geometry_view: str) -> Path:
"""Build filepath to the specified geometry view.
Args:
geometry_view: Name of the geometry view.
"""
return Path(geometry_view).with_suffix(".vtkhdf")
@run_after("setup")
def add_geometry_view_regression_checks(self):
"""Add a regression check for each geometry view.
The test will be skipped if no geometry views have been specified.
"""
self.skip_if(
            len(self.geometry_views) == 0,
            "Must provide a list of geometry views.",
)
for geometry_view in self.geometry_views:
geometry_view_file = self.build_geometry_view_filepath(geometry_view)
reference_file = self.build_reference_filepath(geometry_view, ".vtkhdf")
regression_check = GeometryViewRegressionCheck(geometry_view_file, reference_file)
self.regression_checks.append(regression_check)
class PythonApiMixin(GprMaxMixin):
"""Use the GprMax Python API rather than a standard input file."""

View File

@@ -9,7 +9,7 @@ from reframe.utility import osext
class RegressionCheck:
"""Compare two hdf5 files using h5diff"""
"""Compare two files using diff"""
def __init__(
self, output_file: Union[str, PathLike], reference_file: Union[str, PathLike]
@@ -23,7 +23,8 @@ class RegressionCheck:
"""
self.output_file = Path(output_file)
self.reference_file = Path(reference_file)
self.h5diff_options: list[str] = []
self.cmd = "diff"
self.options: list[str] = []
@property
def error_msg(self) -> str:
@@ -57,40 +58,54 @@ class RegressionCheck:
return sn.path_isfile(self.reference_file)
def run(self) -> Literal[True]:
"""Run the regression check using h5diff.
"""Run the regression check.
Returns:
check_passed: Returns True if the output file matches the
reference file (i.e. no output from h5diff). Otherwise,
reference file (i.e. no output from diff). Otherwise,
raises a SanityError.
Raises:
reframe.core.exceptions.SanityError: If the output file does
not exist, or the regression check fails.
"""
if runtime().system.name == "archer2":
h5diff = "/opt/cray/pe/hdf5/default/bin/h5diff"
else:
h5diff = "h5diff"
h5diff_output = osext.run_command(
[h5diff, *self.h5diff_options, str(self.output_file), str(self.reference_file)]
completed_process = osext.run_command(
[
self.cmd,
*self.options,
str(self.output_file.absolute()),
str(self.reference_file),
]
)
return sn.assert_true(
sn.path_isfile(self.output_file),
f"Expected output file '{self.output_file}' does not exist",
) and sn.assert_false(
h5diff_output.stdout,
completed_process.stdout,
(
f"{self.error_msg}\n"
f"For more details run: '{' '.join(h5diff_output.args)}'\n"
f"For more details run: '{' '.join(completed_process.args)}'\n"
f"To re-create regression file, delete '{self.reference_file}' and rerun the test."
),
)
class ReceiverRegressionCheck(RegressionCheck):
class H5RegressionCheck(RegressionCheck):
"""Compare two hdf5 files using h5diff"""
def __init__(
self, output_file: Union[str, PathLike], reference_file: Union[str, PathLike]
) -> None:
super().__init__(output_file, reference_file)
if runtime().system.name == "archer2":
self.cmd = "/opt/cray/pe/hdf5/default/bin/h5diff"
else:
self.cmd = "h5diff"
class ReceiverRegressionCheck(H5RegressionCheck):
"""Run regression check on individual reveivers in output files.
This can include arbitrary receivers in each file, or two receivers
@@ -101,7 +116,7 @@ class ReceiverRegressionCheck(RegressionCheck):
self,
output_file: Union[str, PathLike],
reference_file: Union[str, PathLike],
output_receiver: Optional[str],
output_receiver: str,
reference_receiver: Optional[str] = None,
) -> None:
"""Create a new receiver regression check.
@@ -120,18 +135,42 @@ class ReceiverRegressionCheck(RegressionCheck):
self.output_receiver = output_receiver
self.reference_receiver = reference_receiver
self.h5diff_options.append(f"rxs/{self.output_receiver}")
self.options.append(f"rxs/{self.output_receiver}")
if self.reference_receiver is not None:
self.h5diff_options.append(f"rxs/{self.reference_receiver}")
self.options.append(f"rxs/{self.reference_receiver}")
@property
def error_msg(self) -> str:
return f"Receiver '{self.output_receiver}' failed regression check"
class SnapshotRegressionCheck(RegressionCheck):
class SnapshotRegressionCheck(H5RegressionCheck):
"""Run regression check on a gprMax Snapshot."""
@property
def error_msg(self) -> str:
return f"Snapshot '{self.output_file.name}' failed regression check "
return f"Snapshot '{self.output_file.name}' failed regression check"
class GeometryObjectRegressionCheck(H5RegressionCheck):
"""Run regression check on a GprMax GeometryObject."""
@property
def error_msg(self) -> str:
return f"GeometryObject '{self.output_file.name}' failed regression check"
class GeometryObjectMaterialsRegressionCheck(RegressionCheck):
"""Run regression check on materials output by a GeometryObject."""
@property
def error_msg(self) -> str:
return f"GeometryObject materials file '{self.output_file}' failed regression check"
class GeometryViewRegressionCheck(H5RegressionCheck):
"""Run regression check on a GprMax GeometryView."""
@property
def error_msg(self) -> str:
return f"GeometryView '{self.output_file.name}' failed regression check"

View File

@@ -0,0 +1,17 @@
#title: Hertzian dipole in free-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.050 0.050 0.050 myWave
#material: 4.9 0 1 0 myWater
#material: 2 0 1.4 0 unusedMaterial
#material: 3 0 2 0 boxMaterial
#sphere: 0.05 0.05 0.05 0.03 myWater y
#box: 0.01 0.01 0.01 0.09 0.025 0.025 boxMaterial y
#geometry_objects_write: 0.02 0.02 0.02 0.06 0.06 0.06 partial_volume
#geometry_objects_write: 0 0 0 0.1 0.1 0.1 full_volume

View File

@@ -0,0 +1,17 @@
#title: Hertzian dipole in free-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.050 0.050 0.050 myWave
#material: 4.9 0 1 0 myWater
#material: 2 0 1.4 0 unusedMaterial
#material: 3 0 2 0 boxMaterial
#sphere: 0.05 0.05 0.05 0.03 myWater y
#box: 0.01 0.01 0.01 0.09 0.025 0.025 boxMaterial y
#geometry_view: 0.02 0.02 0.02 0.06 0.06 0.06 0.001 0.001 0.001 partial_volume f
#geometry_view: 0 0 0 0.1 0.1 0.1 0.001 0.001 0.001 full_volume f

View File

@@ -0,0 +1,17 @@
#title: Hertzian dipole in free-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.050 0.050 0.050 myWave
#material: 4.9 0 1 0 myWater
#material: 2 0 1.4 0 unusedMaterial
#material: 3 0 2 0 boxMaterial
#sphere: 0.05 0.05 0.05 0.03 myWater y
#box: 0.01 0.01 0.01 0.09 0.025 0.025 boxMaterial y
#geometry_view: 0.02 0.02 0.02 0.06 0.06 0.06 0.001 0.001 0.001 partial_volume n
#geometry_view: 0 0 0 0.1 0.1 0.1 0.001 0.001 0.001 full_volume n

View File

@@ -1,5 +1,11 @@
from reframe_tests.tests.base_tests import GprMaxBaseTest
from reframe_tests.tests.mixins import ReceiverMixin, SnapshotMixin
from reframe_tests.tests.mixins import (
GeometryObjectMixin,
GeometryOnlyMixin,
GeometryViewMixin,
ReceiverMixin,
SnapshotMixin,
)
class GprMaxRegressionTest(ReceiverMixin, GprMaxBaseTest):
@@ -8,3 +14,11 @@ class GprMaxRegressionTest(ReceiverMixin, GprMaxBaseTest):
class GprMaxSnapshotTest(SnapshotMixin, GprMaxBaseTest):
pass
class GprMaxGeometryViewTest(GeometryViewMixin, GeometryOnlyMixin, GprMaxBaseTest):
pass
class GprMaxGeometryObjectTest(GeometryObjectMixin, GeometryOnlyMixin, GprMaxBaseTest):
pass

View File

@@ -0,0 +1,20 @@
import reframe as rfm
from reframe.core.builtins import parameter
from reframe_tests.tests.mixins import MpiMixin
from reframe_tests.tests.standard_tests import GprMaxGeometryObjectTest
@rfm.simple_test
class TestGeometryObject(GprMaxGeometryObjectTest):
tags = {"test", "serial", "geometry only", "geometry object"}
sourcesdir = "src/geometry_object_tests"
model = parameter(["geometry_object_write"])
geometry_objects = ["partial_volume", "full_volume"]
@rfm.simple_test
class TestGeometryObjectMPI(MpiMixin, TestGeometryObject):
tags = {"test", "mpi", "geometry only", "geometry object"}
mpi_layout = parameter([[2, 2, 2], [4, 4, 1]])
test_dependency = TestGeometryObject

View File

@@ -0,0 +1,20 @@
import reframe as rfm
from reframe.core.builtins import parameter
from reframe_tests.tests.mixins import MpiMixin
from reframe_tests.tests.standard_tests import GprMaxGeometryViewTest
@rfm.simple_test
class TestGeometryView(GprMaxGeometryViewTest):
tags = {"test", "serial", "geometry only", "geometry view"}
sourcesdir = "src/geometry_view_tests"
model = parameter(["geometry_view_voxel", "geometry_view_fine"])
geometry_views = ["partial_volume", "full_volume"]
@rfm.simple_test
class TestGeometryViewMPI(MpiMixin, TestGeometryView):
tags = {"test", "mpi", "geometry only", "geometry view"}
mpi_layout = parameter([[2, 2, 2], [4, 4, 1]])
test_dependency = TestGeometryView

View File

@@ -16,69 +16,103 @@
# You should have received a copy of the GNU General Public License
# along with gprMax. If not, see <http://www.gnu.org/licenses/>.
import json
import os
from paraview.servermanager import Proxy, SourceProxy
from paraview.simple import (
AppendDatasets,
Box,
ColorBy,
FetchData,
GetActiveSource,
GetActiveView,
GetDisplayProperties,
GetParaViewVersion,
Hide,
OpenDataFile,
RenameSource,
RenderAllViews,
SetActiveSource,
Show,
Threshold,
)
from paraview.vtk.numpy_interface import dataset_adapter as dsa
from vtkmodules.vtkCommonCore import vtkStringArray
COLOUR_SCALARS = ("CELLS", "Material")
def threshold_filt(input, lt, ut, scalars):
def threshold_materials(source: SourceProxy, view: Proxy, material_ids: vtkStringArray):
"""Create threshold filter according to Paraview version.
Args:
input (array): input data to threshold filter
lt, ut (int): lower and upper bounds of thresholding operation
scalars (list/str): name of scalar array to perform thresholding
Returns:
threshold (object): threshold filter
source: Input to the threshold filter.
view: The view proxy to show the threshold in.
material_ids: Array of material ids. A new threshold filter will
be created for each material.
"""
# Read Paraview version number to set threshold filter method
pvv = GetParaViewVersion()
threshold = Threshold(Input=input)
threshold.Scalars = scalars
for index in range(material_ids.GetNumberOfValues()):
threshold = Threshold(Input=source, Scalars=COLOUR_SCALARS)
if pvv.major == 5 and pvv.minor < 10:
threshold.ThresholdRange = [lt, ut]
else:
threshold.LowerThreshold = lt
threshold.UpperThreshold = ut
if pvv.major == 5 and pvv.minor < 10:
threshold.ThresholdRange = [index, index]
else:
threshold.LowerThreshold = index
threshold.UpperThreshold = index
return threshold
RenameSource(material_ids.GetValue(index), threshold)
# Show data in view, except for free_space
if index != 1:
Show(threshold, view)
threshold.UpdatePipeline()
def display_pmls(pmlthick, dx_dy_dz, nx_ny_nz):
"""Display PMLs as box sources using PML thickness values.
Only suitable for gprMax >= v4
def create_box_sources(names: vtkStringArray, positions: dsa.VTKArray, dl: dsa.VTKArray):
"""Create new single cell box sources.
Args:
pmlthick (tuple): PML thickness values for each slab (cells)
dx_dy_dz (tuple): Spatial resolution (m)
nx_ny_dz (tuple): Domain size (cells)
names: Array of N names for the new sources.
positions: x, y, z coordinates of the new sources. This should
have shape (N, 3).
dl: x, y, z spatial resolution.
"""
for index in range(names.GetNumberOfValues()):
name = names.GetValue(index)
pos = positions[index]
src = Box(
Center=pos + dl / 2,
XLength=dl[0],
YLength=dl[1],
ZLength=dl[2],
)
RenameSource(name, src)
Show(src)
def display_pmls(pmlthick: dsa.VTKArray, dx_dy_dz: dsa.VTKArray, nx_ny_nz: dsa.VTKArray):
"""Display PMLs as box sources using PML thickness values.
Only suitable for gprMax >= v4.
Args:
pmlthick: PML thickness values for each slab (cells).
dx_dy_dz: Spatial resolution (m).
        nx_ny_nz: Domain size (cells).
"""
pml_names = ["x0", "y0", "z0", "xmax", "ymax", "zmax"]
pmls = dict.fromkeys(pml_names, None)
SetActiveSource(pv_data)
if pmlthick[0] != 0:
x0 = Box(
Center=[pmlthick[0] * dx_dy_dz[0] / 2, nx_ny_nz[1] * dx_dy_dz[1] / 2, nx_ny_nz[2] * dx_dy_dz[2] / 2],
Center=[
pmlthick[0] * dx_dy_dz[0] / 2,
nx_ny_nz[1] * dx_dy_dz[1] / 2,
nx_ny_nz[2] * dx_dy_dz[2] / 2,
],
XLength=pmlthick[0] * dx_dy_dz[0],
YLength=nx_ny_nz[1] * dx_dy_dz[1],
ZLength=nx_ny_nz[2] * dx_dy_dz[2],
@@ -100,7 +134,11 @@ def display_pmls(pmlthick, dx_dy_dz, nx_ny_nz):
if pmlthick[1] != 0:
y0 = Box(
Center=[nx_ny_nz[0] * dx_dy_dz[0] / 2, pmlthick[1] * dx_dy_dz[1] / 2, nx_ny_nz[2] * dx_dy_dz[2] / 2],
Center=[
nx_ny_nz[0] * dx_dy_dz[0] / 2,
pmlthick[1] * dx_dy_dz[1] / 2,
nx_ny_nz[2] * dx_dy_dz[2] / 2,
],
XLength=nx_ny_nz[0] * dx_dy_dz[0],
YLength=pmlthick[1] * dx_dy_dz[1],
ZLength=nx_ny_nz[2] * dx_dy_dz[2],
@@ -122,7 +160,11 @@ def display_pmls(pmlthick, dx_dy_dz, nx_ny_nz):
if pmlthick[2] != 0:
z0 = Box(
Center=[nx_ny_nz[0] * dx_dy_dz[0] / 2, nx_ny_nz[1] * dx_dy_dz[1] / 2, pmlthick[2] * dx_dy_dz[2] / 2],
Center=[
nx_ny_nz[0] * dx_dy_dz[0] / 2,
nx_ny_nz[1] * dx_dy_dz[1] / 2,
pmlthick[2] * dx_dy_dz[2] / 2,
],
XLength=nx_ny_nz[0] * dx_dy_dz[0],
YLength=nx_ny_nz[1] * dx_dy_dz[1],
ZLength=pmlthick[2] * dx_dy_dz[2],
@@ -158,96 +200,122 @@ def display_pmls(pmlthick, dx_dy_dz, nx_ny_nz):
pml_view.Opacity = 0.5
# Get whatever source is loaded - should be loaded file (.vt*) or files (.pvd)
pv_data = GetActiveSource()
def is_valid_key(key: str, dictionary: dict) -> bool:
"""Check key exists and is not an empty array (i.e. VTKNoneArray).
# Hide display of root data
Hide(pv_data)
Args:
key: Name of key to check.
dictionary: Dictionary like object that should contain the key.
# Single .vti or .vtu file
file = pv_data.FileName[0]
Returns:
is_valid_key: True if the key is in the dictionary and is not
of type VTKNoneArray. False otherwise.
"""
return key in dictionary.keys() and not isinstance(dictionary[key], dsa.VTKNoneArray)
# Read and display data from file, i.e. materials, sources, receivers, and PMLs
with open(file, "rb") as f:
# Comments () embedded in line 3 of file
f.readline()
f.readline()
c = f.readline().decode()
# Strip comment tags
c = c[5:-5]
# Model information
c = json.loads(c)
print("\ngprMax version: " + c["gprMax_version"])
print(file)
################
# Display data #
################
pv_view = GetActiveView()
pv_view.AxesGrid.Visibility = 1 # Show Data Axes Grid
pv_disp = Show(pv_data, pv_view)
pv_disp.ColorArrayName = ["CELLS", "Material"]
class HaltException(Exception):
pass
# Discretisation
dl = c["dx_dy_dz"]
# Number of voxels
nl = c["nx_ny_nz"]
# Materials
try:
for i, mat in enumerate(c["Materials"]):
threshold = threshold_filt(pv_data, i, i, ["CELLS", "Material"])
RenameSource(mat, threshold)
print("=============== Running gprMax macro ===============", end="\n\n")
# Show data in view, except for free_space
if i != 1:
thresholddisplay = Show(threshold, pv_view)
thresholddisplay.ColorArrayName = ["CELLS", "Material"]
threshold.UpdatePipeline()
except KeyError:
print("No materials to load")
# Get active source - should be loaded file (.vtkhdf)
pv_source = GetActiveSource()
# Display any sources
try:
for item in c["Sources"]:
pos = item["position"]
name = item["name"]
src = Box(
Center=[pos[0] + dl[0] / 2, pos[1] + dl[1] / 2, pos[2] + dl[2] / 2],
XLength=dl[0],
YLength=dl[1],
ZLength=dl[2],
if pv_source is None:
raise HaltException("ERROR: No active source.")
pv_source.UpdatePipeline()
raw_data = FetchData(pv_source)[0]
data = dsa.WrapDataObject(raw_data)
# Check the necessary metadata is attached to the file
metadata_keys = data.FieldData.keys()
compulsory_keys = ["gprMax_version", "nx_ny_nz", "dx_dy_dz", "material_ids"]
missing_keys = []
for key in compulsory_keys:
if key not in metadata_keys:
missing_keys.append(key)
if len(missing_keys) > 0:
keys = "\n- ".join(missing_keys)
print(f"Missing metadata fields: \n- {keys}\n")
if len(metadata_keys) > 0:
keys = "\n- ".join(metadata_keys)
print(f"Found: \n- {keys}\n")
raise HaltException(
"ERROR: Missing gprMax metadata information. Do you have the correct source selected?"
)
RenameSource(name, src)
Show(src)
except KeyError:
print("No sources to load")
# Display any receivers
try:
for item in c["Receivers"]:
pos = item["position"]
name = item["name"]
rx = Box(
Center=[pos[0] + dl[0] / 2, pos[1] + dl[1] / 2, pos[2] + dl[2] / 2],
XLength=dl[0],
YLength=dl[1],
ZLength=dl[2],
)
RenameSource(name, rx)
Show(rx)
except KeyError:
print("No receivers to load")
# gprMax version
version = data.FieldData["gprMax_version"].GetValue(0)
# Number of voxels
nl = data.FieldData["nx_ny_nz"]
# Discretisation
dl = data.FieldData["dx_dy_dz"]
# Display any PMLs
try:
pt = c["PMLthickness"]
display_pmls(pt, dl, nl)
except KeyError:
print("No PMLs to load")
print("gprMax version:", data.FieldData["gprMax_version"].GetValue(0))
print("nx_ny_nz:", nl)
print("dx_dy_dz:", dl, end="\n\n")
################
# Display data #
################
RenderAllViews()
# Get active view
pv_view = GetActiveView()
pv_view.AxesGrid.Visibility = 1 # Show Data Axes Grid
# Reset view to fit data
pv_view.ResetCamera()
# Get display properties
pv_disp = GetDisplayProperties(pv_source, view=pv_view)
# Set scalar colouring
ColorBy(pv_disp, COLOUR_SCALARS)
# Materials
print("Loading materials... ", end="\t")
if is_valid_key("material_ids", data.FieldData):
threshold_materials(pv_source, pv_view, data.FieldData["material_ids"])
print("Done.")
else:
print("No materials to load.")
# Display any sources
print("Loading sources... ", end="\t\t")
if is_valid_key("source_ids", data.FieldData) and is_valid_key("sources", data.FieldData):
create_box_sources(data.FieldData["source_ids"], data.FieldData["sources"], dl)
print("Done.")
else:
print("No sources to load.")
# Display any receivers
print("Loading receivers... ", end="\t")
if is_valid_key("receiver_ids", data.FieldData) and is_valid_key("receivers", data.FieldData):
create_box_sources(data.FieldData["receiver_ids"], data.FieldData["receivers"], dl)
print("Done.")
else:
print("No receivers to load.")
# Display any PMLs
print("Loading PMLs... ", end="\t\t")
if is_valid_key("pml_thickness", data.FieldData):
display_pmls(data.FieldData["pml_thickness"], dl, nl)
print("Done.")
else:
print("No PMLs to load.")
RenderAllViews()
SetActiveSource(pv_source)
Hide(pv_source)
# Reset view to fit data
pv_view.ResetCamera()
except HaltException as e:
print(e)
print()