Merge branch '37-hdf5-geometry-views' into 26-mpi-geometry-objects

This commit is contained in:
nmannall
2024-12-04 15:47:35 +00:00
Current commit 8b038f09c3
23 changed files with 1405 additions and 311 deletions

.gitattributes

@@ -1 +1,4 @@
tools/Jupyter_notebooks/* linguist-vendored
reframe_tests/regression_checks/TestGeometryView_5176823e/partial_volume.vtkhdf filter=lfs diff=lfs merge=lfs -text
reframe_tests/regression_checks/TestGeometryView_5176823e/full_volume.vtkhdf filter=lfs diff=lfs merge=lfs -text
reframe_tests/regression_checks/TestGeometryView_77980202/full_volume.vtkhdf filter=lfs diff=lfs merge=lfs -text


@@ -92,7 +92,7 @@ class Discretisation(UserObjectSingle):
def build(self, model, uip):
try:
model.dl = np.array(self.kwargs["p1"], dtype=float)
model.dl = np.array(self.kwargs["p1"], dtype=np.float64)
except KeyError:
logger.exception(f"{self.__str__()} discretisation requires a point")
raise


@@ -17,9 +17,81 @@
# along with gprMax. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
cimport numpy as np
cpdef get_line_properties(
int number_of_lines,
int nx,
int ny,
int nz,
np.uint32_t[:, :, :, :] ID
):
"""Generate connectivity array and get material ID for each line.
Args:
number_of_lines: Number of lines.
nx: Number of points in the x dimension.
ny: Number of points in the y dimension.
nz: Number of points in the z dimension.
ID: memoryview of sampled ID array according to geometry view
spatial discretisation.
Returns:
connectivity: NDArray of shape (2 * number_of_lines,) listing
the start and end point IDs of each line.
material_data: NDArray of shape (number_of_lines,) listing
material IDs for each line.
"""
cdef np.ndarray material_data = np.zeros(number_of_lines, dtype=np.uint32)
cdef np.ndarray connectivity = np.zeros(2 * number_of_lines, dtype=np.intc)
cdef int line_index = 0
cdef int connectivity_index = 0
cdef int point_id = 0
cdef int z_step = 1
cdef int y_step = nz + 1
cdef int x_step = y_step * (ny + 1)
cdef int i, j, k
for i in range(nx):
for j in range(ny):
for k in range(nz):
# x-direction cell edge
material_data[line_index] = ID[0, i, j, k]
connectivity[connectivity_index] = point_id
connectivity[connectivity_index + 1] = point_id + x_step
line_index += 1
connectivity_index += 2
# y-direction cell edge
material_data[line_index] = ID[1, i, j, k]
connectivity[connectivity_index] = point_id
connectivity[connectivity_index + 1] = point_id + y_step
line_index += 1
connectivity_index += 2
# z-direction cell edge
material_data[line_index] = ID[2, i, j, k]
connectivity[connectivity_index] = point_id
connectivity[connectivity_index + 1] = point_id + z_step
line_index += 1
connectivity_index += 2
# Next point
point_id += 1
# Skip point at (i, j, nz)
point_id += 1
# Skip points in line (i, ny, t) where 0 <= t <= nz
point_id += nz + 1
return connectivity, material_data
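The stepping constants encode an x-major point layout: point (i, j, k) has ID i * (ny + 1) * (nz + 1) + j * (nz + 1) + k, which is why the loop skips one point at each (i, j, nz) boundary and a whole line of points at each (i, ny, t) boundary. A minimal pure-Python sketch of that arithmetic (illustrative only, not part of the commit):

nx, ny, nz = 2, 1, 1
z_step = 1
y_step = nz + 1
x_step = y_step * (ny + 1)

def point_id(i, j, k):
    # x-major ordering over the (nx + 1) x (ny + 1) x (nz + 1) point lattice
    return i * x_step + j * y_step + k * z_step

# The x/y/z cell edges leaving a point end at these neighbouring IDs
assert point_id(1, 0, 1) == point_id(0, 0, 1) + x_step
assert point_id(0, 1, 1) == point_id(0, 0, 1) + y_step
assert point_id(0, 0, 2) == point_id(0, 0, 1) + z_step
# Last point in the lattice has the highest ID
assert point_id(nx, ny, nz) == (nx + 1) * (ny + 1) * (nz + 1) - 1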
cpdef write_lines(
float xs,
float ys,


@@ -16,34 +16,38 @@
# You should have received a copy of the GNU General Public License
# along with gprMax. If not, see <http://www.gnu.org/licenses/>.
import json
import logging
import sys
from abc import ABC, abstractmethod
from io import TextIOWrapper
from itertools import chain
from pathlib import Path
from typing import Dict, List, Sequence, Tuple, Union
import h5py
import numpy as np
from evtk.hl import imageToVTK, linesToVTK
from evtk.vtk import VtkGroup, VtkImageData, VtkUnstructuredGrid
from mpi4py import MPI
import numpy.typing as npt
from tqdm import tqdm
import gprMax.config as config
from gprMax.grid.fdtd_grid import FDTDGrid
from gprMax.grid.mpi_grid import MPIGrid
from gprMax.materials import Material
from gprMax.receivers import Rx
from gprMax.sources import Source
from gprMax.vtkhdf_filehandlers.vtk_image_data import VtkImageData
from gprMax.vtkhdf_filehandlers.vtk_unstructured_grid import VtkUnstructuredGrid
from gprMax.vtkhdf_filehandlers.vtkhdf import VtkCellType, VtkHdfFile
from ._version import __version__
from .cython.geometry_outputs import write_lines
from .cython.geometry_outputs import get_line_properties
from .subgrids.grid import SubGridBaseGrid
from .utilities.utilities import get_terminal_width
logger = logging.getLogger(__name__)
def save_geometry_views(gvs):
def save_geometry_views(gvs: "List[GeometryView]"):
"""Creates and saves geometryviews.
Args:
@@ -53,28 +57,42 @@ def save_geometry_views(gvs):
logger.info("")
for i, gv in enumerate(gvs):
gv.set_filename()
vtk_data = gv.prep_vtk()
gv.prep_vtk()
pbar = tqdm(
total=gv.nbytes,
unit="byte",
unit_scale=True,
desc=f"Writing geometry view file {i + 1}/{len(gvs)}, "
f"{gv.filename.name}{gv.vtkfiletype.ext}",
desc=f"Writing geometry view file {i + 1}/{len(gvs)}, {gv.filename.name}",
ncols=get_terminal_width() - 1,
file=sys.stdout,
disable=not config.sim_config.general["progressbars"],
)
gv.write_vtk(vtk_data)
gv.write_vtk()
pbar.update(gv.nbytes)
pbar.close()
logger.info("")
class GeometryView:
class GeometryView(ABC):
"""Base class for Geometry Views."""
def __init__(self, xs, ys, zs, xf, yf, zf, dx, dy, dz, filename, grid):
FILE_EXTENSION = ".vtkhdf"
def __init__(
self,
xs: int,
ys: int,
zs: int,
xf: int,
yf: int,
zf: int,
dx: int,
dy: int,
dz: int,
filename: str,
grid: FDTDGrid,
):
"""
Args:
xs, xf, ys, yf, zs, zf: ints for extent of geometry view in cells.
@@ -83,44 +101,89 @@ class GeometryView:
grid: FDTDGrid class describing a grid in a model.
"""
self.xs = xs
self.ys = ys
self.zs = zs
self.xf = xf
self.yf = yf
self.zf = zf
self.nx = self.xf - self.xs
self.ny = self.yf - self.ys
self.nz = self.zf - self.zs
self.dx = dx
self.dy = dy
self.dz = dz
self.filename = filename
self.start = np.array([xs, ys, zs], dtype=np.intc)
self.stop = np.array([xf, yf, zf], dtype=np.intc)
self.step = np.array([dx, dy, dz], dtype=np.intc)
self.size = (self.stop - self.start) // self.step
self.filename = Path(filename)
self.filenamebase = filename
self.grid = grid
self.nbytes = None
self.material_data = None
self.materials = None
# Properties for backwards compatibility
@property
def xs(self) -> int:
return self.start[0]
@property
def ys(self) -> int:
return self.start[1]
@property
def zs(self) -> int:
return self.start[2]
@property
def xf(self) -> int:
return self.stop[0]
@property
def yf(self) -> int:
return self.stop[1]
@property
def zf(self) -> int:
return self.stop[2]
@property
def dx(self) -> int:
return self.step[0]
@property
def dy(self) -> int:
return self.step[1]
@property
def dz(self) -> int:
return self.step[2]
@property
def nx(self) -> int:
return self.size[0]
@property
def ny(self) -> int:
return self.size[1]
@property
def nz(self) -> int:
return self.size[2]
def set_filename(self):
"""Constructs filename from user-supplied name and model run number."""
"""Construct filename from user-supplied name and model number."""
parts = config.get_model_config().output_file_path.parts
self.filename = Path(
*parts[:-1], self.filenamebase + config.get_model_config().appendmodelnumber
)
).with_suffix(self.FILE_EXTENSION)
@abstractmethod
def prep_vtk(self):
pass
@abstractmethod
def write_vtk(self):
pass
class GeometryViewLines(GeometryView):
"""Unstructured grid (.vtu) for a per-cell-edge geometry view."""
def __init__(self, *args):
super().__init__(*args)
self.vtkfiletype = VtkUnstructuredGrid
"""Unstructured grid for a per-cell-edge geometry view."""
def prep_vtk(self):
"""Prepares data for writing to VTK file.
Returns:
vtk_data: dict of coordinates, data, and comments for VTK file.
"""
"""Prepares data for writing to VTKHDF file."""
# Sample ID array according to geometry view spatial discretisation
# Only create a new array if subsampling is required
@@ -129,7 +192,7 @@ class GeometryViewLines(GeometryView):
or (self.dx, self.dy, self.dz) != (1, 1, 1)
or (self.xs, self.ys, self.zs) != (0, 0, 0)
):
# Require contiguous for evtk library
# Require contiguous array
ID = np.ascontiguousarray(
self.grid.ID[
:,
@@ -142,81 +205,66 @@ class GeometryViewLines(GeometryView):
# This array is contiguous by design
ID = self.grid.ID
x, y, z, lines = write_lines(
(self.xs * self.grid.dx),
(self.ys * self.grid.dy),
(self.zs * self.grid.dz),
self.nx,
self.ny,
self.nz,
(self.dx * self.grid.dx),
(self.dy * self.grid.dy),
(self.dz * self.grid.dz),
ID,
)
x = np.arange(self.nx + 1, dtype=np.float64)
y = np.arange(self.ny + 1, dtype=np.float64)
z = np.arange(self.nz + 1, dtype=np.float64)
coords = np.meshgrid(x, y, z, indexing="ij")
self.points = np.vstack(list(map(np.ravel, coords))).T
self.points += self.start
self.points *= self.step * self.grid.dl
# Add offset to subgrid geometry to correctly locate within main grid
if isinstance(self.grid, SubGridBaseGrid):
x += self.grid.i0 * self.grid.dx * self.grid.ratio
y += self.grid.j0 * self.grid.dy * self.grid.ratio
z += self.grid.k0 * self.grid.dz * self.grid.ratio
offset = [self.grid.i0, self.grid.j0, self.grid.k0]
self.points += offset * self.grid.dl * self.grid.ratio
# Each point is the 'source' for 3 lines.
# NB: Excluding points at the far edge of the geometry as those
# are the 'source' for no lines
n_lines = 3 * self.nx * self.ny * self.nz
self.cell_types = np.full(n_lines, VtkCellType.LINE)
self.cell_offsets = np.arange(0, 2 * n_lines + 2, 2, dtype=np.intc)
self.connectivity, self.material_data = get_line_properties(
n_lines, self.nx, self.ny, self.nz, ID
)
assert isinstance(self.connectivity, np.ndarray)
assert isinstance(self.material_data, np.ndarray)
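Each VTK LINE cell consumes exactly two connectivity IDs, so the offsets array is simply the even numbers up to 2 * n_lines: one entry per cell plus a closing end marker. A quick sketch (illustrative values):

import numpy as np

n_lines = 3
cell_offsets = np.arange(0, 2 * n_lines + 2, 2, dtype=np.intc)
assert list(cell_offsets) == [0, 2, 4, 6]
assert len(cell_offsets) == n_lines + 1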
# Write information about any PMLs, sources, receivers
comments = Comments(self.grid, self)
comments.averaged_materials = True
comments.materials_only = True
info = comments.get_gprmax_info()
comments = json.dumps(info)
self.metadata = Metadata(self.grid, self, averaged_materials=True, materials_only=True)
# Number of bytes of data to be written to file
offsets_size = np.arange(start=2, step=2, stop=len(x) + 1, dtype="int32").nbytes
connect_size = len(x) * np.dtype("int32").itemsize
cell_type_size = len(x) * np.dtype("uint8").itemsize
self.nbytes = (
x.nbytes
+ y.nbytes
+ z.nbytes
+ lines.nbytes
+ offsets_size
+ connect_size
+ cell_type_size
self.points.nbytes
+ self.cell_types.nbytes
+ self.connectivity.nbytes
+ self.cell_offsets.nbytes
+ self.material_data.nbytes
)
vtk_data = {"x": x, "y": y, "z": z, "data": lines, "comments": comments}
def write_vtk(self):
"""Writes geometry information to a VTKHDF file."""
return vtk_data
def write_vtk(self, vtk_data):
"""Writes geometry information to a VTK file.
Args:
vtk_data: dict of coordinates, data, and comments for VTK file.
"""
# Write the VTK file .vtu
linesToVTK(
str(self.filename),
vtk_data["x"],
vtk_data["y"],
vtk_data["z"],
cellData={"Material": vtk_data["data"]},
comments=[vtk_data["comments"]],
)
# Write the VTK file
with VtkUnstructuredGrid(
self.filename,
self.points,
self.cell_types,
self.connectivity,
self.cell_offsets,
) as f:
f.add_cell_data("Material", self.material_data)
self.metadata.write_to_vtkhdf(f)
class GeometryViewVoxels(GeometryView):
"""Imagedata (.vti) for a per-cell geometry view."""
def __init__(self, *args):
super().__init__(*args)
self.vtkfiletype = VtkImageData
"""Image data for a per-cell geometry view."""
def prep_vtk(self):
"""Prepares data for writing to VTK file.
Returns:
vtk_data: dict of data and comments for VTK file.
"""
"""Prepares data for writing to VTKHDF file."""
# Sample solid array according to geometry view spatial discretisation
# Only create a new array if subsampling is required
@@ -225,8 +273,8 @@ class GeometryViewVoxels(GeometryView):
or (self.dx, self.dy, self.dz) != (1, 1, 1)
or (self.xs, self.ys, self.zs) != (0, 0, 0)
):
# Require contiguous for evtk library
solid = np.ascontiguousarray(
# Require contiguous array
self.material_data = np.ascontiguousarray(
self.grid.solid[
self.xs : self.xf : self.dx,
self.ys : self.yf : self.dy,
@@ -235,74 +283,64 @@ class GeometryViewVoxels(GeometryView):
)
else:
# This array is contiguous by design
solid = self.grid.solid
# Write information about any PMLs, sources, receivers
comments = Comments(self.grid, self)
info = comments.get_gprmax_info()
comments = json.dumps(info)
self.nbytes = solid.nbytes
vtk_data = {"data": solid, "comments": comments}
return vtk_data
def write_vtk(self, vtk_data):
"""Writes geometry information to a VTK file.
Args:
vtk_data: dict of data and comments for VTK file.
"""
self.material_data = self.grid.solid
if isinstance(self.grid, SubGridBaseGrid):
origin = (
self.origin = np.array(
[
(self.grid.i0 * self.grid.dx * self.grid.ratio),
(self.grid.j0 * self.grid.dy * self.grid.ratio),
(self.grid.k0 * self.grid.dz * self.grid.ratio),
]
)
else:
origin = ((self.xs * self.grid.dx), (self.ys * self.grid.dy), (self.zs * self.grid.dz))
self.origin = self.start * self.grid.dl
# Write the VTK file .vti
imageToVTK(
str(self.filename),
origin=origin,
spacing=((self.dx * self.grid.dx), (self.dy * self.grid.dy), (self.dz * self.grid.dz)),
cellData={"Material": vtk_data["data"]},
comments=[vtk_data["comments"]],
)
self.spacing = self.step * self.grid.dl
# Write information about any PMLs, sources, receivers
self.metadata = Metadata(self.grid, self)
self.nbytes = self.material_data.nbytes
def write_vtk(self):
"""Writes geometry information to a VTKHDF file."""
with VtkImageData(self.filename, self.size, self.origin, self.spacing) as f:
f.add_cell_data("Material", self.material_data)
self.metadata.write_to_vtkhdf(f)
class Comments:
class Metadata:
"""Comments can be strings included in the header of XML VTK file, and are
used to hold extra (gprMax) information about the VTK data.
"""
def __init__(self, grid, gv):
def __init__(
self,
grid: FDTDGrid,
gv: GeometryView,
averaged_materials: bool = False,
materials_only: bool = False,
):
self.grid = grid
self.gv = gv
self.averaged_materials = False
self.materials_only = False
self.averaged_materials = averaged_materials
self.materials_only = materials_only
def get_gprmax_info(self):
"""Returns gprMax specific information relating material, source,
and receiver names to numeric identifiers.
"""
self.gprmax_version = __version__
self.dx_dy_dz = self.grid.dl
self.nx_ny_nz = np.array([self.grid.nx, self.grid.ny, self.grid.nz], dtype=np.intc)
# Comments for Paraview macro
comments = {
"gprMax_version": __version__,
"dx_dy_dz": self.dx_dy_dz_comment(),
"nx_ny_nz": self.nx_ny_nz_comment(),
"Materials": self.materials_comment(),
} # Write the name and numeric ID for each material
self.materials = self.materials_comment()
# Write information on PMLs, sources, and receivers
if not self.materials_only:
# Information on PML thickness
if self.grid.pmls["slabs"]:
comments["PMLthickness"] = self.pml_gv_comment()
self.pml_thickness = self.pml_gv_comment()
else:
self.pml_thickness = None
srcs = (
self.grid.hertziandipoles
+ self.grid.magneticdipoles
@@ -310,53 +348,89 @@ class Comments:
+ self.grid.transmissionlines
)
if srcs:
comments["Sources"] = self.srcs_rx_gv_comment(srcs)
self.source_ids, self.source_positions = self.srcs_rx_gv_comment(srcs)
else:
self.source_ids = None
self.source_positions = None
if self.grid.rxs:
comments["Receivers"] = self.srcs_rx_gv_comment(self.grid.rxs)
self.receiver_ids, self.receiver_positions = self.srcs_rx_gv_comment(self.grid.rxs)
else:
self.receiver_ids = None
self.receiver_positions = None
return comments
def write_to_vtkhdf(self, file_handler: VtkHdfFile):
file_handler.add_field_data(
"gprMax_version",
self.gprmax_version,
dtype=h5py.string_dtype(),
)
file_handler.add_field_data("dx_dy_dz", self.dx_dy_dz)
file_handler.add_field_data("nx_ny_nz", self.nx_ny_nz)
def pml_gv_comment(self):
file_handler.add_field_data(
"material_ids",
self.materials,
dtype=h5py.string_dtype(),
)
if not self.materials_only:
if self.pml_thickness is not None:
file_handler.add_field_data("pml_thickness", self.pml_thickness)
if self.source_ids is not None and self.source_positions is not None:
file_handler.add_field_data(
"source_ids", self.source_ids, dtype=h5py.string_dtype()
)
file_handler.add_field_data("sources", self.source_positions)
if self.receiver_ids is not None and self.receiver_positions is not None:
file_handler.add_field_data(
"receiver_ids", self.receiver_ids, dtype=h5py.string_dtype()
)
file_handler.add_field_data("receivers", self.receiver_positions)
def pml_gv_comment(self) -> List[int]:
grid = self.grid
# Only render PMLs if they are in the geometry view
pmlstorender = dict.fromkeys(grid.pmls["thickness"], 0)
thickness: Dict[str, int] = grid.pmls["thickness"]
gv_pml_depth = dict.fromkeys(thickness, 0)
# Casting to int required as json does not handle numpy types
if grid.pmls["thickness"]["x0"] - self.gv.xs > 0:
pmlstorender["x0"] = int(grid.pmls["thickness"]["x0"] - self.gv.xs)
if grid.pmls["thickness"]["y0"] - self.gv.ys > 0:
pmlstorender["y0"] = int(grid.pmls["thickness"]["y0"] - self.gv.ys)
if grid.pmls["thickness"]["z0"] - self.gv.zs > 0:
pmlstorender["z0"] = int(grid.pmls["thickness"]["z0"] - self.gv.zs)
if self.gv.xf > grid.nx - grid.pmls["thickness"]["xmax"]:
pmlstorender["xmax"] = int(self.gv.xf - (grid.nx - grid.pmls["thickness"]["xmax"]))
if self.gv.yf > grid.ny - grid.pmls["thickness"]["ymax"]:
pmlstorender["ymax"] = int(self.gv.yf - (grid.ny - grid.pmls["thickness"]["ymax"]))
if self.gv.zf > grid.nz - grid.pmls["thickness"]["zmax"]:
pmlstorender["zmax"] = int(self.gv.zf - (grid.nz - grid.pmls["thickness"]["zmax"]))
if self.gv.xs < thickness["x0"]:
gv_pml_depth["x0"] = thickness["x0"] - self.gv.xs
if self.gv.ys < thickness["y0"]:
gv_pml_depth["y0"] = thickness["y0"] - self.gv.ys
if thickness["z0"] - self.gv.zs > 0:
gv_pml_depth["z0"] = thickness["z0"] - self.gv.zs
if self.gv.xf > grid.nx - thickness["xmax"]:
gv_pml_depth["xmax"] = self.gv.xf - (grid.nx - thickness["xmax"])
if self.gv.yf > grid.ny - thickness["ymax"]:
gv_pml_depth["ymax"] = self.gv.yf - (grid.ny - thickness["ymax"])
if self.gv.zf > grid.nz - thickness["zmax"]:
gv_pml_depth["zmax"] = self.gv.zf - (grid.nz - thickness["zmax"])
return list(pmlstorender.values())
return list(gv_pml_depth.values())
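A worked example of the overlap arithmetic (illustrative numbers, not from the source): on a 100-cell grid with 10-cell PMLs, a geometry view spanning cells 5 to 98 sees part of both x slabs.

nx, xs, xf = 100, 5, 98
thickness_x0 = thickness_xmax = 10
depth_x0 = thickness_x0 - xs                # 5 cells of the x0 slab visible
depth_xmax = xf - (nx - thickness_xmax)     # 8 cells of the xmax slab visible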
def srcs_rx_gv_comment(self, srcs):
def srcs_rx_gv_comment(
self, srcs: Union[Sequence[Source], List[Rx]]
) -> Tuple[List[str], npt.NDArray[np.float32]]:
"""Used to name sources and/or receivers."""
sc = []
for src in srcs:
p = (src.xcoord * self.grid.dx, src.ycoord * self.grid.dy, src.zcoord * self.grid.dz)
p = list(map(float, p))
names: List[str] = []
positions: npt.NDArray[np.float32] = np.empty((len(srcs), 3))
for index, src in enumerate(srcs):
position = src.coord * self.grid.dl
names.append(src.ID)
positions[index] = position
s = {"name": src.ID, "position": p}
sc.append(s)
return names, positions
return sc
def dx_dy_dz_comment(self) -> npt.NDArray[np.float64]:
return self.grid.dl
def dx_dy_dz_comment(self):
return list(map(float, [self.grid.dx, self.grid.dy, self.grid.dz]))
def nx_ny_nz_comment(self) -> npt.NDArray[np.intc]:
return np.array([self.grid.nx, self.grid.ny, self.grid.nz], dtype=np.intc)
def nx_ny_nz_comment(self):
return list(map(int, [self.grid.nx, self.grid.ny, self.grid.nz]))
def materials_comment(self):
def materials_comment(self) -> List[str]:
if not self.averaged_materials:
return [m.ID for m in self.grid.materials if m.type != "dielectric-smoothed"]
else:


@@ -58,21 +58,21 @@ class FDTDGrid:
self.nx = 0
self.ny = 0
self.nz = 0
self.dl = np.ones(3, dtype=float)
self.dl = np.ones(3, dtype=np.float64)
self.dt = 0.0
# Field Arrays
self.Ex: npt.NDArray[np.single]
self.Ey: npt.NDArray[np.single]
self.Ez: npt.NDArray[np.single]
self.Hx: npt.NDArray[np.single]
self.Hy: npt.NDArray[np.single]
self.Hz: npt.NDArray[np.single]
self.Ex: npt.NDArray[np.float32]
self.Ey: npt.NDArray[np.float32]
self.Ez: npt.NDArray[np.float32]
self.Hx: npt.NDArray[np.float32]
self.Hy: npt.NDArray[np.float32]
self.Hz: npt.NDArray[np.float32]
# Dispersive Arrays
self.Tx: npt.NDArray[np.single]
self.Ty: npt.NDArray[np.single]
self.Tz: npt.NDArray[np.single]
self.Tx: npt.NDArray[np.float32]
self.Ty: npt.NDArray[np.float32]
self.Tz: npt.NDArray[np.float32]
# Geometry Arrays
self.solid: npt.NDArray[np.uint32]
@@ -81,9 +81,9 @@ class FDTDGrid:
self.ID: npt.NDArray[np.uint32]
# Update Coefficient Arrays
self.updatecoeffsE: npt.NDArray
self.updatecoeffsH: npt.NDArray
self.updatecoeffsdispersive: npt.NDArray
self.updatecoeffsE: npt.NDArray[np.float32]
self.updatecoeffsH: npt.NDArray[np.float32]
self.updatecoeffsdispersive: npt.NDArray[np.float32]
# PML parameters - set some defaults to use if not user provided
self.pmls = {}


@@ -30,7 +30,7 @@ class Material:
their properties and update coefficients.
"""
def __init__(self, numID, ID):
def __init__(self, numID: int, ID: str):
"""
Args:
numID: int for numeric ID of the material.


@@ -23,6 +23,7 @@ from typing import List, Sequence
import humanize
import numpy as np
import numpy.typing as npt
import psutil
from colorama import Fore, Style, init
@@ -125,11 +126,11 @@ class Model:
self.G.dl[2] = value
@property
def dl(self) -> np.ndarray:
def dl(self) -> npt.NDArray[np.float64]:
return self.G.dl
@dl.setter
def dl(self, value: np.ndarray):
def dl(self, value: npt.NDArray[np.float64]):
self.G.dl = value
@property
@@ -164,7 +165,10 @@ class Model:
self.p = psutil.Process()
# Normal model reading/building process; bypassed if geometry information to be reused
self.reuse_geometry() if config.get_model_config().reuse_geometry() else self.build_geometry()
if config.get_model_config().reuse_geometry():
self.reuse_geometry()
else:
self.build_geometry()
logger.info(
f"Output directory: {config.get_model_config().output_file_path.parent.resolve()}\n"


@@ -110,7 +110,7 @@ class Snapshot:
filename: str,
fileext: str,
outputs: Dict[str, bool],
grid_dl: npt.NDArray[np.single],
grid_dl: npt.NDArray[np.float32],
grid_dt: float,
):
"""
@@ -358,7 +358,7 @@ class MPISnapshot(Snapshot):
filename: str,
fileext: str,
outputs: Dict[str, bool],
grid_dl: npt.NDArray[np.single],
grid_dl: npt.NDArray[np.float32],
grid_dt: float,
):
super().__init__(


@@ -0,0 +1,184 @@
from os import PathLike
from typing import Literal, Optional, Union
import numpy as np
import numpy.typing as npt
from mpi4py import MPI
from gprMax.vtkhdf_filehandlers.vtkhdf import VtkHdfFile
class VtkImageData(VtkHdfFile):
"""File handler for creating a VTKHDF Image Data file.
File format information is available here:
https://docs.vtk.org/en/latest/design_documents/VTKFileFormats.html#image-data
"""
DIRECTION_ATTR = "Direction"
ORIGIN_ATTR = "Origin"
SPACING_ATTR = "Spacing"
WHOLE_EXTENT_ATTR = "WholeExtent"
DIMENSIONS = 3
@property
def TYPE(self) -> Literal["ImageData"]:
return "ImageData"
def __init__(
self,
filename: Union[str, PathLike],
shape: npt.NDArray[np.intc],
origin: Optional[npt.NDArray[np.float32]] = None,
spacing: Optional[npt.NDArray[np.float32]] = None,
direction: Optional[npt.NDArray[np.float32]] = None,
comm: Optional[MPI.Comm] = None,
):
"""Create a new VtkImageData file.
If the file already exists, it will be overridden. Required
attributes (Type and Version) will be written to the file.
The file will be opened using the 'mpio' h5py driver if an MPI
communicator is provided.
Args:
filename: Name of the file (can be a file path). The file
extension will be set to '.vtkhdf'.
shape: Shape of the image data to be stored in the file.
This specifies the number of cells. Image data can be
1D, 2D, or 3D.
origin (optional): Origin of the image data. Default
[0, 0, 0].
spacing (optional): Discretisation of the image data.
Default [1, 1, 1].
direction (optional): Array of direction vectors for each
dimension of the image data. Can be a flattened array.
I.e. [[1, 0, 0], [0, 1, 0], [0, 0, 1]] and
[1, 0, 0, 0, 1, 0, 0, 0, 1] are equivalent. Default
[[1, 0, 0], [0, 1, 0], [0, 0, 1]].
comm (optional): MPI communicator containing all ranks that
want to write to the file.
"""
super().__init__(filename, comm)
if len(shape) == 0:
raise ValueError(f"Shape must not be empty.")
if len(shape) > self.DIMENSIONS:
raise ValueError(f"Shape must not have more than {self.DIMENSIONS} dimensions.")
elif len(shape) < self.DIMENSIONS:
shape = np.concatenate((shape, np.ones(self.DIMENSIONS - len(shape), dtype=np.intc)))
self.shape = shape
whole_extent = np.zeros(2 * self.DIMENSIONS, dtype=np.intc)
whole_extent[1::2] = self.shape
self._set_root_attribute(self.WHOLE_EXTENT_ATTR, whole_extent)
if origin is None:
origin = np.zeros(self.DIMENSIONS, dtype=np.float32)
self.set_origin(origin)
if spacing is None:
spacing = np.ones(self.DIMENSIONS, dtype=np.float32)
self.set_spacing(spacing)
if direction is None:
direction = np.diag(np.ones(self.DIMENSIONS, dtype=np.float32))
self.set_direction(direction)
@property
def whole_extent(self) -> npt.NDArray[np.intc]:
return self._get_root_attribute(self.WHOLE_EXTENT_ATTR)
@property
def origin(self) -> npt.NDArray[np.float32]:
return self._get_root_attribute(self.ORIGIN_ATTR)
@property
def spacing(self) -> npt.NDArray[np.float32]:
return self._get_root_attribute(self.SPACING_ATTR)
@property
def direction(self) -> npt.NDArray[np.float32]:
return self._get_root_attribute(self.DIRECTION_ATTR)
def set_origin(self, origin: npt.NDArray[np.float32]):
"""Set the origin coordinate of the image data.
Args:
origin: x, y, z coordinates to set as the origin.
"""
if len(origin) != self.DIMENSIONS:
raise ValueError(f"Origin attribute must have {self.DIMENSIONS} dimensions.")
self._set_root_attribute(self.ORIGIN_ATTR, origin)
def set_spacing(self, spacing: npt.NDArray[np.float32]):
"""Set the discritisation of the image data.
Args:
spacing: Discritisation of the x, y, and z dimensions.
"""
if len(spacing) != self.DIMENSIONS:
raise ValueError(f"Spacing attribute must have {self.DIMENSIONS} dimensions.")
self._set_root_attribute(self.SPACING_ATTR, spacing)
def set_direction(self, direction: npt.NDArray[np.float32]):
"""Set the coordinate system of the image data.
Args:
direction: Array of direction vectors for each dimension of
the image data. Can be a flattened array. I.e.
[[1, 0, 0], [0, 1, 0], [0, 0, 1]] and
[1, 0, 0, 0, 1, 0, 0, 0, 1] are equivalent.
"""
direction = direction.flatten()
if len(direction) != self.DIMENSIONS * self.DIMENSIONS:
raise ValueError(
f"Direction array must contain {self.DIMENSIONS * self.DIMENSIONS} elements."
)
self._set_root_attribute(self.DIRECTION_ATTR, direction)
def add_point_data(
self, name: str, data: npt.NDArray, offset: Optional[npt.NDArray[np.intc]] = None
):
"""Add point data to the VTKHDF file.
Args:
name: Name of the dataset.
data: Data to be saved.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
Raises:
ValueError: Raised if data has invalid dimensions.
"""
points_shape = self.shape + 1
if offset is None and any(data.shape != points_shape): # type: ignore
raise ValueError(
"If no offset is specified, data.shape must be one larger in each dimension than"
f" this vtkImageData object. {data.shape} != {points_shape}"
)
return super().add_point_data(name, data, points_shape, offset)
def add_cell_data(
self, name: str, data: npt.NDArray, offset: Optional[npt.NDArray[np.intc]] = None
):
"""Add cell data to the VTKHDF file.
Args:
name: Name of the dataset.
data: Data to be saved.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
Raises:
ValueError: Raised if data has invalid dimensions.
"""
if offset is None and any(data.shape != self.shape): # type: ignore
raise ValueError(
"If no offset is specified, data.shape must match the dimensions of this"
f" VtkImageData object. {data.shape} != {self.shape}"
)
return super().add_cell_data(name, data, self.shape, offset)
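A minimal serial usage sketch for this handler (illustrative values; "voxels.vtkhdf" is a hypothetical filename):

import numpy as np

shape = np.array([10, 10, 10], dtype=np.intc)  # number of cells per dimension
material = np.zeros(shape, dtype=np.uint32)
with VtkImageData("voxels.vtkhdf", shape, spacing=np.full(3, 0.001)) as f:
    f.add_cell_data("Material", material)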


@@ -0,0 +1,172 @@
import logging
from os import PathLike
from typing import Literal, Optional, Union
import numpy as np
import numpy.typing as npt
from mpi4py import MPI
from gprMax.vtkhdf_filehandlers.vtkhdf import VtkCellType, VtkHdfFile
logger = logging.getLogger(__name__)
class VtkUnstructuredGrid(VtkHdfFile):
"""File handler for creating a VTKHDF Unstructured Grid file.
File format information is available here:
https://docs.vtk.org/en/latest/design_documents/VTKFileFormats.html#unstructured-grid
"""
class Dataset(VtkHdfFile.Dataset):
CONNECTIVITY = "Connectivity"
NUMBER_OF_CELLS = "NumberOfCells"
NUMBER_OF_CONNECTIVITY_IDS = "NumberOfConnectivityIds"
NUMBER_OF_POINTS = "NumberOfPoints"
OFFSETS = "Offsets"
POINTS = "Points"
TYPES = "Types"
@property
def TYPE(self) -> Literal["UnstructuredGrid"]:
return "UnstructuredGrid"
def __init__(
self,
filename: Union[str, PathLike],
points: npt.NDArray,
cell_types: npt.NDArray[VtkCellType],
connectivity: npt.NDArray,
cell_offsets: npt.NDArray,
comm: Optional[MPI.Comm] = None,
) -> None:
"""Create a new VtkUnstructuredGrid file.
An unstructured grid has N points and C cells. A cell is defined
as a collection of points which is specified by the connectivity
and cell_offsets arguments along with the list of cell_types.
If the file already exists, it will be overridden. Required
attributes (Type and Version) will be written to the file.
The file will be opened using the 'mpio' h5py driver if an MPI
communicator is provided.
Args:
filename: Name of the file (can be a file path). The file
extension will be set to '.vtkhdf'.
points: Array of point coordinates of shape (N, 3).
cell_types: Array of VTK cell types of shape (C,).
connectivity: Array of point IDs that together with
cell_offsets, defines the points that make up each cell.
Each point ID has a value between 0 and (N-1) inclusive
and corresponds to a point in the points array.
cell_offsets: Array listing where each cell starts and ends
in the connectivity array. It has shape (C + 1,).
comm (optional): MPI communicator containing all ranks that
want to write to the file.
Raises:
ValueError: Raised if argument dimensions are invalid.
"""
super().__init__(filename, comm)
if len(cell_offsets) != len(cell_types) + 1:
raise ValueError(
"cell_offsets should be one longer than cell_types."
" I.e. one longer than the number of cells"
)
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
if not is_sorted(cell_offsets):
raise ValueError("cell_offsets should be sorted in ascending order")
if len(connectivity) < cell_offsets[-1]:
raise ValueError("Connectivity array is shorter than final cell_offsets value")
elif len(connectivity) > cell_offsets[-1]:
logger.warning(
"Connectivity array longer than final cell_offsets value."
" Some connectivity data will be ignored"
)
self._write_root_dataset(self.Dataset.CONNECTIVITY, connectivity)
self._write_root_dataset(self.Dataset.NUMBER_OF_CELLS, len(cell_types))
self._write_root_dataset(self.Dataset.NUMBER_OF_CONNECTIVITY_IDS, len(connectivity))
self._write_root_dataset(self.Dataset.NUMBER_OF_POINTS, len(points))
self._write_root_dataset(self.Dataset.OFFSETS, cell_offsets)
self._write_root_dataset(self.Dataset.POINTS, points, xyz_data_ordering=False)
self._write_root_dataset(self.Dataset.TYPES, cell_types)
@property
def number_of_cells(self) -> int:
number_of_cells = self._get_root_dataset(self.Dataset.NUMBER_OF_CELLS)
return np.sum(number_of_cells, dtype=np.intc)
@property
def number_of_connectivity_ids(self) -> int:
number_of_connectivity_ids = self._get_root_dataset(self.Dataset.NUMBER_OF_CONNECTIVITY_IDS)
return np.sum(number_of_connectivity_ids, dtype=np.intc)
@property
def number_of_points(self) -> int:
number_of_points = self._get_root_dataset(self.Dataset.NUMBER_OF_POINTS)
return np.sum(number_of_points, dtype=np.intc)
def add_point_data(
self, name: str, data: npt.NDArray, offset: Optional[npt.NDArray[np.intc]] = None
):
"""Add point data to the VTKHDF file.
Args:
name: Name of the dataset.
data: Data to be saved.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
Raises:
ValueError: Raised if data has invalid dimensions.
"""
shape = np.array(data.shape)
number_of_dimensions = len(shape)
if number_of_dimensions < 1 or number_of_dimensions > 2:
raise ValueError(f"Data must have 1 or 2 dimensions, not {number_of_dimensions}")
elif len(data) != self.number_of_points:
raise ValueError(
"Length of data must match the number of points in the vtkUnstructuredGrid."
f" {len(data)} != {self.number_of_points}"
)
elif number_of_dimensions == 2 and shape[1] != 1 and shape[1] != 3:
raise ValueError(f"The second dimension should have shape 1 or 3, not {shape[1]}")
return super().add_point_data(name, data, shape, offset)
def add_cell_data(
self, name: str, data: npt.NDArray, offset: Optional[npt.NDArray[np.intc]] = None
):
"""Add cell data to the VTKHDF file.
Args:
name: Name of the dataset.
data: Data to be saved.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
Raises:
ValueError: Raised if data has invalid dimensions.
"""
shape = np.array(data.shape)
number_of_dimensions = len(shape)
if number_of_dimensions < 1 or number_of_dimensions > 2:
raise ValueError(f"Data must have 1 or 2 dimensions, not {number_of_dimensions}.")
elif len(data) != self.number_of_cells:
raise ValueError(
"Length of data must match the number of cells in the vtkUnstructuredGrid."
f" {len(data)} != {self.number_of_cells}"
)
elif number_of_dimensions == 2 and shape[1] != 1 and shape[1] != 3:
raise ValueError(f"The second dimension should have shape 1 or 3, not {shape[1]}")
return super().add_cell_data(name, data, shape, offset)
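A minimal usage sketch (illustrative values), assuming the classes above are importable: two LINE cells sharing a point, each tagged with a material ID:

import numpy as np

points = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0]])
cell_types = np.full(2, VtkCellType.LINE)
connectivity = np.array([0, 1, 1, 2], dtype=np.intc)  # cell 0: 0-1, cell 1: 1-2
cell_offsets = np.array([0, 2, 4], dtype=np.intc)
with VtkUnstructuredGrid("lines.vtkhdf", points, cell_types, connectivity, cell_offsets) as f:
    f.add_cell_data("Material", np.array([1, 2], dtype=np.uint32))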


@@ -0,0 +1,386 @@
import logging
from abc import abstractmethod
from contextlib import AbstractContextManager
from enum import Enum
from os import PathLike
from pathlib import Path
from types import TracebackType
from typing import Optional, Union
import h5py
import numpy as np
import numpy.typing as npt
from mpi4py import MPI
logger = logging.getLogger(__name__)
class VtkHdfFile(AbstractContextManager):
VERSION = [2, 2]
FILE_EXTENSION = ".vtkhdf"
ROOT_GROUP = "VTKHDF"
# TODO: Can this be moved to using an Enum like root datasets?
# Main barrier: Can't subclass an enum with members and any base
# Enum class would need VERSION and TYPE as members.
VERSION_ATTR = "Version"
TYPE_ATTR = "Type"
class Dataset(str, Enum):
pass
@property
@abstractmethod
def TYPE(self) -> str:
pass
def __enter__(self):
return self
def __init__(self, filename: Union[str, PathLike], comm: Optional[MPI.Comm] = None) -> None:
"""Create a new VtkHdfFile.
If the file already exists, it will be overridden. Required
attributes (Type and Version) will be written to the file.
The file will be opened using the 'mpio' h5py driver if an MPI
communicator is provided.
Args:
filename: Name of the file (can be a file path). The file
extension will be set to '.vtkhdf'.
comm (optional): MPI communicator containing all ranks that
want to write to the file.
"""
# Ensure the filename uses the correct extension
self.filename = Path(filename)
if self.filename.suffix != "" and self.filename.suffix != self.FILE_EXTENSION:
logger.warning(
f"Invalid file extension '{self.filename.suffix}' for VTKHDF file. Changing to '{self.FILE_EXTENSION}'."
)
self.filename = self.filename.with_suffix(self.FILE_EXTENSION)
self.comm = comm
# Check if the filehandler should use an MPI driver
if self.comm is None:
self.file_handler = h5py.File(self.filename, "w")
else:
self.file_handler = h5py.File(self.filename, "w", driver="mpio", comm=self.comm)
self.root_group = self.file_handler.create_group(self.ROOT_GROUP)
# Set required Version and Type root attributes
self._set_root_attribute(self.VERSION_ATTR, self.VERSION)
type_as_ascii = self.TYPE.encode("ascii")
self._set_root_attribute(
self.TYPE_ATTR, type_as_ascii, h5py.string_dtype("ascii", len(type_as_ascii))
)
def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> Optional[bool]:
"""Close the file when the context is exited.
The parameters describe the exception that caused the context to
be exited. If the context was exited without an exception, all
three arguments will be None. Any exception will be
processed normally upon exit from this method.
Returns:
suppress_exception (optional): Returns True if the exception
should be suppressed (i.e. not propagated). Otherwise,
the exception will be processed normally upon exit from
this method.
"""
self.close()
def close(self) -> None:
"""Close the file handler"""
self.file_handler.close()
def _get_root_attribute(self, attribute: str) -> npt.NDArray:
"""Get attribute from the root VTKHDF group if it exists.
Args:
attribute: Name of the attribute.
Returns:
value: Current value of the attribute if it exists.
Raises:
KeyError: Raised if the attribute is not present as a key.
"""
value = self.root_group.attrs[attribute]
if isinstance(value, h5py.Empty):
raise KeyError(f"Attribute '{attribute}' not present in /{self.ROOT_GROUP} group")
return value
def _set_root_attribute(
self, attribute: str, value: npt.ArrayLike, dtype: npt.DTypeLike = None
):
"""Set attribute in the root VTKHDF group.
Args:
attribute: Name of the new attribute.
value: An array to initialize the attribute.
dtype (optional): Data type of the attribute. Overrides
value.dtype if both are given.
"""
self.root_group.attrs.create(attribute, value, dtype=dtype)
def _build_dataset_path(self, *path: str) -> str:
"""Build an HDF5 dataset path attached to the root VTKHDF group.
Args:
*path: Components of the required path.
Returns:
path: Path to the dataset.
"""
return "/".join([self.ROOT_GROUP, *path])
def _get_root_dataset(self, name: "VtkHdfFile.Dataset") -> h5py.Dataset:
"""Get specified dataset from the root group of the VTKHDF file.
Args:
path: Name of the dataset.
Returns:
dataset: Returns specified h5py dataset.
"""
path = self._build_dataset_path(name)
return self._get_dataset(path)
def _get_dataset(self, path: str) -> h5py.Dataset:
"""Get specified dataset.
Args:
path: Absolute path to the dataset.
Returns:
dataset: Returns specified h5py dataset.
Raises:
KeyError: Raised if the dataset does not exist, or the path
points to some other object, e.g. a Group not a Dataset.
"""
cls = self.file_handler.get(path, getclass=True)
if cls == "default":
raise KeyError("Path does not exist")
elif cls != h5py.Dataset:
raise KeyError(f"Dataset not found. Found '{cls}' instead")
dataset = self.file_handler.get(path)
assert isinstance(dataset, h5py.Dataset)
return dataset
def _write_root_dataset(
self,
name: "VtkHdfFile.Dataset",
data: npt.ArrayLike,
shape: Optional[npt.NDArray[np.intc]] = None,
offset: Optional[npt.NDArray[np.intc]] = None,
xyz_data_ordering=True,
):
"""Write specified dataset to the root group of the VTKHDF file.
Args:
name: Name of the dataset.
data: Data to initialize the dataset.
shape (optional): Size of the full dataset being created.
Can be omitted if data provides the full dataset.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
xyz_data_ordering (optional): If True, the data will be
transposed as VTKHDF stores datasets using ZYX ordering.
Default True.
"""
path = self._build_dataset_path(name)
self._write_dataset(
path, data, shape=shape, offset=offset, xyz_data_ordering=xyz_data_ordering
)
def _write_dataset(
self,
path: str,
data: npt.ArrayLike,
shape: Optional[npt.NDArray[np.intc]] = None,
offset: Optional[npt.NDArray[np.intc]] = None,
dtype: Optional[npt.DTypeLike] = None,
xyz_data_ordering=True,
):
"""Write specified dataset to the VTKHDF file.
If data has shape (d1, d2, ..., dn), i.e. n dimensions, then, if
specified, shape and offset must be of length n.
Args:
path: Absolute path to the dataset.
data: Data to initialize the dataset.
shape (optional): Size of the full dataset being created.
Can be omitted if data provides the full dataset.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
xyz_data_ordering (optional): If True, the data will be
transposed as VTKHDF stores datasets using ZYX ordering.
Default True.
Raises:
ValueError: Raised if the combination of data.shape, shape,
and offset are invalid.
"""
# If dtype is a string, ensure it is ascii encoded and use
# variable length strings
if dtype is not None and h5py.check_string_dtype(dtype) is not None:
dtype = h5py.string_dtype(encoding="ascii", length=None)
if not isinstance(data, np.ndarray):
data = np.array(data, dtype=dtype)
if data.ndim < 1:
data = np.expand_dims(data, axis=-1)
if dtype is None:
dtype = data.dtype
# VTKHDF stores datasets using ZYX ordering rather than XYZ
if xyz_data_ordering:
data = data.transpose()
if shape is not None:
shape = np.flip(shape)
if offset is not None:
offset = np.flip(offset)
if shape is None or all(shape == data.shape):
self.file_handler.create_dataset(path, data=data, dtype=dtype)
elif offset is None:
raise ValueError(
"Offset must not be None as the full dataset has not been provided."
" I.e. data.shape != shape"
)
else:
dimensions = len(data.shape)
if dimensions != len(shape):
raise ValueError(
"The data and specified shape must have the same number of dimensions."
f" {dimensions} != {len(shape)}"
)
if dimensions != len(offset):
raise ValueError(
"The data and specified offset must have the same number of dimensions."
f" {dimensions} != {len(offset)}"
)
if any(offset + data.shape > shape):
raise ValueError(
"The provided offset and data does not fit within the bounds of the dataset."
f" {offset} + {data.shape} = {offset + data.shape} > {shape}"
)
dataset = self.file_handler.create_dataset(path, shape=shape, dtype=dtype)
start = offset
stop = offset + data.shape
dataset_slice = tuple(slice(start[i], stop[i]) for i in range(dimensions))
dataset[dataset_slice] = data
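The shape/offset path is what enables collective MPI writes: every rank participates in creating the full dataset, then fills only its own slab. A hedged sketch using the VtkImageData subclass from this commit (assumes h5py built with parallel support; layout values are illustrative):

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
cells_per_rank = 8 // comm.size                # assumes comm.size divides 8
shape = np.array([8, 8, 8], dtype=np.intc)     # global cell dimensions
local = np.full((cells_per_rank, 8, 8), comm.rank, dtype=np.uint32)
offset = np.array([comm.rank * cells_per_rank, 0, 0], dtype=np.intc)
with VtkImageData("parallel.vtkhdf", shape, comm=comm) as f:
    f.add_cell_data("Rank", local, offset=offset)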
def add_point_data(
self,
name: str,
data: npt.NDArray,
shape: Optional[npt.NDArray[np.intc]] = None,
offset: Optional[npt.NDArray[np.intc]] = None,
):
"""Add point data to the VTKHDF file.
Args:
name: Name of the dataset.
data: Data to be saved.
shape (optional): Size of the full dataset being created.
Can be omitted if data provides the full dataset.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
"""
dataset_path = self._build_dataset_path("PointData", name)
self._write_dataset(dataset_path, data, shape=shape, offset=offset)
def add_cell_data(
self,
name: str,
data: npt.NDArray,
shape: Optional[npt.NDArray[np.intc]] = None,
offset: Optional[npt.NDArray[np.intc]] = None,
):
"""Add cell data to the VTKHDF file.
Args:
name: Name of the dataset.
data: Data to be saved.
shape (optional): Size of the full dataset being created.
Can be omitted if data provides the full dataset.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
"""
dataset_path = self._build_dataset_path("CellData", name)
self._write_dataset(dataset_path, data, shape=shape, offset=offset)
def add_field_data(
self,
name: str,
data: npt.ArrayLike,
shape: Optional[npt.NDArray[np.intc]] = None,
offset: Optional[npt.NDArray[np.intc]] = None,
dtype: Optional[npt.DTypeLike] = None,
):
"""Add field data to the VTKHDF file.
Args:
name: Name of the dataset.
data: Data to be saved.
shape (optional): Size of the full dataset being created.
Can be omitted if data provides the full dataset.
offset (optional): Offset to store the provided data at. Can
be omitted if data provides the full dataset.
"""
dataset_path = self._build_dataset_path("FieldData", name)
self._write_dataset(
dataset_path, data, shape=shape, offset=offset, dtype=dtype, xyz_data_ordering=False
)
class VtkCellType(np.uint8, Enum):
"""VTK cell types as defined here:
https://vtk.org/doc/nightly/html/vtkCellType_8h_source.html#l00019
"""
# Linear cells
EMPTY_CELL = 0
VERTEX = 1
POLY_VERTEX = 2
LINE = 3
POLY_LINE = 4
TRIANGLE = 5
TRIANGLE_STRIP = 6
POLYGON = 7
PIXEL = 8
QUAD = 9
TETRA = 10
VOXEL = 11
HEXAHEDRON = 12
WEDGE = 13
PYRAMID = 14
PENTAGONAL_PRISM = 15
HEXAGONAL_PRISM = 16


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4a8134fca3e6228211e4a4bc3e504e64f888c55f22e025b482ec501e0051d253
size 75740296


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2ccd95ff4ef73c224c99a8c321ad6f8014c673a1b33bbb741bff415dfbffd0aa
size 4931176


@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:82b8ac914a9fe20b49f2512215782f01305a2e3211aaac51913f88296a0e3ac5
size 4012444


@@ -182,7 +182,7 @@ class GprMaxBaseTest(RunOnlyRegressionTest):
else:
return self.getdep(variant)
def build_reference_filepath(self, name: Union[str, os.PathLike]) -> Path:
def build_reference_filepath(self, name: Union[str, os.PathLike], suffix: str = ".h5") -> Path:
"""Build path to the specified reference file.
Reference files are saved in directories per test case. If this
@@ -192,6 +192,7 @@ class GprMaxBaseTest(RunOnlyRegressionTest):
Args:
name: Name of the file.
suffix: File extension. Default ".h5".
Returns:
filepath: Absolute path to the reference file.
@@ -202,7 +203,7 @@ class GprMaxBaseTest(RunOnlyRegressionTest):
else:
reference_dir = target.short_name
reference_file = Path("regression_checks", reference_dir, name).with_suffix(".h5")
reference_file = Path("regression_checks", reference_dir, name).with_suffix(suffix)
return reference_file.absolute()
# TODO: Change CreatePyenvTest to a fixture instead of a test dependency


@@ -9,6 +9,7 @@ from typing_extensions import TYPE_CHECKING
from reframe_tests.tests.base_tests import GprMaxBaseTest
from reframe_tests.tests.regression_checks import (
GeometryViewRegressionCheck,
ReceiverRegressionCheck,
RegressionCheck,
SnapshotRegressionCheck,
@@ -77,6 +78,50 @@ class SnapshotMixin(GprMaxMixin):
self.regression_checks.append(regression_check)
class GeometryOnlyMixin(GprMaxMixin):
"""Run test with geometry only flag"""
@run_after("setup")
def add_geometry_only_flag(self):
self.executable_opts += ["--geometry-only"]
class GeometryViewMixin(GprMaxMixin):
"""Add regression tests for geometry views.
Attributes:
geometry_views (list[str]): List of geometry views to run
regression checks on.
"""
geometry_views = variable(typ.List[str], value=[])
def build_geometry_view_filepath(self, geometry_view: str) -> Path:
"""Build filepath to the specified geometry view.
Args:
geometry_view: Name of the geometry view.
"""
return Path(geometry_view).with_suffix(".vtkhdf")
@run_after("setup")
def add_geometry_view_regression_checks(self):
"""Add a regression check for each geometry view.
The test will be skipped if no geometry views have been specified.
"""
self.skip_if(
len(self.geometry_views) == 0,
"Must provide a list of geometry views.",
)
for geometry_view in self.geometry_views:
geometry_view_file = self.build_geometry_view_filepath(geometry_view)
reference_file = self.build_reference_filepath(geometry_view, ".vtkhdf")
regression_check = GeometryViewRegressionCheck(geometry_view_file, reference_file)
self.regression_checks.append(regression_check)
class PythonApiMixin(GprMaxMixin):
"""Use the GprMax Python API rather than a standard input file."""


@@ -74,7 +74,12 @@ class RegressionCheck:
h5diff = "h5diff"
h5diff_output = osext.run_command(
[h5diff, *self.h5diff_options, str(self.output_file), str(self.reference_file)]
[
h5diff,
*self.h5diff_options,
str(self.output_file.absolute()),
str(self.reference_file),
]
)
return sn.assert_true(
@@ -135,3 +140,11 @@ class SnapshotRegressionCheck(RegressionCheck):
@property
def error_msg(self) -> str:
return f"Snapshot '{self.output_file.name}' failed regression check "
class GeometryViewRegressionCheck(RegressionCheck):
"""Run regression check on a GprMax GeometryView."""
@property
def error_msg(self) -> str:
return f"GeometryView '{self.output_file.name}' failed regression check "


@@ -0,0 +1,17 @@
#title: Hertzian dipole in free-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.050 0.050 0.050 myWave
#material: 4.9 0 1 0 myWater
#material: 2 0 1.4 0 unusedMaterial
#material: 3 0 2 0 boxMaterial
#sphere: 0.05 0.05 0.05 0.03 myWater y
#box: 0.01 0.01 0.01 0.09 0.025 0.025 boxMaterial y
#geometry_view: 0.02 0.02 0.02 0.06 0.06 0.06 0.001 0.001 0.001 partial_volume f
#geometry_view: 0 0 0 0.1 0.1 0.1 0.001 0.001 0.001 full_volume f


@@ -0,0 +1,17 @@
#title: Hertzian dipole in free-space
#domain: 0.100 0.100 0.100
#dx_dy_dz: 0.001 0.001 0.001
#time_window: 3e-9
#waveform: gaussiandot 1 1e9 myWave
#hertzian_dipole: z 0.050 0.050 0.050 myWave
#material: 4.9 0 1 0 myWater
#material: 2 0 1.4 0 unusedMaterial
#material: 3 0 2 0 boxMaterial
#sphere: 0.05 0.05 0.05 0.03 myWater y
#box: 0.01 0.01 0.01 0.09 0.025 0.025 boxMaterial y
#geometry_view: 0.02 0.02 0.02 0.06 0.06 0.06 0.001 0.001 0.001 partial_volume n
#geometry_view: 0 0 0 0.1 0.1 0.1 0.001 0.001 0.001 full_volume n


@@ -1,5 +1,10 @@
from reframe_tests.tests.base_tests import GprMaxBaseTest
from reframe_tests.tests.mixins import ReceiverMixin, SnapshotMixin
from reframe_tests.tests.mixins import (
GeometryOnlyMixin,
GeometryViewMixin,
ReceiverMixin,
SnapshotMixin,
)
class GprMaxRegressionTest(ReceiverMixin, GprMaxBaseTest):
@@ -8,3 +13,7 @@ class GprMaxRegressionTest(ReceiverMixin, GprMaxBaseTest):
class GprMaxSnapshotTest(SnapshotMixin, GprMaxBaseTest):
pass
class GprMaxGeometryViewTest(GeometryViewMixin, GeometryOnlyMixin, GprMaxBaseTest):
pass


@@ -0,0 +1,20 @@
import reframe as rfm
from reframe.core.builtins import parameter
from reframe_tests.tests.mixins import MpiMixin
from reframe_tests.tests.standard_tests import GprMaxGeometryViewTest
@rfm.simple_test
class TestGeometryView(GprMaxGeometryViewTest):
tags = {"test", "serial", "geometry only", "geometry view"}
sourcesdir = "src/geometry_view_tests"
model = parameter(["geometry_view_voxel", "geometry_view_fine"])
geometry_views = ["partial_volume", "full_volume"]
@rfm.simple_test
class TestGeometryViewMPI(MpiMixin, TestGeometryView):
tags = {"test", "mpi", "geometry only", "geometry view"}
mpi_layout = parameter([[2, 2, 2], [4, 4, 1]])
test_dependency = TestGeometryView


@@ -16,69 +16,103 @@
# You should have received a copy of the GNU General Public License
# along with gprMax. If not, see <http://www.gnu.org/licenses/>.
import json
import os
from paraview.servermanager import Proxy, SourceProxy
from paraview.simple import (
AppendDatasets,
Box,
ColorBy,
FetchData,
GetActiveSource,
GetActiveView,
GetDisplayProperties,
GetParaViewVersion,
Hide,
OpenDataFile,
RenameSource,
RenderAllViews,
SetActiveSource,
Show,
Threshold,
)
from paraview.vtk.numpy_interface import dataset_adapter as dsa
from vtkmodules.vtkCommonCore import vtkStringArray
COLOUR_SCALARS = ("CELLS", "Material")
def threshold_filt(input, lt, ut, scalars):
def threshold_materials(source: SourceProxy, view: Proxy, material_ids: vtkStringArray):
"""Create threshold filter according to Paraview version.
Args:
input (array): input data to threshold filter
lt, ut (int): lower and upper bounds of thresholding operation
scalars (list/str): name of scalar array to perform thresholding
Returns:
threshold (object): threshold filter
source: Input to the threshold filter.
view: The view proxy to show the threshold in.
material_ids: Array of material ids. A new threshold filter will
be created for each material.
"""
# Read Paraview version number to set threshold filter method
pvv = GetParaViewVersion()
threshold = Threshold(Input=input)
threshold.Scalars = scalars
for index in range(material_ids.GetNumberOfValues()):
threshold = Threshold(Input=source, Scalars=COLOUR_SCALARS)
if pvv.major == 5 and pvv.minor < 10:
threshold.ThresholdRange = [lt, ut]
threshold.ThresholdRange = [index, index]
else:
threshold.LowerThreshold = lt
threshold.UpperThreshold = ut
threshold.LowerThreshold = index
threshold.UpperThreshold = index
return threshold
RenameSource(material_ids.GetValue(index), threshold)
# Show data in view, except for free_space
if index != 1:
Show(threshold, view)
threshold.UpdatePipeline()
def display_pmls(pmlthick, dx_dy_dz, nx_ny_nz):
"""Display PMLs as box sources using PML thickness values.
Only suitable for gprMax >= v4
def create_box_sources(names: vtkStringArray, positions: dsa.VTKArray, dl: dsa.VTKArray):
"""Create new single cell box sources.
Args:
pmlthick (tuple): PML thickness values for each slab (cells)
dx_dy_dz (tuple): Spatial resolution (m)
nx_ny_dz (tuple): Domain size (cells)
names: Array of N names for the new sources.
positions: x, y, z coordinates of the new sources. This should
have shape (N, 3).
dl: x, y, z spatial resolution.
"""
for index in range(names.GetNumberOfValues()):
name = names.GetValue(index)
pos = positions[index]
src = Box(
Center=pos + dl / 2,
XLength=dl[0],
YLength=dl[1],
ZLength=dl[2],
)
RenameSource(name, src)
Show(src)
def display_pmls(pmlthick: dsa.VTKArray, dx_dy_dz: dsa.VTKArray, nx_ny_nz: dsa.VTKArray):
"""Display PMLs as box sources using PML thickness values.
Only suitable for gprMax >= v4.
Args:
pmlthick: PML thickness values for each slab (cells).
dx_dy_dz: Spatial resolution (m).
nx_ny_nz: Domain size (cells).
"""
pml_names = ["x0", "y0", "z0", "xmax", "ymax", "zmax"]
pmls = dict.fromkeys(pml_names, None)
SetActiveSource(pv_data)
if pmlthick[0] != 0:
x0 = Box(
Center=[pmlthick[0] * dx_dy_dz[0] / 2, nx_ny_nz[1] * dx_dy_dz[1] / 2, nx_ny_nz[2] * dx_dy_dz[2] / 2],
Center=[
pmlthick[0] * dx_dy_dz[0] / 2,
nx_ny_nz[1] * dx_dy_dz[1] / 2,
nx_ny_nz[2] * dx_dy_dz[2] / 2,
],
XLength=pmlthick[0] * dx_dy_dz[0],
YLength=nx_ny_nz[1] * dx_dy_dz[1],
ZLength=nx_ny_nz[2] * dx_dy_dz[2],
@@ -100,7 +134,11 @@ def display_pmls(pmlthick, dx_dy_dz, nx_ny_nz):
if pmlthick[1] != 0:
y0 = Box(
Center=[nx_ny_nz[0] * dx_dy_dz[0] / 2, pmlthick[1] * dx_dy_dz[1] / 2, nx_ny_nz[2] * dx_dy_dz[2] / 2],
Center=[
nx_ny_nz[0] * dx_dy_dz[0] / 2,
pmlthick[1] * dx_dy_dz[1] / 2,
nx_ny_nz[2] * dx_dy_dz[2] / 2,
],
XLength=nx_ny_nz[0] * dx_dy_dz[0],
YLength=pmlthick[1] * dx_dy_dz[1],
ZLength=nx_ny_nz[2] * dx_dy_dz[2],
@@ -122,7 +160,11 @@ def display_pmls(pmlthick, dx_dy_dz, nx_ny_nz):
if pmlthick[2] != 0:
z0 = Box(
Center=[nx_ny_nz[0] * dx_dy_dz[0] / 2, nx_ny_nz[1] * dx_dy_dz[1] / 2, pmlthick[2] * dx_dy_dz[2] / 2],
Center=[
nx_ny_nz[0] * dx_dy_dz[0] / 2,
nx_ny_nz[1] * dx_dy_dz[1] / 2,
pmlthick[2] * dx_dy_dz[2] / 2,
],
XLength=nx_ny_nz[0] * dx_dy_dz[0],
YLength=nx_ny_nz[1] * dx_dy_dz[1],
ZLength=pmlthick[2] * dx_dy_dz[2],
@@ -158,96 +200,122 @@ def display_pmls(pmlthick, dx_dy_dz, nx_ny_nz):
pml_view.Opacity = 0.5
def is_valid_key(key: str, dictionary: dict) -> bool:
"""Check key exists and is not an empty array (i.e. VTKNoneArray).
Args:
key: Name of key to check.
dictionary: Dictionary like object that should contain the key.
Returns:
is_valid_key: True if the key is in the dictionary and is not
of type VTKNoneArray. False otherwise.
"""
return key in dictionary.keys() and not isinstance(dictionary[key], dsa.VTKNoneArray)
# Get whatever source is loaded - should be loaded file (.vt*) or files (.pvd)
pv_data = GetActiveSource()
# Hide display of root data
Hide(pv_data)
# Single .vti or .vtu file
file = pv_data.FileName[0]
# Read and display data from file, i.e. materials, sources, receivers, and PMLs
with open(file, "rb") as f:
# Comments () embedded in line 3 of file
f.readline()
f.readline()
c = f.readline().decode()
# Strip comment tags
c = c[5:-5]
# Model information
c = json.loads(c)
print("\ngprMax version: " + c["gprMax_version"])
print(file)
class HaltException(Exception):
pass
try:
print("=============== Running gprMax macro ===============", end="\n\n")
# Get active source - should be loaded file (.vtkhdf)
pv_source = GetActiveSource()
if pv_source is None:
raise HaltException("ERROR: No active source.")
pv_source.UpdatePipeline()
raw_data = FetchData(pv_source)[0]
data = dsa.WrapDataObject(raw_data)
# Check the necessary metadata is attached to the file
metadata_keys = data.FieldData.keys()
compulsory_keys = ["gprMax_version", "nx_ny_nz", "dx_dy_dz", "material_ids"]
missing_keys = []
for key in compulsory_keys:
if key not in metadata_keys:
missing_keys.append(key)
if len(missing_keys) > 0:
keys = "\n- ".join(missing_keys)
print(f"Missing metadata fields: \n- {keys}\n")
if len(metadata_keys) > 0:
keys = "\n- ".join(metadata_keys)
print(f"Found: \n- {keys}\n")
raise HaltException(
"ERROR: Missing gprMax metadata information. Do you have the correct source selected?"
)
# gprMax version
version = data.FieldData["gprMax_version"].GetValue(0)
# Number of voxels
nl = data.FieldData["nx_ny_nz"]
# Discretisation
dl = data.FieldData["dx_dy_dz"]
print("gprMax version:", data.FieldData["gprMax_version"].GetValue(0))
print("nx_ny_nz:", nl)
print("dx_dy_dz:", dl, end="\n\n")
################
# Display data #
################
# Get active view
pv_view = GetActiveView()
pv_view.AxesGrid.Visibility = 1 # Show Data Axes Grid
pv_disp = Show(pv_data, pv_view)
pv_disp.ColorArrayName = ["CELLS", "Material"]
# Discretisation
dl = c["dx_dy_dz"]
# Number of voxels
nl = c["nx_ny_nz"]
# Get display properties
pv_disp = GetDisplayProperties(pv_source, view=pv_view)
# Set scalar colouring
ColorBy(pv_disp, COLOUR_SCALARS)
# Materials
try:
for i, mat in enumerate(c["Materials"]):
threshold = threshold_filt(pv_data, i, i, ["CELLS", "Material"])
RenameSource(mat, threshold)
# Show data in view, except for free_space
if i != 1:
thresholddisplay = Show(threshold, pv_view)
thresholddisplay.ColorArrayName = ["CELLS", "Material"]
threshold.UpdatePipeline()
except KeyError:
print("No materials to load")
print("Loading materials... ", end="\t")
if is_valid_key("material_ids", data.FieldData):
threshold_materials(pv_source, pv_view, data.FieldData["material_ids"])
print("Done.")
else:
print("No materials to load.")
# Display any sources
try:
for item in c["Sources"]:
pos = item["position"]
name = item["name"]
src = Box(
Center=[pos[0] + dl[0] / 2, pos[1] + dl[1] / 2, pos[2] + dl[2] / 2],
XLength=dl[0],
YLength=dl[1],
ZLength=dl[2],
)
RenameSource(name, src)
Show(src)
except KeyError:
print("No sources to load")
print("Loading sources... ", end="\t\t")
if is_valid_key("source_ids", data.FieldData) and is_valid_key("sources", data.FieldData):
create_box_sources(data.FieldData["source_ids"], data.FieldData["sources"], dl)
print("Done.")
else:
print("No sources to load.")
# Display any receivers
try:
for item in c["Receivers"]:
pos = item["position"]
name = item["name"]
rx = Box(
Center=[pos[0] + dl[0] / 2, pos[1] + dl[1] / 2, pos[2] + dl[2] / 2],
XLength=dl[0],
YLength=dl[1],
ZLength=dl[2],
)
RenameSource(name, rx)
Show(rx)
except KeyError:
print("No receivers to load")
print("Loading receivers... ", end="\t")
if is_valid_key("receiver_ids", data.FieldData) and is_valid_key("receivers", data.FieldData):
create_box_sources(data.FieldData["receiver_ids"], data.FieldData["receivers"], dl)
print("Done.")
else:
print("No receivers to load.")
# Display any PMLs
try:
pt = c["PMLthickness"]
display_pmls(pt, dl, nl)
except KeyError:
print("No PMLs to load")
print("Loading PMLs... ", end="\t\t")
if is_valid_key("pml_thickness", data.FieldData):
display_pmls(data.FieldData["pml_thickness"], dl, nl)
print("Done.")
else:
print("No PMLs to load.")
RenderAllViews()
SetActiveSource(pv_source)
Hide(pv_source)
# Reset view to fit data
pv_view.ResetCamera()
except HaltException as e:
print(e)
print()