diff --git a/gprMax/__init__.py b/gprMax/__init__.py index 65153178..9010516f 100644 --- a/gprMax/__init__.py +++ b/gprMax/__init__.py @@ -11,7 +11,6 @@ from ._version import __version__ from .cmds_single_use import Discretisation from .cmds_single_use import Domain from .cmds_single_use import TimeWindow -from .cmds_single_use import Messages from .cmds_single_use import Title from .cmds_single_use import NumThreads from .cmds_single_use import TimeStepStabilityFactor diff --git a/gprMax/cmds_geometry/add_grass.py b/gprMax/cmds_geometry/add_grass.py index d36bfd23..41220bde 100644 --- a/gprMax/cmds_geometry/add_grass.py +++ b/gprMax/cmds_geometry/add_grass.py @@ -28,7 +28,7 @@ from ..fractals import Grass from ..materials import Material from ..utilities import round_value -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) class AddGrass(UserObjectGeometry): @@ -193,4 +193,4 @@ class AddGrass(UserObjectGeometry): volume.fractalsurfaces.append(surface) - log.info(f'{n_blades} blades of grass on surface from {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m with fractal dimension {surface.dimension:g}, fractal seeding {surface.seed}, and range {limits[0]:g}m to {limits[1]:g}m, added to {surface.operatingonID}.') + logger.info(f'{n_blades} blades of grass on surface from {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m with fractal dimension {surface.dimension:g}, fractal seeding {surface.seed}, and range {limits[0]:g}m to {limits[1]:g}m, added to {surface.operatingonID}.') diff --git a/gprMax/cmds_geometry/add_surface_roughness.py b/gprMax/cmds_geometry/add_surface_roughness.py index 6cd4dcbc..83aec2e2 100644 --- a/gprMax/cmds_geometry/add_surface_roughness.py +++ b/gprMax/cmds_geometry/add_surface_roughness.py @@ -27,7 +27,7 @@ from ..exceptions import CmdInputError from ..fractals import FractalSurface from ..utilities import round_value -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) class AddSurfaceRoughness(UserObjectGeometry): @@ -158,4 +158,4 @@ class AddSurfaceRoughness(UserObjectGeometry): surface.generate_fractal_surface(grid) volume.fractalsurfaces.append(surface) - log.info(f'Fractal surface from {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m with fractal dimension {surface.dimension:g}, fractal weightings {surface.weighting[0]:g}, {surface.weighting[1]:g}, fractal seeding {surface.seed}, and range {limits[0]:g}m to {limits[1]:g}m, added to {surface.operatingonID}.') + logger.info(f'Fractal surface from {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m with fractal dimension {surface.dimension:g}, fractal weightings {surface.weighting[0]:g}, {surface.weighting[1]:g}, fractal seeding {surface.seed}, and range {limits[0]:g}m to {limits[1]:g}m, added to {surface.operatingonID}.') diff --git a/gprMax/cmds_geometry/add_surface_water.py b/gprMax/cmds_geometry/add_surface_water.py index dca123c6..2dc9d6f7 100644 --- a/gprMax/cmds_geometry/add_surface_water.py +++ b/gprMax/cmds_geometry/add_surface_water.py @@ -24,7 +24,7 @@ from ..exceptions import CmdInputError from ..materials import Material from ..utilities import round_value -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) class AddSurfaceWater(UserObjectGeometry): @@ -142,4 +142,4 @@ class 
AddSurfaceWater(UserObjectGeometry): if testwater: raise CmdInputError(self.__str__() + ' requires the time step for the model to be less than the relaxation time required to model water.') - log.info(f'Water on surface from {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m with depth {filldepth:g}m, added to {surface.operatingonID}.') + logger.info(f'Water on surface from {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m with depth {filldepth:g}m, added to {surface.operatingonID}.') diff --git a/gprMax/cmds_geometry/box.py b/gprMax/cmds_geometry/box.py index 25a3b261..06f19b9b 100644 --- a/gprMax/cmds_geometry/box.py +++ b/gprMax/cmds_geometry/box.py @@ -26,7 +26,7 @@ from ..cython.geometry_primitives import build_box from ..exceptions import CmdInputError from ..materials import Material -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) class Box(UserObjectGeometry): @@ -116,4 +116,4 @@ class Box(UserObjectGeometry): build_box(xs, xf, ys, yf, zs, zf, numID, numIDx, numIDy, numIDz, averaging, grid.solid, grid.rigidE, grid.rigidH, grid.ID) dielectricsmoothing = 'on' if averaging else 'off' - log.info(f"Box from {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m of material(s) {', '.join(materialsrequested)} created, dielectric smoothing is {dielectricsmoothing}.") + logger.info(f"Box from {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m of material(s) {', '.join(materialsrequested)} created, dielectric smoothing is {dielectricsmoothing}.") diff --git a/gprMax/cmds_geometry/cmds_geometry.py b/gprMax/cmds_geometry/cmds_geometry.py index 45a3ef1f..5912d465 100644 --- a/gprMax/cmds_geometry/cmds_geometry.py +++ b/gprMax/cmds_geometry/cmds_geometry.py @@ -18,7 +18,7 @@ import logging -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) class UserObjectGeometry: @@ -45,4 +45,4 @@ class UserObjectGeometry: def create(self, grid, uip): """Create the object and add it to the grid.""" - log.debug('This method is incomplete') + logger.debug('This method is incomplete') diff --git a/gprMax/cmds_geometry/cylinder.py b/gprMax/cmds_geometry/cylinder.py index 8af19fc2..f0bafebc 100644 --- a/gprMax/cmds_geometry/cylinder.py +++ b/gprMax/cmds_geometry/cylinder.py @@ -25,7 +25,7 @@ from ..cython.geometry_primitives import build_cylinder from ..exceptions import CmdInputError from ..materials import Material -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) class Cylinder(UserObjectGeometry): @@ -121,4 +121,4 @@ class Cylinder(UserObjectGeometry): build_cylinder(x1, y1, z1, x2, y2, z2, r, grid.dx, grid.dy, grid.dz, numID, numIDx, numIDy, numIDz, averaging, grid.solid, grid.rigidE, grid.rigidH, grid.ID) dielectricsmoothing = 'on' if averaging else 'off' - log.info(f"Cylinder with face centres {x1:g}m, {y1:g}m, {z1:g}m and {x2:g}m, {y2:g}m, {z2:g}m, with radius {r:g}m, of material(s) {', '.join(materialsrequested)} created, dielectric smoothing is {dielectricsmoothing}.") + logger.info(f"Cylinder with face centres {x1:g}m, {y1:g}m, {z1:g}m and {x2:g}m, {y2:g}m, {z2:g}m, with radius {r:g}m, of material(s) {', '.join(materialsrequested)} created, dielectric smoothing is {dielectricsmoothing}.") diff --git a/gprMax/cmds_geometry/cylindrical_sector.py 
b/gprMax/cmds_geometry/cylindrical_sector.py index 6185c2a3..4ef0fcbd 100644 --- a/gprMax/cmds_geometry/cylindrical_sector.py +++ b/gprMax/cmds_geometry/cylindrical_sector.py @@ -26,7 +26,7 @@ from ..cython.geometry_primitives import build_cylindrical_sector from ..exceptions import CmdInputError from ..materials import Material -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) class CylindricalSector(UserObjectGeometry): @@ -171,6 +171,6 @@ class CylindricalSector(UserObjectGeometry): if thickness > 0: dielectricsmoothing = 'on' if averaging else 'off' - log.info(f"Cylindrical sector with centre {ctr1:g}m, {ctr2:g}m, radius {r:g}m, starting angle {(sectorstartangle / (2 * np.pi)) * 360:.1f} degrees, sector angle {(sectorangle / (2 * np.pi)) * 360:.1f} degrees, thickness {thickness:g}m, of material(s) {', '.join(materialsrequested)} created, dielectric smoothing is {dielectricsmoothing}.") + logger.info(f"Cylindrical sector with centre {ctr1:g}m, {ctr2:g}m, radius {r:g}m, starting angle {(sectorstartangle / (2 * np.pi)) * 360:.1f} degrees, sector angle {(sectorangle / (2 * np.pi)) * 360:.1f} degrees, thickness {thickness:g}m, of material(s) {', '.join(materialsrequested)} created, dielectric smoothing is {dielectricsmoothing}.") else: - log.info(f"Cylindrical sector with centre {ctr1:g}m, {ctr2:g}m, radius {r:g}m, starting angle {(sectorstartangle / (2 * np.pi)) * 360:.1f} degrees, sector angle {(sectorangle / (2 * np.pi)) * 360:.1f} degrees, of material(s) {', '.join(materialsrequested)} created.") + logger.info(f"Cylindrical sector with centre {ctr1:g}m, {ctr2:g}m, radius {r:g}m, starting angle {(sectorstartangle / (2 * np.pi)) * 360:.1f} degrees, sector angle {(sectorangle / (2 * np.pi)) * 360:.1f} degrees, of material(s) {', '.join(materialsrequested)} created.") diff --git a/gprMax/cmds_geometry/edge.py b/gprMax/cmds_geometry/edge.py index db2e61c8..19a81c24 100644 --- a/gprMax/cmds_geometry/edge.py +++ b/gprMax/cmds_geometry/edge.py @@ -25,7 +25,7 @@ from ..cython.geometry_primitives import build_edge_y from ..cython.geometry_primitives import build_edge_z from ..exceptions import CmdInputError -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) class Edge(UserObjectGeometry): @@ -86,4 +86,4 @@ class Edge(UserObjectGeometry): for k in range(zs, zf): build_edge_z(xs, ys, k, material.numID, grid.rigidE, grid.rigidH, grid.ID) - log.info(f'Edge from {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m of material {material_id} created.') + logger.info(f'Edge from {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m of material {material_id} created.') diff --git a/gprMax/cmds_geometry/fractal_box.py b/gprMax/cmds_geometry/fractal_box.py index effcdf92..965bff56 100644 --- a/gprMax/cmds_geometry/fractal_box.py +++ b/gprMax/cmds_geometry/fractal_box.py @@ -25,7 +25,7 @@ from .cmds_geometry import UserObjectGeometry from ..exceptions import CmdInputError from ..fractals import FractalVolume -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) class FractalBox(UserObjectGeometry): @@ -121,6 +121,6 @@ class FractalBox(UserObjectGeometry): volume.mixingmodel = mixingmodel dielectricsmoothing = 'on' if volume.averaging else 'off' - log.info(f'Fractal box {volume.ID} from {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m with 
{volume.operatingonID}, fractal dimension {volume.dimension:g}, fractal weightings {volume.weighting[0]:g}, {volume.weighting[1]:g}, {volume.weighting[2]:g}, fractal seeding {volume.seed}, with {volume.nbins} material(s) created, dielectric smoothing is {dielectricsmoothing}.') + logger.info(f'Fractal box {volume.ID} from {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m with {volume.operatingonID}, fractal dimension {volume.dimension:g}, fractal weightings {volume.weighting[0]:g}, {volume.weighting[1]:g}, {volume.weighting[2]:g}, fractal seeding {volume.seed}, with {volume.nbins} material(s) created, dielectric smoothing is {dielectricsmoothing}.') grid.fractalvolumes.append(volume) diff --git a/gprMax/cmds_geometry/geometry_objects_read.py b/gprMax/cmds_geometry/geometry_objects_read.py index 36e027c7..51c25d84 100644 --- a/gprMax/cmds_geometry/geometry_objects_read.py +++ b/gprMax/cmds_geometry/geometry_objects_read.py @@ -28,7 +28,7 @@ from ..exceptions import CmdInputError from ..hash_cmds_file import get_user_objects from ..utilities import round_value -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) class GeometryObjectsRead(UserObjectGeometry): @@ -113,8 +113,8 @@ class GeometryObjectsRead(UserObjectGeometry): G.rigidE[:, xs:xs + rigidE.shape[1], ys:ys + rigidE.shape[2], zs:zs + rigidE.shape[3]] = rigidE G.rigidH[:, xs:xs + rigidH.shape[1], ys:ys + rigidH.shape[2], zs:zs + rigidH.shape[3]] = rigidH G.ID[:, xs:xs + ID.shape[1], ys:ys + ID.shape[2], zs:zs + ID.shape[3]] = ID + numexistmaterials - log.info(f'Geometry objects from file {geofile} inserted at {xs * G.dx:g}m, {ys * G.dy:g}m, {zs * G.dz:g}m, with corresponding materials file {matfile}.') + logger.info(f'Geometry objects from file {geofile} inserted at {xs * G.dx:g}m, {ys * G.dy:g}m, {zs * G.dz:g}m, with corresponding materials file {matfile}.') except KeyError: averaging = False build_voxels_from_array(xs, ys, zs, numexistmaterials, averaging, data, G.solid, G.rigidE, G.rigidH, G.ID) - log.info(f'Geometry objects from file (voxels only){geofile} inserted at {xs * G.dx:g}m, {ys * G.dy:g}m, {zs * G.dz:g}m, with corresponding materials file {matfile}.') + logger.info(f'Geometry objects from file (voxels only){geofile} inserted at {xs * G.dx:g}m, {ys * G.dy:g}m, {zs * G.dz:g}m, with corresponding materials file {matfile}.') diff --git a/gprMax/cmds_geometry/plate.py b/gprMax/cmds_geometry/plate.py index 2b7d9a40..4c43a295 100644 --- a/gprMax/cmds_geometry/plate.py +++ b/gprMax/cmds_geometry/plate.py @@ -25,7 +25,7 @@ from ..cython.geometry_primitives import build_face_xz from ..cython.geometry_primitives import build_face_xy from ..exceptions import CmdInputError -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) class Plate(UserObjectGeometry): @@ -134,4 +134,4 @@ class Plate(UserObjectGeometry): for j in range(ys, yf): build_face_xy(i, j, zs, numIDx, numIDy, grid.rigidE, grid.rigidH, grid.ID) - log.info(f"Plate from {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m of material(s) {', '.join(materialsrequested)} created.") + logger.info(f"Plate from {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m of material(s) {', '.join(materialsrequested)} created.") diff --git a/gprMax/cmds_geometry/sphere.py b/gprMax/cmds_geometry/sphere.py index 37d17a83..49a0f7c2 100644 --- 
a/gprMax/cmds_geometry/sphere.py +++ b/gprMax/cmds_geometry/sphere.py @@ -26,7 +26,7 @@ from ..cython.geometry_primitives import build_sphere from ..exceptions import CmdInputError from ..materials import Material -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) class Sphere(UserObjectGeometry): @@ -115,4 +115,4 @@ class Sphere(UserObjectGeometry): build_sphere(xc, yc, zc, r, grid.dx, grid.dy, grid.dz, numID, numIDx, numIDy, numIDz, averaging, grid.solid, grid.rigidE, grid.rigidH, grid.ID) dielectricsmoothing = 'on' if averaging else 'off' - log.info(f"Sphere with centre {xc * grid.dx:g}m, {yc * grid.dy:g}m, {zc * grid.dz:g}m, radius {r:g}m, of material(s) {', '.join(materialsrequested)} created, dielectric smoothing is {dielectricsmoothing}.") + logger.info(f"Sphere with centre {xc * grid.dx:g}m, {yc * grid.dy:g}m, {zc * grid.dz:g}m, radius {r:g}m, of material(s) {', '.join(materialsrequested)} created, dielectric smoothing is {dielectricsmoothing}.") diff --git a/gprMax/cmds_geometry/triangle.py b/gprMax/cmds_geometry/triangle.py index bf925771..ed1cbbb2 100644 --- a/gprMax/cmds_geometry/triangle.py +++ b/gprMax/cmds_geometry/triangle.py @@ -26,7 +26,7 @@ from ..cython.geometry_primitives import build_triangle from ..exceptions import CmdInputError from ..materials import Material -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) class Triangle(UserObjectGeometry): @@ -156,6 +156,6 @@ class Triangle(UserObjectGeometry): if thickness > 0: dielectricsmoothing = 'on' if averaging else 'off' - log.info(f"Triangle with coordinates {x1:g}m {y1:g}m {z1:g}m, {x2:g}m {y2:g}m {z2:g}m, {x3:g}m {y3:g}m {z3:g}m and thickness {thickness:g}m of material(s) {', '.join(materialsrequested)} created, dielectric smoothing is {dielectricsmoothing}.") + logger.info(f"Triangle with coordinates {x1:g}m {y1:g}m {z1:g}m, {x2:g}m {y2:g}m {z2:g}m, {x3:g}m {y3:g}m {z3:g}m and thickness {thickness:g}m of material(s) {', '.join(materialsrequested)} created, dielectric smoothing is {dielectricsmoothing}.") else: - log.info(f"Triangle with coordinates {x1:g}m {y1:g}m {z1:g}m, {x2:g}m {y2:g}m {z2:g}m, {x3:g}m {y3:g}m {z3:g}m of material(s) {', '.join(materialsrequested)} created.") + logger.info(f"Triangle with coordinates {x1:g}m {y1:g}m {z1:g}m, {x2:g}m {y2:g}m {z2:g}m, {x3:g}m {y3:g}m {z3:g}m of material(s) {', '.join(materialsrequested)} created.") diff --git a/gprMax/cmds_multiple.py b/gprMax/cmds_multiple.py index a3a4e65a..f0ededa4 100644 --- a/gprMax/cmds_multiple.py +++ b/gprMax/cmds_multiple.py @@ -39,7 +39,7 @@ from .subgrids.base import SubGridBase from .utilities import round_value from .waveforms import Waveform as WaveformUser -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) class UserObjectMulti: @@ -84,6 +84,7 @@ class Waveform(UserObjectMulti): def __init__(self, **kwargs): super().__init__(**kwargs) + self.order = 1 self.hash = '#waveform' def create(self, grid, uip): @@ -108,7 +109,7 @@ class Waveform(UserObjectMulti): w.amp = amp w.freq = freq - log.info(f'Waveform {w.ID} of type {w.type} with maximum amplitude scaling {w.amp:g}, frequency {w.freq:g}Hz created.') + logger.info(f'Waveform {w.ID} of type {w.type} with maximum amplitude scaling {w.amp:g}, frequency {w.freq:g}Hz created.') grid.waveforms.append(w) @@ -132,6 +133,7 @@ class VoltageSource(UserObjectMulti): def __init__(self, **kwargs): super().__init__(**kwargs) + self.order = 2 self.hash = '#voltage_source' def create(self, grid, uip): @@ -194,7 +196,7 @@ class 
VoltageSource(UserObjectMulti): v.calculate_waveform_values(grid) - log.info(f'Voltage source with polarity {v.polarisation} at {v.xcoord * grid.dx:g}m, {v.ycoord * grid.dy:g}m, {v.zcoord * grid.dz:g}m, resistance {v.resistance:.1f} Ohms,' + startstop + f'using waveform {v.waveformID} created.') + logger.info(f'Voltage source with polarity {v.polarisation} at {v.xcoord * grid.dx:g}m, {v.ycoord * grid.dy:g}m, {v.zcoord * grid.dz:g}m, resistance {v.resistance:.1f} Ohms,' + startstop + f'using waveform {v.waveformID} created.') grid.voltagesources.append(v) @@ -217,6 +219,7 @@ class HertzianDipole(UserObjectMulti): def __init__(self, **kwargs): super().__init__(**kwargs) + self.order = 3 self.hash = '#hertzian_dipole' def create(self, grid, uip): @@ -287,9 +290,9 @@ class HertzianDipole(UserObjectMulti): h.calculate_waveform_values(grid) if grid.mode == '2D': - log.info(f'Hertzian dipole is a line source in 2D with polarity {h.polarisation} at {h.xcoord * grid.dx:g}m, {h.ycoord * grid.dy:g}m, {h.zcoord * grid.dz:g}m,' + startstop + f'using waveform {h.waveformID} created.') + logger.info(f'Hertzian dipole is a line source in 2D with polarity {h.polarisation} at {h.xcoord * grid.dx:g}m, {h.ycoord * grid.dy:g}m, {h.zcoord * grid.dz:g}m,' + startstop + f'using waveform {h.waveformID} created.') else: - log.info(f'Hertzian dipole with polarity {h.polarisation} at {h.xcoord * grid.dx:g}m, {h.ycoord * grid.dy:g}m, {h.zcoord * grid.dz:g}m,' + startstop + f'using waveform {h.waveformID} created.') + logger.info(f'Hertzian dipole with polarity {h.polarisation} at {h.xcoord * grid.dx:g}m, {h.ycoord * grid.dy:g}m, {h.zcoord * grid.dz:g}m,' + startstop + f'using waveform {h.waveformID} created.') grid.hertziandipoles.append(h) @@ -312,6 +315,7 @@ class MagneticDipole(UserObjectMulti): def __init__(self, **kwargs): super().__init__(**kwargs) + self.order = 4 self.hash = '#magnetic_dipole' def create(self, grid, uip): @@ -372,7 +376,7 @@ class MagneticDipole(UserObjectMulti): m.calculate_waveform_values(grid) - log.info(f'Magnetic dipole with polarity {m.polarisation} at {m.xcoord * grid.dx:g}m, {m.ycoord * grid.dy:g}m, {m.zcoord * grid.dz:g}m,' + startstop + f'using waveform {m.waveformID} created.') + logger.info(f'Magnetic dipole with polarity {m.polarisation} at {m.xcoord * grid.dx:g}m, {m.ycoord * grid.dy:g}m, {m.zcoord * grid.dz:g}m,' + startstop + f'using waveform {m.waveformID} created.') grid.magneticdipoles.append(m) @@ -397,6 +401,7 @@ class TransmissionLine(UserObjectMulti): def __init__(self, **kwargs): super().__init__(**kwargs) + self.order = 5 self.hash = '#transmission_line' def create(self, grid, uip): @@ -464,7 +469,7 @@ class TransmissionLine(UserObjectMulti): t.calculate_waveform_values(grid) t.calculate_incident_V_I(grid) - log.info(f'Transmission line with polarity {t.polarisation} at {t.xcoord * grid.dx:g}m, {t.ycoord * grid.dy:g}m, {t.zcoord * grid.dz:g}m, resistance {t.resistance:.1f} Ohms,' + startstop + f'using waveform {t.waveformID} created.') + logger.info(f'Transmission line with polarity {t.polarisation} at {t.xcoord * grid.dx:g}m, {t.ycoord * grid.dy:g}m, {t.zcoord * grid.dz:g}m, resistance {t.resistance:.1f} Ohms,' + startstop + f'using waveform {t.waveformID} created.') grid.transmissionlines.append(t) @@ -485,6 +490,7 @@ class Rx(UserObjectMulti): def __init__(self, **kwargs): super().__init__(**kwargs) + self.order = 6 self.hash = '#rx' self.constructor = RxUser @@ -492,7 +498,7 @@ class Rx(UserObjectMulti): try: p1 = self.kwargs['p1'] except KeyError: - 
log.exception(f'KeyError with {self.params_str()}') + logger.exception(f'KeyError with {self.params_str()}') p = uip.check_src_rx_point(p1, self.params_str()) @@ -519,7 +525,7 @@ class Rx(UserObjectMulti): else: raise CmdInputError(f"'{self.params_str()}' contains an output type that is not allowable. Allowable outputs in current context are {allowableoutputs}") - log.info(f"Receiver at {r.xcoord * grid.dx:g}m, {r.ycoord * grid.dy:g}m, {r.zcoord * grid.dz:g}m with output component(s) {', '.join(r.outputs)} created.") + logger.info(f"Receiver at {r.xcoord * grid.dx:g}m, {r.ycoord * grid.dy:g}m, {r.zcoord * grid.dz:g}m with output component(s) {', '.join(r.outputs)} created.") grid.rxs.append(r) @@ -539,6 +545,7 @@ class RxArray(UserObjectMulti): def __init__(self, **kwargs): super().__init__(**kwargs) + self.order = 7 self.hash = '#rx_array' def create(self, grid, uip): @@ -573,7 +580,7 @@ class RxArray(UserObjectMulti): else: raise CmdInputError(f"'{self.params_str()}' the step size should not be less than the spatial discretisation") - log.info(f'Receiver array {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m with steps {dx * grid.dx:g}m, {dy * grid.dy:g}m, {dz * grid.dz:g}m') + logger.info(f'Receiver array {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m with steps {dx * grid.dx:g}m, {dy * grid.dy:g}m, {dz * grid.dz:g}m') for x in range(xs, xf + 1, dx): for y in range(ys, yf + 1, dy): @@ -588,7 +595,7 @@ class RxArray(UserObjectMulti): r.ID = r.__class__.__name__ + '(' + str(x) + ',' + str(y) + ',' + str(z) + ')' for key in RxUser.defaultoutputs: r.outputs[key] = np.zeros(grid.iterations, dtype=config.dtypes['float_or_double']) - log.info(f" Receiver at {r.xcoord * grid.dx:g}m, {r.ycoord * grid.dy:g}m, {r.zcoord * grid.dz:g}m with output component(s) {', '.join(r.outputs)} created.") + logger.info(f" Receiver at {r.xcoord * grid.dx:g}m, {r.ycoord * grid.dy:g}m, {r.zcoord * grid.dz:g}m with output component(s) {', '.join(r.outputs)} created.") grid.rxs.append(r) @@ -611,6 +618,7 @@ class Snapshot(UserObjectMulti): def __init__(self, **kwargs): super().__init__(**kwargs) + self.order = 8 self.hash = '#snapshot' def create(self, grid, uip): @@ -657,7 +665,7 @@ class Snapshot(UserObjectMulti): #else: s = SnapshotUser(xs, ys, zs, xf, yf, zf, dx, dy, dz, iterations, filename) - log.info(f'Snapshot from {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m, discretisation {dx * grid.dx:g}m, {dy * grid.dy:g}m, {dz * grid.dz:g}m, at {s.time * grid.dt:g} secs with filename {s.filename} created.') + logger.info(f'Snapshot from {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m, discretisation {dx * grid.dx:g}m, {dy * grid.dy:g}m, {dz * grid.dz:g}m, at {s.time * grid.dt:g} secs with filename {s.filename} created.') grid.snapshots.append(s) @@ -677,6 +685,7 @@ class Material(UserObjectMulti): def __init__(self, **kwargs): super().__init__(**kwargs) + self.order = 9 self.hash = '#material' def create(self, grid, uip): @@ -715,7 +724,7 @@ class Material(UserObjectMulti): if m.se == float('inf'): m.averagable = False - log.info(f'Material {m.ID} with eps_r={m.er:g}, sigma={m.se:g} S/m; mu_r={m.mr:g}, sigma*={m.sm:g} Ohm/m created.') + logger.info(f'Material {m.ID} with eps_r={m.er:g}, sigma={m.se:g} S/m; mu_r={m.mr:g}, sigma*={m.sm:g} Ohm/m 
created.') # Append the new material object to the materials list grid.materials.append(m) @@ -736,6 +745,7 @@ class AddDebyeDispersion(UserObjectMulti): def __init__(self, **kwargs): super().__init__(**kwargs) + self.order = 10 self.hash = '#add_dispersion_debye' def create(self, grid, uip): @@ -768,7 +778,7 @@ class AddDebyeDispersion(UserObjectMulti): disp_material.averagable = False for i in range(0, poles): if tau[i] > 0: - log.debug('Not checking if relaxation times are greater than time-step') + logger.debug('Not checking if relaxation times are greater than time-step') disp_material.deltaer.append(er_delta[i]) disp_material.tau.append(tau[i]) else: @@ -779,7 +789,7 @@ class AddDebyeDispersion(UserObjectMulti): # Replace original material with newly created DispersiveMaterial grid.materials = [disp_material if mat.numID==material.numID else mat for mat in grid.materials] - log.info(f"Debye disperion added to {disp_material.ID} with delta_eps_r={', '.join('%4.2f' % deltaer for deltaer in disp_material.deltaer)}, and tau={', '.join('%4.3e' % tau for tau in disp_material.tau)} secs created.") + logger.info(f"Debye disperion added to {disp_material.ID} with delta_eps_r={', '.join('%4.2f' % deltaer for deltaer in disp_material.deltaer)}, and tau={', '.join('%4.3e' % tau for tau in disp_material.tau)} secs created.") class AddLorentzDispersion(UserObjectMulti): @@ -799,6 +809,7 @@ class AddLorentzDispersion(UserObjectMulti): def __init__(self, **kwargs): super().__init__(**kwargs) + self.order = 11 self.hash = '#add_dispersion_lorentz' def create(self, grid, uip): @@ -843,7 +854,7 @@ class AddLorentzDispersion(UserObjectMulti): # Replace original material with newly created DispersiveMaterial grid.materials = [disp_material if mat.numID==material.numID else mat for mat in grid.materials] - log.info(f"Lorentz disperion added to {disp_material.ID} with delta_eps_r={', '.join('%4.2f' % deltaer for deltaer in disp_material.deltaer)}, omega={', '.join('%4.3e' % tau for tau in disp_material.tau)} secs, and gamma={', '.join('%4.3e' % alpha for alpha in disp_material.alpha)} created.") + logger.info(f"Lorentz disperion added to {disp_material.ID} with delta_eps_r={', '.join('%4.2f' % deltaer for deltaer in disp_material.deltaer)}, omega={', '.join('%4.3e' % tau for tau in disp_material.tau)} secs, and gamma={', '.join('%4.3e' % alpha for alpha in disp_material.alpha)} created.") class AddDrudeDispersion(UserObjectMulti): @@ -861,6 +872,7 @@ class AddDrudeDispersion(UserObjectMulti): def __init__(self, **kwargs): super().__init__(**kwargs) + self.order = 12 self.hash = '#add_dispersion_drude' def create(self, grid, uip): @@ -903,7 +915,7 @@ class AddDrudeDispersion(UserObjectMulti): # Replace original material with newly created DispersiveMaterial grid.materials = [disp_material if mat.numID==material.numID else mat for mat in grid.materials] - log.info(f"Drude disperion added to {disp_material.ID} with omega={', '.join('%4.3e' % tau for tau in disp_material.tau)} secs, and gamma={', '.join('%4.3e' % alpha for alpha in disp_material.alpha)} secs created.") + logger.info(f"Drude disperion added to {disp_material.ID} with omega={', '.join('%4.3e' % tau for tau in disp_material.tau)} secs, and gamma={', '.join('%4.3e' % alpha for alpha in disp_material.alpha)} secs created.") class SoilPeplinski(UserObjectMulti): @@ -925,6 +937,7 @@ class SoilPeplinski(UserObjectMulti): def __init__(self, **kwargs): super().__init__(**kwargs) + self.order = 13 self.hash = '#soil_peplinski' def create(self, grid, 
uip): @@ -957,7 +970,7 @@ class SoilPeplinski(UserObjectMulti): # Create a new instance of the Material class material (start index after pec & free_space) s = PeplinskiSoilUser(ID, sand_fraction, clay_fraction, bulk_density, sand_density, (water_fraction_lower, water_fraction_upper)) - log.info(f'Mixing model (Peplinski) used to create {s.ID} with sand fraction {s.S:g}, clay fraction {s.C:g}, bulk density {s.rb:g}g/cm3, sand particle density {s.rs:g}g/cm3, and water volumetric fraction {s.mu[0]:g} to {s.mu[1]:g} created.') + logger.info(f'Mixing model (Peplinski) used to create {s.ID} with sand fraction {s.S:g}, clay fraction {s.C:g}, bulk density {s.rb:g}g/cm3, sand particle density {s.rs:g}g/cm3, and water volumetric fraction {s.mu[0]:g} to {s.mu[1]:g} created.') # Append the new material object to the materials list grid.mixingmodels.append(s) @@ -981,6 +994,7 @@ class GeometryView(UserObjectMulti): def __init__(self, **kwargs): super().__init__(**kwargs) + self.order = 14 self.hash = '#geometry_view' self.multi_grid = False @@ -1039,7 +1053,7 @@ class GeometryView(UserObjectMulti): g = GeometryViewUser(xs, ys, zs, xf, yf, zf, dx, dy, dz, filename, fileext, grid) - log.info(f'Geometry view from {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m, discretisation {dx * grid.dx:g}m, {dy * grid.dy:g}m, {dz * grid.dz:g}m, multi_grid={self.multi_grid}, grid={grid.name}, with filename base {g.filename} created.') + logger.info(f'Geometry view from {xs * grid.dx:g}m, {ys * grid.dy:g}m, {zs * grid.dz:g}m, to {xf * grid.dx:g}m, {yf * grid.dy:g}m, {zf * grid.dz:g}m, discretisation {dx * grid.dx:g}m, {dy * grid.dy:g}m, {dz * grid.dz:g}m, multi_grid={self.multi_grid}, grid={grid.name}, with filename base {g.filename} created.') # Append the new GeometryView object to the geometry views list grid.geometryviews.append(g) @@ -1059,6 +1073,7 @@ class GeometryObjectsWrite(UserObjectMulti): def __init__(self, **kwargs): super().__init__(**kwargs) + self.order = 15 self.hash = '#geometry_objects_write' def create(self, grid, uip): @@ -1075,7 +1090,7 @@ class GeometryObjectsWrite(UserObjectMulti): g = GeometryObjectsUser(x0, y0, z0, x1, y1, z1, filename) - log.info(f'Geometry objects in the volume from {p1[0] * grid.dx:g}m, {p1[1] * grid.dy:g}m, {p1[2] * grid.dz:g}m, to {p2[0] * grid.dx:g}m, {p2[1] * grid.dy:g}m, {p2[2] * grid.dz:g}m, will be written to {g.filename}, with materials written to {g.materialsfilename}') + logger.info(f'Geometry objects in the volume from {p1[0] * grid.dx:g}m, {p1[1] * grid.dy:g}m, {p1[2] * grid.dz:g}m, to {p2[0] * grid.dx:g}m, {p2[1] * grid.dy:g}m, {p2[2] * grid.dz:g}m, will be written to {g.filename}, with materials written to {g.materialsfilename}') # Append the new GeometryView object to the geometry objects to write list grid.geometryobjectswrite.append(g) @@ -1114,6 +1129,7 @@ class PMLCFS(UserObjectMulti): def __init__(self, **kwargs): super().__init__(**kwargs) + self.order = 16 self.hash = '#pml_cfs' PMLCFS.count += 1 if PMLCFS.count == 2: @@ -1171,7 +1187,7 @@ class PMLCFS(UserObjectMulti): cfs.kappa = cfskappa cfs.sigma = cfssigma - log.info(f'PML CFS parameters: alpha (scaling: {cfsalpha.scalingprofile}, scaling direction: {cfsalpha.scalingdirection}, min: {cfsalpha.min:g}, max: {cfsalpha.max:g}), kappa (scaling: {cfskappa.scalingprofile}, scaling direction: {cfskappa.scalingdirection}, min: {cfskappa.min:g}, max: {cfskappa.max:g}), sigma (scaling: {cfssigma.scalingprofile}, scaling direction: 
{cfssigma.scalingdirection}, min: {cfssigma.min:g}, max: {cfssigma.max:g}) created.')
+        logger.info(f'PML CFS parameters: alpha (scaling: {cfsalpha.scalingprofile}, scaling direction: {cfsalpha.scalingdirection}, min: {cfsalpha.min:g}, max: {cfsalpha.max:g}), kappa (scaling: {cfskappa.scalingprofile}, scaling direction: {cfskappa.scalingdirection}, min: {cfskappa.min:g}, max: {cfskappa.max:g}), sigma (scaling: {cfssigma.scalingprofile}, scaling direction: {cfssigma.scalingdirection}, min: {cfssigma.min:g}, max: {cfssigma.max:g}) created.')

         grid.cfs.append(cfs)

@@ -1198,4 +1214,4 @@ class SubgridHSG(UserObjectMulti):
     """
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
-        log.debug('Is this required?')
+        logger.debug('Is this required?')
diff --git a/gprMax/cmds_single_use.py b/gprMax/cmds_single_use.py
index ef390bbe..ea6ba77a 100644
--- a/gprMax/cmds_single_use.py
+++ b/gprMax/cmds_single_use.py
@@ -33,7 +33,7 @@ from .exceptions import CmdInputError
 from .waveforms import Waveform
 from .utilities import round_value

-log = logging.getLogger(__name__)
+logger = logging.getLogger(__name__)


 class Properties:
@@ -59,32 +59,6 @@ class UserObjectSingle:
         pass


-class Messages(UserObjectSingle):
-    """Allows you to control the amount of information displayed on screen
-    when gprMax is run
-
-    :param yn: Whether information should be displayed.
-    :type yn: bool, optional
-    """
-
-    def __init__(self, **kwargs):
-        super().__init__(**kwargs)
-        self.order = 0
-
-    def create(self, G, uip):
-        try:
-            yn = self.kwargs['yn']
-        except KeyError:
-            raise CmdInputError(self.__str__() + ' requires exactly one parameter')
-
-        if yn.lower() == 'y':
-            config.sim_config.general['messages'] = True
-        elif yn.lower() == 'n':
-            config.sim_config.general['messages'] = False
-        else:
-            raise CmdInputError(self.__str__() + ' requires input values of either y or n')
-
-
 class Title(UserObjectSingle):
     """Allows you to include a title for your model.
@@ -100,7 +74,7 @@ class Title(UserObjectSingle):
         try:
             title = self.kwargs['name']
             G.title = title
-            log.info(f'Model title: {G.title}')
+            logger.info(f'Model title: {G.title}')
         except KeyError:
             pass
@@ -125,7 +99,7 @@ class Domain(UserObjectSingle):
         if G.nx == 0 or G.ny == 0 or G.nz == 0:
             raise CmdInputError(f"'{self.params_str()}' requires at least one cell in every dimension")

-        log.info(f"Domain size: {self.kwargs['p1'][0]:g} x {self.kwargs['p1'][1]:g} x {self.kwargs['p1'][2]:g}m ({G.nx:d} x {G.ny:d} x {G.nz:d} = {(G.nx * G.ny * G.nz):g} cells)")
+        logger.info(f"Domain size: {self.kwargs['p1'][0]:g} x {self.kwargs['p1'][1]:g} x {self.kwargs['p1'][2]:g}m ({G.nx:d} x {G.ny:d} x {G.nz:d} = {(G.nx * G.ny * G.nz):g} cells)")

         # Calculate time step at CFL limit; switch off appropriate PMLs for 2D
         if G.nx == 1:
@@ -144,8 +118,8 @@ class Domain(UserObjectSingle):
             G.mode = '3D'
             G.calculate_dt()

-        log.info(f'Mode: {G.mode}')
-        log.info(f'Time step (at CFL limit): {G.dt:g} secs')
+        logger.info(f'Mode: {G.mode}')
+        logger.info(f'Time step (at CFL limit): {G.dt:g} secs')


 class Discretisation(UserObjectSingle):
@@ -174,7 +148,7 @@ class Discretisation(UserObjectSingle):
         if G.dl[2] <= 0:
             raise CmdInputError(f"'{self.params_str()}' discretisation requires the z-direction spatial step to be greater than zero")

-        log.info(f'Spatial discretisation: {G.dl[0]:g} x {G.dl[1]:g} x {G.dl[2]:g}m')
+        logger.info(f'Spatial discretisation: {G.dl[0]:g} x {G.dl[1]:g} x {G.dl[2]:g}m')


 class TimeWindow(UserObjectSingle):
@@ -214,7 +188,7 @@ class TimeWindow(UserObjectSingle):
         if not G.timewindow:
             raise CmdInputError(f"'{self.params_str()}' specify a time or number of iterations")

-        log.info(f'Time window: {G.timewindow:g} secs ({G.iterations} iterations)')
+        logger.info(f'Time window: {G.timewindow:g} secs ({G.iterations} iterations)')


 class NumThreads(UserObjectSingle):
@@ -261,7 +235,7 @@ class TimeStepStabilityFactor(UserObjectSingle):
             raise CmdInputError(self.__str__() + ' requires the value of the time step stability factor to be between zero and one')

         G.dt = G.dt * f
-        log.info(f'Time step (modified): {G.dt:g} secs')
+        logger.info(f'Time step (modified): {G.dt:g} secs')


 class PMLCells(UserObjectSingle):
@@ -333,7 +307,7 @@ class SrcSteps(UserObjectSingle):
         except KeyError:
             raise CmdInputError('#src_steps: requires exactly three parameters')

-        log.info(f'Simple sources will step {G.srcsteps[0] * G.dx:g}m, {G.srcsteps[1] * G.dy:g}m, {G.srcsteps[2] * G.dz:g}m for each model run.')
+        logger.info(f'Simple sources will step {G.srcsteps[0] * G.dx:g}m, {G.srcsteps[1] * G.dy:g}m, {G.srcsteps[2] * G.dz:g}m for each model run.')


 class RxSteps(UserObjectSingle):
@@ -354,7 +328,7 @@ class RxSteps(UserObjectSingle):
         except KeyError:
             raise CmdInputError('#rx_steps: requires exactly three parameters')

-        log.info(f'All receivers will step {G.rxsteps[0] * G.dx:g}m, {G.rxsteps[1] * G.dy:g}m, {G.rxsteps[2] * G.dz:g}m for each model run.')
+        logger.info(f'All receivers will step {G.rxsteps[0] * G.dx:g}m, {G.rxsteps[1] * G.dy:g}m, {G.rxsteps[2] * G.dz:g}m for each model run.')


 class ExcitationFile(UserObjectSingle):
@@ -394,7 +368,7 @@ class ExcitationFile(UserObjectSingle):
             if not excitationfile.exists():
                 excitationfile = Path(config.sim_config.input_file_path.parent, excitationfile)

-            log.info(f'Excitation file: {excitationfile}')
+            logger.info(f'Excitation file: {excitationfile}')

             # Get waveform names
             with open(excitationfile, 'r') as f:
@@ -436,7 +410,7 @@ class ExcitationFile(UserObjectSingle):
                 # Interpolate waveform values
                 w.userfunc = interpolate.interp1d(waveformtime, singlewaveformvalues, **kwargs)

-                log.info(f"User waveform {w.ID} created using {timestr} and, if required, interpolation parameters (kind: {kwargs['kind']}, fill value: {kwargs['fill_value']}).")
+                logger.info(f"User waveform {w.ID} created using {timestr} and, if required, interpolation parameters (kind: {kwargs['kind']}, fill value: {kwargs['fill_value']}).")

                 G.waveforms.append(w)
diff --git a/gprMax/config.py b/gprMax/config.py
index 23b7b8b0..849bc1b6 100644
--- a/gprMax/config.py
+++ b/gprMax/config.py
@@ -35,8 +35,7 @@ from .utilities import detect_check_gpus
 from .utilities import get_host_info
 from .utilities import get_terminal_width

-log = logging.getLogger(__name__)
-
+logger = logging.getLogger(__name__)

 # Single instance of SimConfig to hold simulation configuration parameters.
 sim_config = None
@@ -185,12 +184,12 @@ class SimulationConfig:
         # General settings for the simulation
         #   inputfilepath: path to inputfile location
         #   outputfilepath: path to outputfile location
-        #   messages: whether to print all messages as output to stdout or not
         #   progressbars: whether to show progress bars on stdoout or not
         #   cpu, cuda, opencl: solver type
         #   subgrid: whether the simulation uses sub-grids
         #   precision: data type for electromagnetic field output (single/double)
-        self.general = {'messages': True,
+
+        self.general = {'log_level': logging.WARNING,
                         'progressbars': True,
                         'cpu': True,
                         'cuda': False,
@@ -224,6 +223,10 @@ class SimulationConfig:
                 if any(isinstance(element, list) for element in self.args.gpu):
                     self.args.gpu = [val for sublist in self.args.gpu for val in sublist]

+                # If no deviceID is given default to 0
+                if not self.args.gpu:
+                    self.args.gpu = [0]
+
                 self.cuda['gpus'] = detect_check_gpus(self.args.gpu)

         # Subgrid parameter may not exist if user enters via CLI
@@ -243,20 +246,21 @@ class SimulationConfig:
             self.scenes = []

         # Set more complex parameters
-        self.set_precision()
-        self.get_byteorder()
-        self.set_input_file_path()
-        self.set_model_start_end()
-        self.set_single_model()
-
-    def is_messages(self):
-        return self.general['messages']
+        self._set_precision()
+        self._get_byteorder()
+        self._set_input_file_path()
+        self._set_model_start_end()
+        self._set_single_model()

     def set_model_gpu(self):
-        """Specify single GPU object for model."""
-        return self.cuda['gpus'][0]
+        """Specify single GPU object for model.
+            Uses first GPU deviceID if list of deviceID given."""

-    def set_precision(self):
+        for gpu in self.cuda['gpus']:
+            if gpu.deviceID == self.args.gpu[0]:
+                return gpu
+
+    def _set_precision(self):
         """Data type (precision) for electromagnetic field output.

             Solid and ID arrays use 32-bit integers (0 to 4294967295)
@@ -283,19 +287,19 @@ class SimulationConfig:
                             'C_complex': 'pycuda::complex',
                             'vtk_float': 'Float64'}

-    def get_byteorder(self):
+    def _get_byteorder(self):
         """Check the byte order of system to use for VTK files, i.e.
             geometry views and snapshots.
         """
         self.vtk_byteorder = 'LittleEndian' if sys.byteorder == 'little' else 'BigEndian'

-    def set_single_model(self):
+    def _set_single_model(self):
         if self.model_start == 0 and self.model_end == 1:
             self.single_model = True
         else:
             self.single_model = False

-    def set_model_start_end(self):
+    def _set_model_start_end(self):
         """Set range for number of models to run (internally 0 index)."""
         if self.args.task:
             # Job array feeds args.n number of single tasks
@@ -311,7 +315,7 @@ class SimulationConfig:
         self.model_start = modelstart
         self.model_end = modelend

-    def set_input_file_path(self):
+    def _set_input_file_path(self):
         """Set input file path for CLI or API."""
         # API
         if self.args.inputfile is None:
@@ -329,11 +333,11 @@ class SimulationConfigMPI(SimulationConfig):
     def __init__(self, args):
         super().__init__(args)

-    def set_model_start_end(self):
+    def _set_model_start_end(self):
         # Set range for number of models to run
         self.model_start = self.args.restart if self.args.restart else 1
-        self.model_end = self.modelstart + self.args.n
+        self.model_end = self.model_start + self.args.n

-    def set_gpus(self):
+    def set_model_gpu(self):
         """Leave list of GPU object(s) as multi-object list."""
         pass
diff --git a/gprMax/contexts.py b/gprMax/contexts.py
index 76f90713..1a8b3dd4 100644
--- a/gprMax/contexts.py
+++ b/gprMax/contexts.py
@@ -50,10 +50,33 @@ class Context:
         if config.sim_config.general['cuda']:
             self.print_gpu_info()
         self.tsimstart = timer()
+        # Clear list of model configs. It can be retained when gprMax is
+        # called in a loop, and want to avoid this.
+        config.model_configs = []
         self._run()
         self.tsimend = timer()
         self.print_time_report()

+    def _run_model(self, i):
+        """Process for running a single model."""
+
+        config.model_num = i
+        write_model_config()
+
+        # Always create a grid for the first model. The next model to run
+        # only gets a new grid if the geometry is not re-used.
+        if i != 0 and config.sim_config.args.geometry_fixed:
+            config.get_model_config().reuse_geometry = True
+        else:
+            G = create_G()
+
+        model = ModelBuildRun(G)
+        model.build()
+        solver = create_solver(G)
+
+        if not config.sim_config.args.geometry_only:
+            model.solve(solver)
+
     def print_logo_copyright(self):
         """Print gprMax logo, version, and copyright/licencing information."""
         logo(__version__ + ' (' + codename + ')')
@@ -82,29 +105,9 @@ class NoMPIContext(Context):
     """

     def _run(self):
-        """Specialise how the models are farmed out."""
-
-        # Clear list of model configs. It can be retained with gprMax is
-        # called in a loop, and want to avoid this.
-        config.model_configs = []
-
+        """Specialise how models are run."""
         for i in self.model_range:
-            config.model_num = i
-            write_model_config()
-
-            # Always create a grid for the first model. The next model to run
-            # only gets a new grid if the geometry is not re-used.
-            if i != 0 and config.sim_config.args.geometry_fixed:
-                config.get_model_config().reuse_geometry = True
-            else:
-                G = create_G()
-
-            model = ModelBuildRun(G)
-            model.build()
-            solver = create_solver(G)
-
-            if not config.sim_config.args.geometry_only:
-                model.solve(solver)
+            self._run_model(i)


 class MPIContext(Context):
@@ -116,9 +119,27 @@
     def __init__(self):
         super().__init__()
         from mpi4py import MPI
+        from gprMax.mpi import MPIExecutor
+
+        self.comm = MPI.COMM_WORLD
+        self.rank = self.comm.rank
+        self.MPIExecutor = MPIExecutor

     def _run(self):
-        pass
+        """Specialise how the models are run."""
+
+        # compile jobs
+        jobs = []
+        for i in range(config.sim_config.args.n):
+            jobs.append({'i': i})
+
+        # Execute jobs
+        log.info(f'Starting execution of {config.sim_config.args.n} gprMax model runs.')
+        with self.MPIExecutor(self._run_model, comm=self.comm) as executor:
+            if executor is not None:
+                results = executor.submit(jobs)
+                log.info('Results: %s' % str(results))
+        log.info('Finished.')


 def create_context():
diff --git a/gprMax/exceptions.py b/gprMax/exceptions.py
index 4b128c34..2172a65c 100644
--- a/gprMax/exceptions.py
+++ b/gprMax/exceptions.py
@@ -22,7 +22,7 @@ from colorama import init
 from colorama import Fore
 init()

-log = logging.getLogger(__name__)
+logger = logging.getLogger(__name__)


 class GeneralError(ValueError):
@@ -30,7 +30,7 @@ class GeneralError(ValueError):
     def __init__(self, message, *args):
         self.message = message
         super(GeneralError, self).__init__(message, *args)
-        log.exception(Fore.RED)
+        logger.exception(Fore.RED)


 class CmdInputError(Exception):
@@ -41,4 +41,4 @@ class CmdInputError(Exception):
     # def __init__(self, message, *args):
     #     self.message = message
     #     super(CmdInputError, self).__init__(message, *args)
-    #     log.exception(Fore.RED)
+    #     logger.exception(Fore.RED)
diff --git a/gprMax/fields_outputs.py b/gprMax/fields_outputs.py
index fcad1e8d..72f8c6a7 100644
--- a/gprMax/fields_outputs.py
+++ b/gprMax/fields_outputs.py
@@ -24,7 +24,7 @@ import h5py

 from ._version import __version__

-log = logging.getLogger(__name__)
+logger = logging.getLogger(__name__)


 def store_outputs(G):
@@ -126,7 +126,7 @@ def write_hdf5_outputfile(outputfile, G):
             write_grid(grp, sg, is_subgrid=True)

     if G.rxs or sg_rxs:
-        log.info(f'Written output file: {outputfile.name}')
+        logger.info(f'Written output file: {outputfile.name}')


 def write_grid(basegrp, G, is_subgrid=False):
diff --git a/gprMax/geometry_outputs.py b/gprMax/geometry_outputs.py
index 1b7d883e..57ff6fdd 100644
--- a/gprMax/geometry_outputs.py
+++ b/gprMax/geometry_outputs.py
@@ -31,7 +31,7 @@ from .cython.geometry_outputs import define_normal_geometry
 from .cython.geometry_outputs import define_fine_geometry
 from .utilities import round_value

-log = logging.getLogger(__name__)
+logger = logging.getLogger(__name__)


 class GeometryView:
@@ -482,7 +482,7 @@ class GeometryViewFineMultiGrid:
             f.write('\n\n\n_'.encode('utf-8'))

             # Write points
-            log.info('\nWriting points main grid')
+            logger.info('\nWriting points main grid')
             datasize = np.dtype(np.float32).itemsize * self.vtk_numpoints * self.vtk_numpoint_components
             f.write(pack('I', datasize))
             for i in range(0, G.nx + 1):
@@ -491,7 +491,7 @@ class GeometryViewFineMultiGrid:
                         f.write(pack('fff', i * G.dx, j * G.dy, k * G.dz))

             for sg_v in self.sg_views:
-                log.info('Writing points subgrid')
+                logger.info('Writing points subgrid')
                 sg_v.write_points(f, G)

             n_x_lines = self.nx * (self.ny + 1) * (self.nz + 1)
@@ -506,7 +506,7 @@ class GeometryViewFineMultiGrid:
             z_lines = np.zeros((n_z_lines, 2), dtype=np.uint32)
             z_materials = np.zeros((n_z_lines), dtype=np.uint32)

-            log.info('Calculate connectivity main grid')
+            logger.info('Calculate connectivity main grid')
             label = 0
             counter_x = 0
             counter_y = 0
@@ -538,7 +538,7 @@ class GeometryViewFineMultiGrid:

                         label = label + 1

-            log.info('Calculate connectivity subgrids')
+            logger.info('Calculate connectivity subgrids')
             for sg_v in self.sg_views:
                 sg_v.populate_connectivity_and_materials(label)
                 # use the last subgrids label for the next view
diff --git a/gprMax/gprMax.py b/gprMax/gprMax.py
index 640631c7..b572de63 100644
--- a/gprMax/gprMax.py
+++ b/gprMax/gprMax.py
@@ -21,11 +21,9 @@ import logging

 from .config_parser import write_simulation_config
 from .contexts import create_context
+from .utilities import setup_logging

-# Configure logging
-log = logging.getLogger(__name__)
-# logging.basicConfig(level=logging.DEBUG, format='%(module)s %(lineno)d %(message)s')
-logging.basicConfig(level=logging.INFO, format='%(message)s')
+logger = logging.getLogger(__name__)


 def run(
@@ -36,8 +34,6 @@ def run(
     task=None,
     restart=None,
     mpi=False,
-    mpi_no_spawn=False,
-    mpicomm=None,
     gpu=None,
     subgrid=None,
     autotranslate=False,
@@ -45,7 +41,7 @@ def run(
     geometry_fixed=False,
     write_processed=False,
 ):
-    """This is the main function for gprMax when entering as application
+    """This is the main function for gprMax when entering using application
     programming interface (API). Run the simulation for the given list of
     scenes.
@@ -75,13 +71,13 @@ def run(
         from A-scan 45 when creating a B-scan with 60 traces.
     :type restart: int, optional

-    :param mpi: number of Message Passing Interface (MPI) tasks,
-        i.e. master + workers, for MPI task farm. This option is most
-        usefully combined with n to allow individual models to be farmed
-        out using a MPI task farm, e.g. to create a B-scan with 60 traces
-        and use MPI to farm out each trace1. For further details see the
-        parallel performance section of the User Guide.
-    :type mpi: int, optional
+    :param mpi: flag to use Message Passing Interface (MPI) task farm. This
+        option is most usefully combined with n to allow individual
+        models to be farmed out using a MPI task farm, e.g. to create a
+        B-scan with 60 traces and use MPI to farm out each trace.
+        For further details see the parallel performance section of the
+        User Guide.
+    :type mpi: bool, optional

     :param gpu: flag to use NVIDIA GPU or list of NVIDIA GPU device ID(s)
         for specific GPU card(s).
@@ -122,7 +118,6 @@ def run(
     args.task = task
     args.restart = restart
     args.mpi = mpi
-    args.mpicomm = mpicomm
     args.gpu = gpu
     args.subgrid = subgrid
     args.autotranslate = autotranslate
@@ -133,29 +128,47 @@ def run(
     try:
         run_main(args)
     except Exception:
-        log.exception('Error from main API function', exc_info=True)
+        logger.exception('Error from main API function', exc_info=True)


 def main():
-    """Main function for gprMax when entering from the command line interface (CLI)."""
+    """Main function for gprMax when entering using the command line interface (CLI)."""

     # Parse command line arguments
     parser = argparse.ArgumentParser(prog='gprMax', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-    parser.add_argument('inputfile', help='path to, and name of inputfile or file object')
-    parser.add_argument('-n', default=1, type=int, help='number of times to run the input file, e.g. to create a B-scan')
-    parser.add_argument('-task', type=int, help='task identifier (model number) for job array on Open Grid Scheduler/Grid Engine (http://gridscheduler.sourceforge.net/index.html)')
-    parser.add_argument('-restart', type=int, help='model number to restart from, e.g. when creating B-scan')
-    parser.add_argument('-mpi', type=int, help='number of MPI tasks, i.e. master + workers')
-    parser.add_argument('-gpu', type=int, action='append', nargs='*', help='flag to use Nvidia GPU or option to give list of device ID(s)')
-    parser.add_argument('--geometry-only', action='store_true', default=False, help='flag to only build model and produce geometry file(s)')
-    parser.add_argument('--geometry-fixed', action='store_true', default=False, help='flag to not reprocess model geometry, e.g. for B-scans where the geometry is fixed')
-    parser.add_argument('--write-processed', action='store_true', default=False, help='flag to write an input file after any Python code and include commands in the original input file have been processed')
+    parser.add_argument('inputfile',
+                        help='relative or absolute path to inputfile')
+    parser.add_argument('-n', default=1, type=int,
+                        help='number of times to run the input file, e.g. to create a B-scan')
+    parser.add_argument('-task', type=int,
+                        help='task identifier (model number) for job array on '
+                        'Open Grid Scheduler/Grid Engine (http://gridscheduler.sourceforge.net/index.html)')
+    parser.add_argument('-r', '--restart', type=int,
+                        help='model number to restart from, e.g. when creating B-scan')
+    parser.add_argument('-mpi', action='store_true', default=False,
+                        help='flag to enable MPI task farming')
+    parser.add_argument('-gpu', type=int, action='append', nargs='*',
+                        help='flag to use Nvidia GPU or option to give list of device ID(s)')
+    parser.add_argument('--geometry-only', action='store_true', default=False,
+                        help='flag to only build model and produce geometry file(s)')
+    parser.add_argument('--geometry-fixed', action='store_true', default=False,
+                        help='flag to not reprocess model geometry, e.g. for B-scans where the geometry is fixed')
+    parser.add_argument('--write-processed', action='store_true', default=False,
+                        help='flag to write an input file after any Python code and include commands '
+                        'in the original input file have been processed')
+    parser.add_argument('-l', '--logfile', action='store_true', default=False,
+                        help='flag to enable writing to a log file')
+    parser.add_argument('-v', '--verbose', action='store_true', default=False,
+                        help="flag to increase output")
     args = parser.parse_args()

+    setup_logging()
+
     try:
         run_main(args)
     except Exception:
-        log.exception('Error from main CLI function', exc_info=True)
+        logger.exception('Error from main CLI function', exc_info=True)
+

 def run_main(args):
     """Called by either run (API) or main (CLI).
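For orientation, the reworked run() signature above now takes mpi as a simple boolean and gpu as an optional list of NVIDIA device IDs. A minimal, illustrative call against that signature might look as follows (the input file name and device ID are placeholders, not part of this patch):

    import gprMax

    # 60-trace B-scan, traces farmed out over MPI, solved on GPU device 0.
    gprMax.run(inputfile='bscan.in', n=60, mpi=True, gpu=[0])

From the CLI the same run would presumably use the new boolean -mpi flag, e.g. python -m gprMax bscan.in -n 60 -mpi.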
Please convert your model to use our Python API instead.\n') + logger.warning('#python blocks are deprecated and will be removed in the next release of gprMax. Please convert your model to use our Python API instead.\n') # String to hold Python code to be executed pythoncode = '' x += 1 @@ -99,7 +99,7 @@ def process_python_include_code(inputfile, usernamespace): # Print any generated output that is not commands if pythonout: - log.info(f'Python messages (from stdout/stderr): {pythonout}\n') + logger.info(f'Python messages (from stdout/stderr): {pythonout}\n') # Add any other commands to list elif(inputlines[x].startswith('#')): @@ -157,7 +157,7 @@ def process_include_files(hashcmds, inputfile): x += 1 return processedincludecmds - +logger def write_processed_file(processedlines, G): """Writes an input file after any Python code and include commands @@ -176,7 +176,7 @@ def write_processed_file(processedlines, G): for item in processedlines: f.write(f'{item}') - log.info(f'Written input commands, after processing any Python code and include commands, to file: {processedfile}\n') + logger.info(f'Written input commands, after processing any Python code and include commands, to file: {processedfile}\n') def check_cmd_names(processedlines, checkessential=True): @@ -306,7 +306,7 @@ def parse_hash_commands(scene, G): for key, value in sorted(usernamespace.items()): if key != '__builtins__': uservars += f'{key}: {value}, ' - log.info(f'Constants/variables used/available for Python scripting: {{{uservars[:-2]}}}\n') + logger.info(f'Constants/variables used/available for Python scripting: {{{uservars[:-2]}}}\n') # Write a file containing the input commands after Python or include # file commands have been processed diff --git a/gprMax/hash_cmds_singleuse.py b/gprMax/hash_cmds_singleuse.py index 3295309e..30b22db2 100644 --- a/gprMax/hash_cmds_singleuse.py +++ b/gprMax/hash_cmds_singleuse.py @@ -16,7 +16,6 @@ # You should have received a copy of the GNU General Public License # along with gprMax. If not, see . 
-from .cmds_single_use import Messages from .cmds_single_use import Title from .cmds_single_use import NumThreads from .cmds_single_use import Discretisation @@ -46,15 +45,6 @@ def process_singlecmds(singlecmds): scene_objects = [] # Check validity of command parameters in order needed - cmd = '#messages' - if singlecmds[cmd] is not None: - tmp = singlecmds[cmd].split() - if len(tmp) != 1: - raise CmdInputError(cmd + ' requires exactly one parameter') - - messages = Messages(yn=str(tmp[0])) - scene_objects.append(messages) - cmd = '#title' if singlecmds[cmd] is not None: title = Title(name=str(singlecmds[cmd])) @@ -141,7 +131,7 @@ def process_singlecmds(singlecmds): tmp = singlecmds[cmd].split() if len(tmp) != 3: raise CmdInputError(cmd + ' requires exactly three parameters') - + p1 = (float(tmp[0]), float(tmp[1]), float(tmp[2])) src_steps = SrcSteps(p1=p1) scene_objects.append(src_steps) diff --git a/gprMax/model_build_run.py b/gprMax/model_build_run.py index 63e15026..430d89f4 100644 --- a/gprMax/model_build_run.py +++ b/gprMax/model_build_run.py @@ -50,7 +50,7 @@ from .utilities import human_size from .utilities import mem_check_all from .utilities import set_omp_threads -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) class ModelBuildRun: @@ -78,7 +78,7 @@ class ModelBuildRun: # Normal model reading/building process; bypassed if geometry information to be reused self.build_geometry() if not config.get_model_config().reuse_geometry else self.reuse_geometry() - log.info(f'\nOutput directory: {config.get_model_config().output_file_path.parent.resolve()}') + logger.info(f'\nOutput directory: {config.get_model_config().output_file_path.parent.resolve()}') # Adjust position of simple sources and receivers if required if G.srcsteps[0] != 0 or G.srcsteps[1] != 0 or G.srcsteps[2] != 0: @@ -110,9 +110,9 @@ class ModelBuildRun: # Write files for any geometry views and geometry object outputs if not (G.geometryviews or G.geometryobjectswrite) and config.sim_config.args.geometry_only: - log.warning(Fore.RED + f'\nNo geometry views or geometry objects found.' + Style.RESET_ALL) + logger.warning(Fore.RED + f'\nNo geometry views or geometry objects found.' 
+ Style.RESET_ALL) for i, geometryview in enumerate(G.geometryviews): - log.info('') + logger.info('') geometryview.set_filename() pbar = tqdm(total=geometryview.datawritesize, unit='byte', unit_scale=True, desc=f'Writing geometry view file {i + 1}/{len(G.geometryviews)}, {geometryview.filename.name}', @@ -121,7 +121,7 @@ class ModelBuildRun: geometryview.write_vtk(G, pbar) pbar.close() for i, geometryobject in enumerate(G.geometryobjectswrite): - log.info('') + logger.info('') pbar = tqdm(total=geometryobject.datawritesize, unit='byte', unit_scale=True, desc=f'Writing geometry object file {i + 1}/{len(G.geometryobjectswrite)}, {geometryobject.filename.name}', ncols=get_terminal_width() - 1, file=sys.stdout, @@ -132,13 +132,13 @@ class ModelBuildRun: def build_geometry(self): G = self.G - log.info(config.get_model_config().inputfilestr) + logger.info(config.get_model_config().inputfilestr) scene = self.build_scene() # Print info on any subgrids for grid in G.subgrids: - log.info(grid) + logger.info(grid) # Combine available grids grids = [G] + G.subgrids @@ -154,12 +154,12 @@ class ModelBuildRun: # Check memory requirements total_mem, mem_strs = mem_check_all(grids) - log.info(f'\nMemory required: {" + ".join(mem_strs)} + ~{human_size(config.get_model_config().mem_overhead)} overhead = {human_size(total_mem)}') + logger.info(f'\nMemory required: {" + ".join(mem_strs)} + ~{human_size(config.get_model_config().mem_overhead)} overhead = {human_size(total_mem)}') # Build grids gridbuilders = [GridBuilder(grid) for grid in grids] for gb in gridbuilders: - log.info(print_pml_info(gb.grid)) + logger.info(print_pml_info(gb.grid)) if not all(value == 0 for value in gb.grid.pmlthickness.values()): gb.build_pmls() gb.build_components() @@ -175,21 +175,21 @@ class ModelBuildRun: # Check to see if numerical dispersion might be a problem results = dispersion_analysis(gb.grid) if results['error']: - log.warning(Fore.RED + f"\nNumerical dispersion analysis [{gb.grid.name}] not carried out as {results['error']}" + Style.RESET_ALL) + logger.warning(Fore.RED + f"\nNumerical dispersion analysis [{gb.grid.name}] not carried out as {results['error']}" + Style.RESET_ALL) elif results['N'] < config.get_model_config().numdispersion['mingridsampling']: raise GeneralError(f"\nNon-physical wave propagation in [{gb.grid.name}] detected. Material '{results['material'].ID}' has wavelength sampled by {results['N']} cells, less than required minimum for physical wave propagation. Maximum significant frequency estimated as {results['maxfreq']:g}Hz") elif (results['deltavp'] and np.abs(results['deltavp']) > config.get_model_config().numdispersion['maxnumericaldisp']): - log.warning(Fore.RED + f"\n[{gb.grid.name}] has potentially significant numerical dispersion. Estimated largest physical phase-velocity error is {results['deltavp']:.2f}% in material '{results['material'].ID}' whose wavelength sampled by {results['N']} cells. Maximum significant frequency estimated as {results['maxfreq']:g}Hz" + Style.RESET_ALL) + logger.warning(Fore.RED + f"\n[{gb.grid.name}] has potentially significant numerical dispersion. Estimated largest physical phase-velocity error is {results['deltavp']:.2f}% in material '{results['material'].ID}' whose wavelength sampled by {results['N']} cells. 
Maximum significant frequency estimated as {results['maxfreq']:g}Hz" + Style.RESET_ALL) elif results['deltavp']: - log.info(f"\nNumerical dispersion analysis [{gb.grid.name}]: estimated largest physical phase-velocity error is {results['deltavp']:.2f}% in material '{results['material'].ID}' whose wavelength sampled by {results['N']} cells. Maximum significant frequency estimated as {results['maxfreq']:g}Hz") + logger.info(f"\nNumerical dispersion analysis [{gb.grid.name}]: estimated largest physical phase-velocity error is {results['deltavp']:.2f}% in material '{results['material'].ID}' whose wavelength sampled by {results['N']} cells. Maximum significant frequency estimated as {results['maxfreq']:g}Hz") def reuse_geometry(self): # Reset iteration number self.G.iteration = 0 s = f'\n--- Model {config.get_model_config().appendmodelnumber}/{config.sim_config.model_end}, input file (not re-processed, i.e. geometry fixed): {config.sim_config.input_file_path}' config.get_model_config().inputfilestr = Fore.GREEN + f"{s} {'-' * (get_terminal_width() - 1 - len(s))}\n" + Style.RESET_ALL - log.info(config.get_model_config().inputfilestr) + logger.info(config.get_model_config().inputfilestr) for grid in [self.G] + self.G.subgrids: grid.reset_fields() @@ -223,7 +223,7 @@ class ModelBuildRun: snapshotdir = config.get_model_config().snapshot_file_path snapshotdir.mkdir(exist_ok=True) - log.info('') + logger.info('') for i, snap in enumerate(self.G.snapshots): fn = snapshotdir / Path(snap.filename) snap.filename = fn.with_suffix('.vti') @@ -231,7 +231,7 @@ class ModelBuildRun: unit_scale=True, desc=f'Writing snapshot file {i + 1} of {len(self.G.snapshots)}, {snap.filename.name}', ncols=get_terminal_width() - 1, file=sys.stdout, disable=not config.sim_config.general['progressbars']) snap.write_vtk_imagedata(pbar, self.G) pbar.close() - log.info('') + logger.info('') def print_resource_info(self, tsolve, memsolve): """Print resource information on runtime and memory usage. @@ -245,8 +245,8 @@ class ModelBuildRun: if config.sim_config.general['cuda']: mem_str = f' host + ~{human_size(memsolve)} GPU' - log.info(f'\nMemory used: ~{human_size(self.p.memory_full_info().uss)}{mem_str}') - log.info(f'Solving time [HH:MM:SS]: {datetime.timedelta(seconds=tsolve)}') + logger.info(f'\nMemory used: ~{human_size(self.p.memory_full_info().uss)}{mem_str}') + logger.info(f'Solving time [HH:MM:SS]: {datetime.timedelta(seconds=tsolve)}') def solve(self, solver): """Solve using FDTD method. @@ -260,15 +260,15 @@ class ModelBuildRun: # Check number of OpenMP threads if config.sim_config.general['cpu']: - log.info(f'CPU (OpenMP) threads for solving: {config.get_model_config().ompthreads}\n') + logger.info(f'CPU (OpenMP) threads for solving: {config.get_model_config().ompthreads}\n') if config.get_model_config().ompthreads > config.sim_config.hostinfo['physicalcores']: - log.warning(Fore.RED + f"You have specified more threads ({config.get_model_config().ompthreads}) than available physical CPU cores ({config.sim_config.hostinfo['physicalcores']}). This may lead to degraded performance." + Style.RESET_ALL) + logger.warning(Fore.RED + f"You have specified more threads ({config.get_model_config().ompthreads}) than available physical CPU cores ({config.sim_config.hostinfo['physicalcores']}). This may lead to degraded performance." 
+ Style.RESET_ALL) # Print information about any GPU in use elif config.sim_config.general['cuda']: - log.info(f"GPU for solving: {config.get_model_config().cuda['gpu'].deviceID} - {config.get_model_config().cuda['gpu'].name}\n") + logger.info(f"GPU for solving: {config.get_model_config().cuda['gpu'].deviceID} - {config.get_model_config().cuda['gpu'].name}\n") # Prepare iterator - if config.sim_config.is_messages(): + if config.sim_config.general['progressbars']: iterator = tqdm(range(self.G.iterations), desc=f'Running model {config.model_num + 1}/{config.sim_config.model_end}', ncols=get_terminal_width() - 1, file=sys.stdout, disable=not config.sim_config.general['progressbars']) else: iterator = range(self.G.iterations) @@ -303,7 +303,7 @@ class GridBuilder: def build_components(self): # Build the model, i.e. set the material properties (ID) for every edge # of every Yee cell - log.info('') + logger.info('') pbar = tqdm(total=2, desc=f'Building Yee cells [{self.grid.name}]', ncols=get_terminal_width() - 1, file=sys.stdout, disable=not config.sim_config.general['progressbars']) @@ -335,5 +335,5 @@ class GridBuilder: materialstable.outer_border = False materialstable.justify_columns[0] = 'right' - log.info(f'\nMaterials [{self.grid.name}]:') - log.info(materialstable.table) + logger.info(f'\nMaterials [{self.grid.name}]:') + logger.info(materialstable.table) diff --git a/gprMax/mpi.py b/gprMax/mpi.py new file mode 100644 index 00000000..bb595837 --- /dev/null +++ b/gprMax/mpi.py @@ -0,0 +1,463 @@ +# Copyright (C) 2015-2020: The University of Edinburgh +# Authors: Tobias Schruff +# +# This file is part of gprMax. +# +# gprMax is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# gprMax is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with gprMax. If not, see . + +from enum import IntEnum +import logging +import time + +from mpi4py import MPI + +_log = logging.getLogger(__name__) + + +""" +MPI communication tags. +READY + Send by worker to master to signal that it is ready to receive a new job. +START + Send by master to worker together with a job dict to initiate work. +DONE + Send by worker to master together with the results of the current job. +EXIT + Send by master to worker to initiate worker shutdown and then + send back to master to signal shutdown has completed. +""" +Tags = IntEnum('Tags', 'READY START DONE EXIT') + + +class MPIExecutor(object): + """A generic parallel executor based on MPI. + This executor can be used to run generic jobs on multiple + processes based on a master/worker pattern with MPI being used for + communication between the master and the workers. + Examples + -------- + A basic example of how to use the `MPIExecutor` to run + `gprMax` models in parallel is given below. 
+ >>> from mpi4py import MPI + >>> from gprMax.mpi import MPIExecutor + >>> from gprMax.model_build_run import run_model + >>> # choose an MPI.Intracomm for communication (MPI.COMM_WORLD by default) + >>> comm = MPI.COMM_WORLD + >>> # choose a target function + >>> func = run_model + >>> # define job parameters + >>> inputfile = 'some_input_file.in' + >>> n_traces = 10 + >>> jobs = [] + >>> # create jobs + >>> for i in range(n_traces): + >>> jobs.append({ + >>> 'inputfile': inputfile, + >>> 'currentmodelrun': i + 1, + >>> 'modelend': n_traces, + >>> 'numbermodelruns': n_traces + >>> }) + >>> gpr = MPIExecutor(func, comm=comm) + >>> # send the workers to their work loop + >>> gpr.start() + >>> if gpr.is_master(): + >>> results = gpr.submit(jobs) + >>> print('Results:', results) + >>> # make the workers exit their work loop + >>> # and join the main loop again + >>> gpr.join() + A slightly more concise way is to use the context manager + interface of `MPIExecutor` that automatically takes care + of calling `start()` and `join()` at the beginning and end + of the execution, respectively. + >>> with MPIExecutor(func, comm=comm) as executor: + >>> # executor will be None on all ranks except for the master + >>> if executor is not None: + >>> results = executor.submit(jobs) + >>> print('Results:', results) + Limitations + ----------- + Because some popular MPI implementations (especially on HPC machines) do not + support concurrent MPI calls from multiple threads yet, the `MPIExecutor` does + not use a separate thread in the master to do the communication between the + master and the workers. Hence, the lowest thread level of MPI_THREAD_SINGLE + (no multi-threading) is enough. + However, this imposes some slight limitations on the usage since it is not + possible to interact with the workers during a call to `submit()` until + `submit()` returns. + In particular, it is not possible to handle exceptions that occur on workers + in the main loop. Instead all exceptions that occur on workers are caught and + logged and the worker returns None instead of the actual result of the worker + function. A second limitation is that it is not possible to terminate workers. + If you need an MPI executor that supports custom exception handling, you should + use a multi-threading implementation such as the `MPICommExecutor` in + `mpi4py.futures`. Below is a brief example of how to use it with the example + given above. + >>> from mpi4py.futures import MPICommExecutor + >>> from concurrent.futures import as_completed + >>> # define comm, func, and jobs like above + >>> with MPICommExecutor(comm, root=0) as executor: + >>> if executor is not None: + >>> futures = [executor.submit(func, **job) for job in jobs] + >>> for future in as_completed(futures): + >>> try: + >>> print(future.result()) + >>> except Exception as e: + >>> # custom exception handling for exceptions + >>> # raised in the worker + >>> print(e) + >>> comm.Abort() + """ + + def __init__(self, func, master=0, comm=None): + """Initializes a new executor instance. + Parameters + ---------- + func: callable + The worker function. Jobs will be passed as keyword arguments, + so `func` must support this. This is usually the case, but + can be a problem when builtin functions are used, e.g. `abs()`. + master: int + The rank of the master. Must be in `comm`. All other + ranks in `comm` will be treated as workers. + comm: MPI.Intracomm + The MPI communicator used for communication between the + master and workers. 
+ """ + if comm is None: + self.comm = MPI.COMM_WORLD + elif not comm.Is_intra(): + raise TypeError('MPI.Intracomm expected') + else: + self.comm = comm + + self.rank = self.comm.rank + self.size = self.comm.size + if self.size < 2: + raise RuntimeError('MPIExecutor must run with at least 2 processes') + + self._up = False + + master = int(master) + if master < 0: + raise ValueError('master rank must be non-negative') + elif master >= self.size: + raise ValueError('master not in comm') + else: + self.master = master + + # the worker ranks + self.workers = tuple(set(range(self.size)) - {self.master}) + # the worker function + if not callable(func): + raise TypeError('func must be a callable') + self.func = func + # holds the state of workers on the master + self.busy = [False] * len(self.workers) + + _log.debug(f'MPIExecutor on comm: {self.comm.name}, Master: {self.master}, Workers: {self.workers}') + if self.is_master(): + _log.debug('*** MASTER ***') + else: + _log.debug('*** WORKER ***') + + def __enter__(self): + """Context manager enter. + Only the master returns an executor, + all other ranks return None. + """ + self.start() + if self.is_master(): + return self + return None + + def __exit__(self, exc_type, exc_val, exc_tb): + """Context manager exit. + """ + if exc_type is not None: + _log.exception(exc_val) + return False + + # no exception handling necessary + # since we catch everything in __guarded_work + # exc_type should always be None + self.join() + return True + + def is_idle(self): + """Returns a bool indicating whether the executor is idle. + The executor is considered to be not idle if *any* worker + process is busy with a job. That means, it is idle only + if *all* workers are idle. + Note: This member must not be called on a worker. + """ + assert self.is_master() + return not any(self.busy) + + def is_master(self): + """Returns a bool indicating whether `self` is the master. + """ + return self.rank == self.master + + def is_worker(self): + """Returns a bool indicating whether `self` is a worker. + """ + return not self.is_master() + + def start(self): + """Starts up workers. + A check is performed on the master whether the executor + has already been terminated, in which case a RuntimeError + is raised on the master. + """ + if self.is_master(): + if self._up: + raise RuntimeError('start has already been called') + self._up = True + + _log.info('Starting up MPIExecutor master/workers') + if self.is_worker(): + self.__wait() + + def join(self): + """Joins the workers. + """ + if self.is_master(): + + _log.debug('Terminating. Sending sentinel to all workers.') + # send sentinel to all workers + for worker in self.workers: + self.comm.send(None, dest=worker, tag=Tags.EXIT) + + _log.debug('Waiting for all workers to terminate.') + + down = [False] * len(self.workers) + while True: + for i, worker in enumerate(self.workers): + if self.comm.Iprobe(source=worker, tag=Tags.EXIT): + self.comm.recv(source=worker, tag=Tags.EXIT) + down[i] = True + if all(down): + break + + self._up = False + _log.debug('All workers terminated.') + + def submit(self, jobs, sleep=0.0): + """Submits a list of jobs to the workers and returns the results. + Parameters + ---------- + jobs: list + A list of keyword argument dicts. Each dict describes + a job and will be unpacked and supplied to the work function. + sleep: float + The number of seconds the master will sleep for when trying + to find an idle worker. The default value is 0.0, which means + the master will not sleep at all. 
+ Returns + ------- + results: list + A list of results, i.e. the return values of the work function, + received from the workers. The order of results is identical to + the order of `jobs`. + """ + if not self._up: + raise RuntimeError('cannot run jobs without a call to start()') + + _log.info('Running {:d} jobs.'.format(len(jobs))) + assert self.is_master(), 'run() must not be called on a worker process' + + my_jobs = jobs.copy() + num_jobs = len(my_jobs) + results = [None] * num_jobs + while len(my_jobs) or not self.is_idle(): + + for i, worker in enumerate(self.workers): + + if self.comm.Iprobe(source=worker, tag=Tags.DONE): + job_idx, result = self.comm.recv(source=worker, tag=Tags.DONE) + _log.debug(f'Received finished job {job_idx} from worker {worker:d}.') + results[job_idx] = result + self.busy[i] = False + elif self.comm.Iprobe(source=worker, tag=Tags.READY): + if len(my_jobs): + self.comm.recv(source=worker, tag=Tags.READY) + self.busy[i] = True + job_idx = num_jobs - len(my_jobs) + _log.debug(f'Sending job {job_idx} to worker {worker:d}.') + self.comm.send((job_idx, my_jobs.pop(0)), dest=worker, tag=Tags.START) + elif self.comm.Iprobe(source=worker, tag=Tags.EXIT): + _log.debug(f'Worker on rank {worker:d} has terminated.') + self.comm.recv(source=worker, tag=Tags.EXIT) + self.busy[i] = False + + time.sleep(sleep) + + _log.info('Finished all jobs.') + return results + + def __wait(self): + """The worker main loop. + The worker will enter the loop after `start()` has been called + and stay here until it receives the sentinel, e.g. by calling + `join()` on the master. In the mean time, the worker is + accepting work. + """ + assert self.is_worker() + + status = MPI.Status() + + _log.debug(f'Starting up worker.') + + while True: + + self.comm.send(None, dest=self.master, tag=Tags.READY) + _log.debug(f'Worker on rank {self.rank} waiting for job.') + + data = self.comm.recv(source=self.master, tag=MPI.ANY_TAG, status=status) + tag = status.tag + + if tag == Tags.START: + job_idx, work = data + _log.debug(f'Received job {job_idx} (work={work}).') + result = self.__guarded_work(work) + _log.debug(f'Finished job. Sending results to master.') + self.comm.send((job_idx, result), dest=self.master, tag=Tags.DONE) + elif tag == Tags.EXIT: + _log.debug(f'Received sentinel from master.') + break + + _log.debug('Terminating worker.') + self.comm.send(None, dest=self.master, tag=Tags.EXIT) + + def __guarded_work(self, work): + """Executes work safely on the workers. + Parameters + ---------- + work: dict + Keyword arguments that are unpacked and given to the + work function. + Notes + ----- + All exceptions that occur in the work function `func` are caught + and logged. The worker returns `None` to the master in that case + instead of the actual result. + """ + assert self.is_worker() + try: + return self.func(**work) + except Exception as e: + _log.exception(str(e)) + return None + + +# def main(args=None): +# """CLI for gprMax in MPI mode. 
+# Example Usage: +# mpirun -np 4 python -m mpi -n 10 my_input_file.in +# """ +# import argparse +# import os +# from gprMax.constants import c, e0, m0, z0 +# from gprMax.model_build_run import run_model +# +# # Parse command line arguments +# parser = argparse.ArgumentParser(prog='gprMax', formatter_class=argparse.ArgumentDefaultsHelpFormatter) +# parser.add_argument( +# 'inputfile', +# help='relative or absolute path to inputfile.') +# parser.add_argument( +# '-n', '--num-traces', type=int, default=1, +# help='number of model runs (traces) to create a B-scan') +# parser.add_argument( +# '--geometry-only', action='store_true', default=False, +# help='flag to only build model and produce geometry file(s)') +# parser.add_argument( +# '--geometry-fixed', action='store_true', default=False, +# help='flag to not reprocess model geometry, e.g. for B-scans where the geometry is fixed') +# parser.add_argument( +# '--write-processed', action='store_true', default=False, +# help='flag to write an input file after any Python code and include commands ' +# 'in the original input file have been processed') +# parser.add_argument( +# '-r', '--restart', type=int, default=1, +# help='model number to restart from, e.g. when creating B-scan') +# parser.add_argument( +# '-l', '--logfile', action='store_true', default=False, +# help='flag to enable writing to a log file') +# parser.add_argument( +# '-v', '--verbose', action='store_true', default=False, +# help="flag to increase output") +# parser.add_argument( +# '--gpu', type=int, action='append', nargs='*', +# help='flag to use Nvidia GPU or option to give list of device ID(s)') +# +# args = parser.parse_args(args) +# +# comm = MPI.COMM_WORLD +# rank = comm.rank +# +# # set-up logging +# logger = logging.getLogger('gprMax') +# level = logging.DEBUG if args.verbose else logging.INFO +# logger.setLevel(level) +# +# if args.logfile != "": +# mh = logging.FileHandler(f"log_{rank}.txt", mode='w') +# mh.setLevel(level) +# formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s: %(message)s') +# mh.setFormatter(formatter) +# logger.addHandler(mh) +# +# namespace = { +# 'c': c, +# 'e0': e0, +# 'm0': m0, +# 'z0': z0, +# 'number_model_runs': args.num_traces, +# 'inputfile': os.path.abspath(args.inputfile) +# } +# +# model_args = argparse.Namespace(**{ +# 'geometry_only': args.geometry_only, +# 'geometry_fixed': args.geometry_fixed, +# 'write_processed': args.write_processed, +# 'task': False, +# 'restart': False, +# 'gpu': args.gpu +# }) +# +# # compile jobs +# jobs = [] +# for i in range(args.num_traces): +# jobs.append({ +# 'args': model_args, +# 'inputfile': args.inputfile, +# 'currentmodelrun': i + 1, +# 'modelend': args.num_traces, +# 'numbermodelruns': args.num_traces, +# 'usernamespace': namespace.copy() +# }) +# +# # execute jobs +# logger.info(f'Starting execution of {args.num_traces} gprMax model runs.') +# with MPIExecutor(run_model, comm=comm) as gpr: +# if gpr is not None: +# results = gpr.submit(jobs) +# logger.info('Results: %s' % str(results)) +# logger.info('Finished.') +# +# +# if __name__ == '__main__': +# main() diff --git a/gprMax/scene.py b/gprMax/scene.py index 7deef38b..fc215a97 100644 --- a/gprMax/scene.py +++ b/gprMax/scene.py @@ -31,7 +31,7 @@ from .subgrids.user_objects import SubGridBase as SubGridUserBase from .user_inputs import create_user_input_points from .utilities import human_size -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) class Scene: @@ -93,7 +93,7 @@ class Scene: try: obj.create(grid, 
uip) except CmdInputError: - log.exception('Error creating user input object') + logger.exception('Error creating user input object') return self diff --git a/gprMax/updates.py b/gprMax/updates.py index ea469f76..2c48cfd5 100644 --- a/gprMax/updates.py +++ b/gprMax/updates.py @@ -38,8 +38,6 @@ from .sources import htod_src_arrays from .utilities import round32 from .utilities import timer -log = logging.getLogger(__name__) - class CPUUpdates: """Defines update functions for CPU-based solver.""" diff --git a/gprMax/user_inputs.py b/gprMax/user_inputs.py index fc2b169b..25778548 100644 --- a/gprMax/user_inputs.py +++ b/gprMax/user_inputs.py @@ -29,7 +29,7 @@ from .exceptions import CmdInputError from .subgrids.base import SubGridBase from .utilities import round_value -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) """Module contains classes to handle points supplied by a user. The classes implement a common interface such that geometry building objects @@ -76,7 +76,7 @@ class UserInput: s = f"'{cmd_str}' the {err.args[0]} {name}-coordinate {i * dl:g} is not within the model domain" else: s = f"'{cmd_str}' {err.args[0]}-coordinate {i * dl:g} is not within the model domain" - raise CmdInputError(log.exception(s)) + raise CmdInputError(logger.exception(s)) def discretise_point(self, p): """Function to get the index of a continuous point with the grid.""" @@ -104,7 +104,7 @@ class MainGridUserInput(UserInput): p = self.check_point(p, cmd_str, name) if self.grid.within_pml(p): - log.warning(Fore.RED + f"'{cmd_str}' sources and receivers should not normally be positioned within the PML." + Style.RESET_ALL) + logger.warning(Fore.RED + f"'{cmd_str}' sources and receivers should not normally be positioned within the PML." + Style.RESET_ALL) return p @@ -113,7 +113,7 @@ class MainGridUserInput(UserInput): p2 = self.check_point(p2, cmd_str, name='upper') if np.greater(p1, p2).any(): - raise CmdInputError(log.exception(f"'{cmd_str}' the lower coordinates should be less than the upper coordinates.")) + raise CmdInputError(logger.exception(f"'{cmd_str}' the lower coordinates should be less than the upper coordinates.")) return p1, p2 @@ -175,5 +175,5 @@ class SubgridUserInput(MainGridUserInput): # the OS non-working region. if (np.less(p_t, self.inner_bound).any() or np.greater(p_t, self.outer_bound).any()): - log.warning(Fore.RED + f"'{cmd_str}' this object traverses the Outer Surface. This is an advanced feature." + Style.RESET_ALL) + logger.warning(Fore.RED + f"'{cmd_str}' this object traverses the Outer Surface. This is an advanced feature." + Style.RESET_ALL) return p_t diff --git a/gprMax/utilities.py b/gprMax/utilities.py index 62981732..fe529645 100644 --- a/gprMax/utilities.py +++ b/gprMax/utilities.py @@ -44,7 +44,35 @@ import numpy as np import gprMax.config as config from .exceptions import GeneralError -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) + + +def setup_logging(level=logging.INFO, logfile=False): + """Setup and configure logging. + + Args: + level (logging level): set logging level to stdout. + logfile (bool): additional logging to file. 
+ """ + + # Get root logger + logger = logging.getLogger() + logger.setLevel(level) + + # Logging to console + mh = logging.StreamHandler() + formatter = logging.Formatter('%(message)s') + mh.setLevel(level) + mh.setFormatter(formatter) + logger.addHandler(mh) + + # Logging to file + if logfile: + mh = logging.FileHandler("log_gprMax.txt", mode='w') + formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s: %(message)s') + mh.setLevel(logging.DEBUG) + mh.setFormatter(formatter) + logger.addHandler(mh) def get_terminal_width(): @@ -83,39 +111,14 @@ def logo(version): |___/|_| v""" + version - log.info(f"{description} {'=' * (get_terminal_width() - len(description) - 1)}\n") - log.info(Fore.CYAN + f'{logo}\n') - log.info(Style.RESET_ALL + textwrap.fill(copyright, width=get_terminal_width() - 1, initial_indent=' ')) - log.info(textwrap.fill(authors, width=get_terminal_width() - 1, initial_indent=' ')) - log.info('') - log.info(textwrap.fill(licenseinfo1, width=get_terminal_width() - 1, initial_indent=' ', subsequent_indent=' ')) - log.info(textwrap.fill(licenseinfo2, width=get_terminal_width() - 1, initial_indent=' ', subsequent_indent=' ')) - log.info(textwrap.fill(licenseinfo3, width=get_terminal_width() - 1, initial_indent=' ', subsequent_indent=' ')) - - -@contextmanager -def open_path_file(path_or_file): - """Accepts either a path as a string or a file object and returns a file - object (http://stackoverflow.com/a/6783680). - - Args: - path_or_file: path as a string or a file object. - - Returns: - f (object): File object. - """ - - if isinstance(path_or_file, str): - f = file_to_close = codecs.open(path_or_file, 'r', encoding='utf-8') - else: - f = path_or_file - file_to_close = None - - try: - yield f - finally: - if file_to_close: - file_to_close.close() + logger.info(f"{description} {'=' * (get_terminal_width() - len(description) - 1)}\n") + logger.info(Fore.CYAN + f'{logo}\n') + logger.info(Style.RESET_ALL + textwrap.fill(copyright, width=get_terminal_width() - 1, initial_indent=' ')) + logger.info(textwrap.fill(authors, width=get_terminal_width() - 1, initial_indent=' ')) + logger.info('') + logger.info(textwrap.fill(licenseinfo1, width=get_terminal_width() - 1, initial_indent=' ', subsequent_indent=' ')) + logger.info(textwrap.fill(licenseinfo2, width=get_terminal_width() - 1, initial_indent=' ', subsequent_indent=' ')) + logger.info(textwrap.fill(licenseinfo3, width=get_terminal_width() - 1, initial_indent=' ', subsequent_indent=' ')) def round_value(value, decimalplaces=0): @@ -518,10 +521,6 @@ def detect_check_gpus(deviceIDs): else: deviceIDsavail = range(drv.Device.count()) - # If no device ID is given use default of 0 - if not deviceIDs: - deviceIDs = [0] - # Check if requested device ID(s) exist for ID in deviceIDs: if ID not in deviceIDsavail: @@ -532,13 +531,12 @@ def detect_check_gpus(deviceIDs): for ID in deviceIDsavail: gpu = GPU(deviceID=ID) gpu.get_gpu_info(drv) - if ID in deviceIDs: - gpus.append(gpu) + gpus.append(gpu) return gpus def timer(): """Function to return time in fractional seconds.""" - log.debug('Review "thread_time" not currently available in macOS and bug (https://bugs.python.org/issue36205) with "process_time"') + logger.debug('"thread_time" not currently available in macOS and bug (https://bugs.python.org/issue36205) with "process_time"') return timer_fn() diff --git a/gprMax/waveforms.py b/gprMax/waveforms.py index 5076d31f..0bbcbf88 100644 --- a/gprMax/waveforms.py +++ b/gprMax/waveforms.py @@ -16,12 +16,8 @@ # You should have received a 
copy of the GNU General Public License # along with gprMax. If not, see . -import logging - import numpy as np -log = logging.getLogger(__name__) - class Waveform: """Definitions of waveform shapes that can be used with sources."""
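# A minimal sketch of how a driver script might call the new setup_logging() helper in
# gprMax/utilities.py, now that output verbosity is controlled through Python logging
# rather than the removed #messages command. The command-line flag names here are
# illustrative only; the setup_logging(level, logfile) signature is taken from the diff.
import argparse
import logging

from gprMax.utilities import setup_logging

parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true', help='increase output')
parser.add_argument('-l', '--logfile', action='store_true', help='also write log_gprMax.txt')
args = parser.parse_args()

# DEBUG when verbose, otherwise the INFO default used by setup_logging()
setup_logging(level=logging.DEBUG if args.verbose else logging.INFO, logfile=args.logfile)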
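# Sketch of how a caller might handle the MPIExecutor limitation noted in its docstring:
# an exception raised in the worker function is caught and logged on the worker, and the
# master simply receives None for that job, so the master must treat None results as
# failures itself. The toy worker function below is purely illustrative; run under
# mpirun with at least two ranks, since MPIExecutor requires a master plus workers.
from mpi4py import MPI

from gprMax.mpi import MPIExecutor


def flaky(x):
    """Toy worker function: fails for one input to show how failures surface."""
    if x == 3:
        raise ValueError('boom')   # logged on the worker; master receives None
    return x * x


jobs = [{'x': i} for i in range(6)]

with MPIExecutor(flaky, comm=MPI.COMM_WORLD) as executor:
    # Only the master gets an executor; workers get None and sit in their work loop.
    if executor is not None:
        results = executor.submit(jobs)            # ordered like `jobs`
        failed = [i for i, r in enumerate(results) if r is None]
        print('results:', results)
        print('failed job indices:', failed)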