diff --git a/gprMax/config.py b/gprMax/config.py
index a406e81a..ab1b45e3 100644
--- a/gprMax/config.py
+++ b/gprMax/config.py
@@ -20,7 +20,7 @@ import logging
 import sys
 import warnings
 from pathlib import Path
-from typing import List, Union
+from typing import List, Optional, Union

 import cython
 import numpy as np
@@ -42,10 +42,11 @@ class ModelConfig:
     N.B. Multiple models can exist within a simulation
     """

-    def __init__(self):
+    def __init__(self, model_num):
         self.mode = "3D"
         self.grids = []
         self.ompthreads = None
+        self.model_num = model_num

         # Store information for CUDA or OpenCL solver
         #   dev: compute device object.
@@ -70,7 +71,11 @@ class ModelConfig:
         except:
             deviceID = 0

-        self.device = {"dev": sim_config.set_model_device(deviceID), "deviceID": deviceID, "snapsgpu2cpu": False}
+        self.device = {
+            "dev": sim_config.get_model_device(deviceID),
+            "deviceID": deviceID,
+            "snapsgpu2cpu": False,
+        }

         # Total memory usage for all grids in the model. Starts with 50MB overhead.
         self.mem_overhead = 65e6
@@ -79,11 +84,18 @@ class ModelConfig:
         self.reuse_geometry = False

         # String to print at start of each model run
-        s = f"\n--- Model {model_num + 1}/{sim_config.model_end}, " f"input file: {sim_config.input_file_path}"
-        self.inputfilestr = Fore.GREEN + f"{s} {'-' * (get_terminal_width() - 1 - len(s))}\n" + Style.RESET_ALL
+        s = (
+            f"\n--- Model {model_num + 1}/{sim_config.model_end}, "
+            f"input file: {sim_config.input_file_path}"
+        )
+        self.inputfilestr = (
+            Fore.GREEN + f"{s} {'-' * (get_terminal_width() - 1 - len(s))}\n" + Style.RESET_ALL
+        )

         # Output file path and name for specific model
-        self.appendmodelnumber = "" if sim_config.args.n == 1 else str(model_num + 1)  # Indexed from 1
+        self.appendmodelnumber = (
+            "" if sim_config.args.n == 1 else str(model_num + 1)
+        )  # Indexed from 1
         self.set_output_file_path()

         # Numerical dispersion analysis parameters
@@ -111,9 +123,12 @@ class ModelConfig:
             "crealfunc": None,
         }

+    def reuse_geometry(self):
+        return self.model_num != 0 and sim_config.args.geometry_fixed
+
     def get_scene(self):
         try:
-            return sim_config.scenes[model_num]
+            return sim_config.scenes[self.model_num]
         except:
             return None

@@ -121,7 +136,7 @@ class ModelConfig:
         """Namespace only used with #python blocks which are deprecated."""
         tmp = {
             "number_model_runs": sim_config.model_end,
-            "current_model_run": model_num + 1,
+            "current_model_run": self.model_num + 1,
             "inputfile": sim_config.input_file_path.resolve(),
         }
         return dict(**sim_config.em_consts, **tmp)
@@ -177,6 +192,14 @@ class SimulationConfig:
     N.B. A simulation can consist of multiple models.
""" + # TODO: Make this an enum + em_consts = { + "c": c, # Speed of light in free space (m/s) + "e0": e0, # Permittivity of free space (F/m) + "m0": m0, # Permeability of free space (H/m) + "z0": np.sqrt(m0 / e0), # Impedance of free space (Ohms) + } + def __init__(self, args): """ Args: @@ -185,14 +208,38 @@ class SimulationConfig: self.args = args - if self.args.taskfarm and self.args.geometry_fixed: + self.geometry_fixed: bool = args.geometry_fixed + self.geometry_only: bool = args.geometry_only + self.gpu: Union[List[str], bool] = args.gpu + self.mpi: List[int] = args.mpi + self.number_of_models: int = args.n + self.opencl: Union[List[str], bool] = args.opencl + self.output_file_path: str = args.outputfile + self.taskfarm: bool = args.taskfarm + self.write_processed_input_file: bool = ( + args.write_processed + ) # For depreciated Python blocks + + if self.taskfarm and self.geometry_fixed: logger.exception("The geometry fixed option cannot be used with MPI taskfarm.") raise ValueError - if self.args.gpu and self.args.opencl: + if self.gpu and self.opencl: logger.exception("You cannot use both CUDA and OpenCl simultaneously.") raise ValueError + if self.mpi and self.args.subgrid: + logger.exception("You cannot use subgrids with MPI.") + raise ValueError + + # Each model in a simulation is given a unique number when the instance of ModelConfig is created + self.current_model = 0 + + # Instances of ModelConfig that hold model configuration parameters. + # TODO: Consider if this would be better as a dictionary. + # Or maybe a non fixed length list (i.e. append each config) + self.model_configs: List[Optional[ModelConfig]] = [None] * self.number_of_models + # General settings for the simulation # solver: cpu, cuda, opencl. # precision: data type for electromagnetic field output (single/double). @@ -200,25 +247,25 @@ class SimulationConfig: # progressbars when logging level is greater than # info (20) - self.general = {"solver": "cpu", "precision": "single", "progressbars": args.log_level <= 20} - - self.em_consts = { - "c": c, # Speed of light in free space (m/s) - "e0": e0, # Permittivity of free space (F/m) - "m0": m0, # Permeability of free space (H/m) - "z0": np.sqrt(m0 / e0), # Impedance of free space (Ohms) + self.general = { + "solver": "cpu", + "precision": "single", + "progressbars": args.log_level <= 20, } # Store information about host machine self.hostinfo = get_host_info() # CUDA - if self.args.gpu is not None: + if self.gpu is not None: self.general["solver"] = "cuda" # Both single and double precision are possible on GPUs, but single # provides best performance. 
self.general["precision"] = "single" - self.devices = {"devs": [], "nvcc_opts": None} # pycuda device objects; nvcc compiler options + self.devices = { + "devs": [], + "nvcc_opts": None, + } # pycuda device objects; nvcc compiler options # Suppress nvcc warnings on Microsoft Windows if sys.platform == "win32": self.devices["nvcc_opts"] = ["-w"] @@ -227,10 +274,13 @@ class SimulationConfig: self.devices["devs"] = detect_cuda_gpus() # OpenCL - if self.args.opencl is not None: + if self.opencl is not None: self.general["solver"] = "opencl" self.general["precision"] = "single" - self.devices = {"devs": [], "compiler_opts": None} # pyopencl device device(s); compiler options + self.devices = { + "devs": [], + "compiler_opts": None, + } # pyopencl device device(s); compiler options # Suppress CompilerWarning (sub-class of UserWarning) warnings.filterwarnings("ignore", category=UserWarning) @@ -250,12 +300,16 @@ class SimulationConfig: self.general["subgrid"] and self.general["solver"] == "opencl" ): logger.exception( - "You cannot currently use CUDA or OpenCL-based " "solvers with models that contain sub-grids." + "You cannot currently use CUDA or OpenCL-based solvers with models that contain sub-grids." ) raise ValueError else: self.general["subgrid"] = False + self.autotranslate_subgrid_coordinates = True + if hasattr(self.args, "autotranslate"): + self.autotranslate_subgrid_coordinates: bool = args.autotranslate + # Scenes parameter may not exist if user enters via CLI try: self.scenes = args.scenes if args.scenes is not None else [] @@ -267,26 +321,6 @@ class SimulationConfig: self._set_input_file_path() self._set_model_start_end() - def set_model_device(self, deviceID): - """Specify pycuda/pyopencl object for model. - - Args: - deviceID: int of requested deviceID of compute device. - - Returns: - dev: requested pycuda/pyopencl device object. - """ - - found = False - for ID, dev in self.devices["devs"].items(): - if ID == deviceID: - found = True - return dev - - if not found: - logger.exception(f"Compute device with device ID {deviceID} does " "not exist.") - raise ValueError - def _set_precision(self): """Data type (precision) for electromagnetic field output. @@ -325,6 +359,15 @@ class SimulationConfig: elif self.general["solver"] == "opencl": self.dtypes["C_complex"] = "cdouble" + def _set_input_file_path(self): + """Sets input file path for CLI or API.""" + # API + if self.args.inputfile is None: + self.input_file_path = Path(self.args.outputfile) + # API/CLI + else: + self.input_file_path = Path(self.args.inputfile) + def _set_model_start_end(self): """Sets range for number of models to run (internally 0 index).""" if self.args.i: @@ -337,30 +380,69 @@ class SimulationConfig: self.model_start = modelstart self.model_end = modelend - def _set_input_file_path(self): - """Sets input file path for CLI or API.""" - # API - if self.args.inputfile is None: - self.input_file_path = Path(self.args.outputfile) - # API/CLI - else: - self.input_file_path = Path(self.args.inputfile) + def get_model_device(self, deviceID): + """Specify pycuda/pyopencl object for model. + + Args: + deviceID: int of requested deviceID of compute device. + + Returns: + dev: requested pycuda/pyopencl device object. 
+ """ + + found = False + for ID, dev in self.devices["devs"].items(): + if ID == deviceID: + found = True + return dev + + if not found: + logger.exception(f"Compute device with device ID {deviceID} does " "not exist.") + raise ValueError + + def get_model_config(self, model_num: Optional[int] = None) -> ModelConfig: + """Return ModelConfig instance for specific model. + + Args: + model_num: number of the model. If None, returns the config for the current model + + Returns: + model_config: requested model config + """ + if model_num is None: + model_num = self.current_model + + model_config = self.model_configs[model_num] + if model_config is None: + logger.exception(f"Cannot get ModelConfig for model {model_num}. It has not been set.") + raise ValueError + + return model_config + + def set_model_config(self, model_config: ModelConfig, model_num: Optional[int] = None) -> None: + """Set ModelConfig instace for specific model. + + Args: + model_num: number of the model. If None, sets the config for the current model + """ + if model_num is None: + model_num = self.current_model + + self.model_configs[model_num] = model_config + + def set_current_model(self, model_num: int) -> None: + """Set the current model by it's unique identifier + + Args: + model_num: unique identifier for the current model + """ + self.current_model = model_num # Single instance of SimConfig to hold simulation configuration parameters. sim_config: SimulationConfig = None -# Instances of ModelConfig that hold model configuration parameters. -model_configs: Union[ModelConfig, List[ModelConfig]] = [] - -# Each model in a simulation is given a unique number when the instance of -# ModelConfig is created -model_num: int = 0 - def get_model_config() -> ModelConfig: - """Return ModelConfig instace for specific model.""" - if isinstance(model_configs, ModelConfig): - return model_configs - else: - return model_configs[model_num] + """Return ModelConfig instance for specific model.""" + return sim_config.get_model_config() diff --git a/gprMax/contexts.py b/gprMax/contexts.py index 24b5f21a..2dfabec9 100644 --- a/gprMax/contexts.py +++ b/gprMax/contexts.py @@ -28,6 +28,7 @@ from colorama import Fore, Style, init init() import gprMax.config as config +from gprMax.config import ModelConfig from ._version import __version__, codename from .model_build_run import ModelBuildRun @@ -81,10 +82,6 @@ class Context: self._start_simulation() - # Clear list of model configs. It can be retained when gprMax is - # called in a loop, and want to avoid this. - config.model_configs = [] - for i in self.model_range: self._run_model(i) @@ -99,8 +96,9 @@ class Context: model_num: index of model to be run """ - config.model_num = model_num - self._set_model_config() + config.sim_config.set_current_model(model_num) + model_config = self._create_model_config(model_num) + config.sim_config.set_model_config(model_config) # Always create a grid for the first model. The next model to run # only gets a new grid if the geometry is not re-used. 
@@ -124,10 +122,9 @@ class Context:

         gc.collect()

-    def _set_model_config(self) -> None:
-        """Create model config and save to global config."""
+    def _create_model_config(self, model_num: int) -> ModelConfig:
+        """Create and return model config for the given model."""
-        model_config = config.ModelConfig()
-        config.model_configs.append(model_config)
+        return ModelConfig(model_num)

     def print_logo_copyright(self) -> None:
         """Prints gprMax logo, version, and copyright/licencing information."""
@@ -193,12 +190,12 @@ class TaskfarmContext(Context):
         self.rank = self.comm.rank
         self.TaskfarmExecutor = TaskfarmExecutor

-    def _set_model_config(self) -> None:
-        """Create model config and save to global config.
+    def _create_model_config(self, model_num: int) -> ModelConfig:
+        """Create and return model config for the given model.

         Set device in model config according to MPI rank.
         """
-        model_config = config.ModelConfig()
+        model_config = super()._create_model_config(model_num)
         # Set GPU deviceID according to worker rank
         if config.sim_config.general["solver"] == "cuda":
             model_config.device = {
@@ -206,7 +203,7 @@ class TaskfarmContext(Context):
                 "deviceID": self.rank - 1,
                 "snapsgpu2cpu": False,
             }
-        config.model_configs = model_config
+        return model_config

     def _run_model(self, **work) -> None:
         """Process for running a single model.
@@ -261,4 +258,4 @@ class TaskfarmContext(Context):

         if executor.is_master():
             self._end_simulation()
-        return results
+        return results
diff --git a/gprMax/model_build_run.py b/gprMax/model_build_run.py
index e20164f9..058227a0 100644
--- a/gprMax/model_build_run.py
+++ b/gprMax/model_build_run.py
@@ -73,12 +73,15 @@ class ModelBuildRun:
         # Normal model reading/building process; bypassed if geometry information to be reused
         self.reuse_geometry() if config.get_model_config().reuse_geometry else self.build_geometry()

-        logger.info(f"\nOutput directory: {config.get_model_config().output_file_path.parent.resolve()}")
+        logger.info(
+            f"\nOutput directory: {config.get_model_config().output_file_path.parent.resolve()}"
+        )

         # Adjust position of simple sources and receivers if required
         if G.srcsteps[0] != 0 or G.srcsteps[1] != 0 or G.srcsteps[2] != 0:
+            model_num = config.sim_config.current_model
             for source in itertools.chain(G.hertziandipoles, G.magneticdipoles):
-                if config.model_num == 0:
+                if model_num == 0:
                     if (
                         source.xcoord + G.srcsteps[0] * config.sim_config.model_end < 0
                         or source.xcoord + G.srcsteps[0] * config.sim_config.model_end > G.nx
@@ -87,14 +90,17 @@ class ModelBuildRun:
                         or source.ycoord + G.srcsteps[1] * config.sim_config.model_end > G.ny
                         or source.zcoord + G.srcsteps[2] * config.sim_config.model_end < 0
                         or source.zcoord + G.srcsteps[2] * config.sim_config.model_end > G.nz
                     ):
-                        logger.exception("Source(s) will be stepped to a position outside the domain.")
+                        logger.exception(
+                            "Source(s) will be stepped to a position outside the domain."
+                        )
                         raise ValueError
-                    source.xcoord = source.xcoordorigin + config.model_num * G.srcsteps[0]
-                    source.ycoord = source.ycoordorigin + config.model_num * G.srcsteps[1]
-                    source.zcoord = source.zcoordorigin + config.model_num * G.srcsteps[2]
+                    source.xcoord = source.xcoordorigin + model_num * G.srcsteps[0]
+                    source.ycoord = source.ycoordorigin + model_num * G.srcsteps[1]
+                    source.zcoord = source.zcoordorigin + model_num * G.srcsteps[2]

         if G.rxsteps[0] != 0 or G.rxsteps[1] != 0 or G.rxsteps[2] != 0:
+            model_num = config.sim_config.current_model
             for receiver in G.rxs:
-                if config.model_num == 0:
+                if model_num == 0:
                     if (
                         receiver.xcoord + G.rxsteps[0] * config.sim_config.model_end < 0
                         or receiver.xcoord + G.rxsteps[0] * config.sim_config.model_end > G.nx
@@ -103,11 +109,13 @@ class ModelBuildRun:
                         or receiver.zcoord + G.rxsteps[2] * config.sim_config.model_end < 0
                         or receiver.zcoord + G.rxsteps[2] * config.sim_config.model_end > G.nz
                     ):
-                        logger.exception("Receiver(s) will be stepped to a position outside the domain.")
+                        logger.exception(
+                            "Receiver(s) will be stepped to a position outside the domain."
+                        )
                         raise ValueError
-                    receiver.xcoord = receiver.xcoordorigin + config.model_num * G.rxsteps[0]
-                    receiver.ycoord = receiver.ycoordorigin + config.model_num * G.rxsteps[1]
-                    receiver.zcoord = receiver.zcoordorigin + config.model_num * G.rxsteps[2]
+                    receiver.xcoord = receiver.xcoordorigin + model_num * G.rxsteps[0]
+                    receiver.ycoord = receiver.ycoordorigin + model_num * G.rxsteps[1]
+                    receiver.zcoord = receiver.zcoordorigin + model_num * G.rxsteps[2]

         # Write files for any geometry views and geometry object outputs
         gvs = G.geometryviews + [gv for sg in G.subgrids for gv in sg.geometryviews]
@@ -204,7 +212,8 @@ class ModelBuildRun:
             results = dispersion_analysis(gb.grid)
             if results["error"]:
                 logger.warning(
-                    f"\nNumerical dispersion analysis [{gb.grid.name}] " f"not carried out as {results['error']}"
+                    f"\nNumerical dispersion analysis [{gb.grid.name}] "
+                    f"not carried out as {results['error']}"
                 )
             elif results["N"] < config.get_model_config().numdispersion["mingridsampling"]:
                 logger.exception(
@@ -218,7 +227,8 @@ class ModelBuildRun:
                 raise ValueError
             elif (
                 results["deltavp"]
-                and np.abs(results["deltavp"]) > config.get_model_config().numdispersion["maxnumericaldisp"]
+                and np.abs(results["deltavp"])
+                > config.get_model_config().numdispersion["maxnumericaldisp"]
             ):
                 logger.warning(
                     f"\n[{gb.grid.name}] has potentially significant "
@@ -295,7 +305,7 @@ class ModelBuildRun:
         # Print information about and check OpenMP threads
         if config.sim_config.general["solver"] == "cpu":
             logger.basic(
-                f"\nModel {config.model_num + 1}/{config.sim_config.model_end} "
+                f"\nModel {config.sim_config.current_model + 1}/{config.sim_config.model_end} "
                 f"on {config.sim_config.hostinfo['hostname']} "
                 f"with OpenMP backend using {config.get_model_config().ompthreads} thread(s)"
             )
@@ -308,7 +318,10 @@ class ModelBuildRun:
         elif config.sim_config.general["solver"] in ["cuda", "opencl"]:
             if config.sim_config.general["solver"] == "opencl":
                 solvername = "OpenCL"
-                platformname = " ".join(config.get_model_config().device["dev"].platform.name.split()) + " with "
+                platformname = (
+                    " ".join(config.get_model_config().device["dev"].platform.name.split())
+                    + " with "
+                )
             devicename = (
                 f'Device {config.get_model_config().device["deviceID"]}: '
                 f'{" ".join(config.get_model_config().device["dev"].name.split())}'
@@ -322,7 +335,7 @@ class ModelBuildRun:
             )

             logger.basic(
-                f"\nModel {config.model_num + 1}/{config.sim_config.model_end} "
+                f"\nModel {config.sim_config.current_model + 1}/{config.sim_config.model_end} "
                 f"solving on {config.sim_config.hostinfo['hostname']} "
                 f"with {solvername} backend using {platformname}{devicename}"
             )
@@ -353,9 +366,13 @@ class ModelBuildRun:
         elif config.sim_config.general["solver"] == "opencl":
             mem_str = f" host + unknown for device"

-        logger.info(f"\nMemory used (estimated): " + f"~{humanize.naturalsize(self.p.memory_full_info().uss)}{mem_str}")
         logger.info(
-            f"Time taken: " + f"{humanize.precisedelta(datetime.timedelta(seconds=solver.solvetime), format='%0.4f')}"
+            f"\nMemory used (estimated): "
+            + f"~{humanize.naturalsize(self.p.memory_full_info().uss)}{mem_str}"
+        )
+        logger.info(
+            f"Time taken: "
+            + f"{humanize.precisedelta(datetime.timedelta(seconds=solver.solvetime), format='%0.4f')}"
         )
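Reviewer note: the patch above replaces the module-level model_configs/model_num globals with per-model configs registered on the single SimulationConfig instance (set_current_model, set_model_config, get_model_config). Below is a minimal, standalone sketch of that registry pattern only; ModelCfg and SimCfg are illustrative stand-ins, not gprMax's own classes, which carry far more state.

from typing import List, Optional


class ModelCfg:
    """Stand-in for gprMax.config.ModelConfig: one instance per model."""

    def __init__(self, model_num: int):
        self.model_num = model_num


class SimCfg:
    """Stand-in for gprMax.config.SimulationConfig holding the per-model registry."""

    def __init__(self, number_of_models: int):
        self.current_model = 0
        # Fixed-length list indexed by model number, as in the patch.
        self.model_configs: List[Optional[ModelCfg]] = [None] * number_of_models

    def set_current_model(self, model_num: int) -> None:
        self.current_model = model_num

    def set_model_config(self, cfg: ModelCfg, model_num: Optional[int] = None) -> None:
        # Default to the current model, mirroring SimulationConfig.set_model_config.
        self.model_configs[self.current_model if model_num is None else model_num] = cfg

    def get_model_config(self, model_num: Optional[int] = None) -> ModelCfg:
        cfg = self.model_configs[self.current_model if model_num is None else model_num]
        if cfg is None:
            raise ValueError("ModelConfig has not been set for this model")
        return cfg


# Mirrors Context._run_model in contexts.py: register the config for each model,
# then fetch it anywhere downstream without touching module globals.
sim = SimCfg(number_of_models=3)
for model_num in range(3):
    sim.set_current_model(model_num)
    sim.set_model_config(ModelCfg(model_num))
    assert sim.get_model_config().model_num == model_num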