diff --git a/gprMax/gprMax.py b/gprMax/gprMax.py
index b9dbaa15..99c12e69 100644
--- a/gprMax/gprMax.py
+++ b/gprMax/gprMax.py
@@ -25,7 +25,7 @@ from .contexts import MPIContext
 from .utilities import setup_logging
 
 logger = logging.getLogger(__name__)
-setup_logging(level=25)
+setup_logging(level=20)
 
 def run(
     scenes=None,
diff --git a/gprMax/model_build_run.py b/gprMax/model_build_run.py
index 2b572685..388e7892 100644
--- a/gprMax/model_build_run.py
+++ b/gprMax/model_build_run.py
@@ -261,12 +261,12 @@ class ModelBuildRun:
         # Check number of OpenMP threads
         if config.sim_config.general['cpu']:
-            logger.basic(f'CPU solver with {config.get_model_config().ompthreads} OpenMP threads on {platform.node()}\n')
+            logger.basic(f'CPU (OpenMP) solver: {config.get_model_config().ompthreads} threads on {platform.node()}\n')
             if config.get_model_config().ompthreads > config.sim_config.hostinfo['physicalcores']:
                 logger.warning(Fore.RED + f"You have specified more threads ({config.get_model_config().ompthreads}) than available physical CPU cores ({config.sim_config.hostinfo['physicalcores']}). This may lead to degraded performance." + Style.RESET_ALL)
 
         # Print information about any GPU in use
         elif config.sim_config.general['cuda']:
-            logger.basic(f"GPU solver with device {config.get_model_config().cuda['gpu'].deviceID} - {config.get_model_config().cuda['gpu'].name} on {platform.node()}\n")
+            logger.basic(f"GPU solver: {config.get_model_config().cuda['gpu'].deviceID} - {config.get_model_config().cuda['gpu'].name} on {platform.node()}\n")
 
         # Prepare iterator
         if config.sim_config.general['progressbars']: