MPI work/debug.

This commit is contained in:
craig-warren
2020-04-07 09:03:38 +01:00
Parent 73463a1d88
Commit 14f31b4d95
6 changed files with 14 additions and 26 deletions

View file

@@ -5,13 +5,11 @@
#
# Please keep the lists sorted alphabetically (by surname for individuals).
gprMax has been supported through research projects funded by:
The Defence Science and Technology Laboratory (Dstl)
Google
gprMax is a contribution to COST Action TU1208 'Civil Engineering Applications of Ground Penetrating Radar'
@@ -20,3 +18,4 @@ As well as the aforementioned authors, the following individuals have contribute
Oystein Bjorndal
John Hartley
Rajath Kumar
Tobias Schruff

View file

@@ -18,6 +18,7 @@
import datetime
import logging
import platform
import sys
import gprMax.config as config
@@ -91,8 +92,8 @@ class Context:
"""Print information about any NVIDIA CUDA GPUs detected."""
gpus_info = []
for gpu in config.sim_config.cuda['gpus']:
gpus_info.append(f'{gpu.deviceID} - {gpu.name}, {human_size(gpu.totalmem, a_kilobyte_is_1024_bytes=True)}')
logger.basic(f" with GPU(s): {' | '.join(gpus_info)}")
gpus_info.append(f'{gpu.deviceID} {gpu.pcibusID} - {gpu.name}, {human_size(gpu.totalmem, a_kilobyte_is_1024_bytes=True)} (on {gpu.hostname})')
logger.basic(f"GPU resources: {' | '.join(gpus_info)}")
def print_time_report(self):
"""Print the total simulation time based on context."""
@@ -146,7 +147,7 @@ class MPIContext(Context):
if config.sim_config.general['cuda']:
self.print_gpu_info()
sys.stdout.flush()
logger.basic(platform.node())
# Construct MPIExecutor
executor = self.MPIExecutor(self._run_model, comm=self.comm)
@@ -172,18 +173,3 @@ class MPIContext(Context):
if executor.is_master():
self.tsimend = timer()
self.print_time_report()
def create_context():
"""Create a context in which to run the simulation. i.e MPI.
Returns:
context (Context): Context for the model to run in.
"""
if config.sim_config.args.mpi:
context = MPIContext()
else:
context = Context()
return context
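
Two of the contexts.py changes above work together: the per-GPU descriptor line gains the PCI bus ID and the hostname it was detected on, and MPIContext logs the node name and flushes stdout so per-rank output stays attributable. Below is a minimal sketch of the new "GPU resources" report format; SimpleGPU, the sample values, and the cut-down human_size are hypothetical stand-ins, and only the attribute names (deviceID, pcibusID, name, totalmem, hostname) come from the diff.

# Sketch: reproduce the "GPU resources" line format shown in the diff.
import platform
from dataclasses import dataclass


def human_size(nbytes, a_kilobyte_is_1024_bytes=True):
    """Rough stand-in for gprMax's human_size utility."""
    base = 1024 if a_kilobyte_is_1024_bytes else 1000
    for suffix in ('B', 'KB', 'MB', 'GB', 'TB'):
        if nbytes < base:
            return f'{nbytes:.0f}{suffix}'
        nbytes /= base
    return f'{nbytes:.0f}PB'


@dataclass
class SimpleGPU:
    # Hypothetical stand-in for the GPU descriptor used by gprMax.
    deviceID: int
    pcibusID: str
    name: str
    totalmem: int
    hostname: str


gpus = [SimpleGPU(0, '0000:3B:00.0', 'Tesla V100', 34089730048, platform.node())]
gpus_info = [f'{gpu.deviceID} {gpu.pcibusID} - {gpu.name}, '
             f'{human_size(gpu.totalmem)} (on {gpu.hostname})' for gpu in gpus]
# Prints e.g. "GPU resources: 0 0000:3B:00.0 - Tesla V100, 32GB (on mynode)"
print(f"GPU resources: {' | '.join(gpus_info)}")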

View file

@@ -20,7 +20,8 @@ import argparse
import logging
import gprMax.config as config
from .contexts import create_context
from .contexts import Context
from .contexts import MPIContext
from .utilities import setup_logging
logger = logging.getLogger(__name__)
@@ -172,5 +173,8 @@ def run_main(args):
"""
config.sim_config = config.SimulationConfig(args)
context = create_context()
if config.sim_config.args.mpi:
context = MPIContext()
else:
context = Context()
context.run()
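
For context, the dispatch that used to live in create_context() now happens inline in run_main(). The sketch below shows the same pattern in isolation; the Context/MPIContext stubs and the --mpi argparse flag are illustrative only (the real argument handling lives in config.SimulationConfig and may differ), and only the if/else dispatch mirrors the diff.

# Sketch of the inline context selection now done in run_main().
import argparse


class Context:
    def run(self):
        print('running single-process context')


class MPIContext(Context):
    def run(self):
        print('running MPI task-farm context')


def run_main(argv=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('--mpi', action='store_true',
                        help='distribute models as MPI tasks (illustrative flag)')
    args = parser.parse_args(argv)

    # Same dispatch as the diff: MPIContext when MPI is requested, else Context.
    context = MPIContext() if args.mpi else Context()
    context.run()


if __name__ == '__main__':
    run_main()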

View file

@@ -265,8 +265,7 @@ class ModelBuildRun:
logger.warning(Fore.RED + f"You have specified more threads ({config.get_model_config().ompthreads}) than available physical CPU cores ({config.sim_config.hostinfo['physicalcores']}). This may lead to degraded performance." + Style.RESET_ALL)
# Print information about any GPU in use
elif config.sim_config.general['cuda']:
import platform
logger.basic(f"GPU (on {platform.node()}) for solving: {config.get_model_config().cuda['gpu'].deviceID} - {config.get_model_config().cuda['gpu'].name}\n")
logger.basic(f"GPU for solving: {config.get_model_config().cuda['gpu'].deviceID} - {config.get_model_config().cuda['gpu'].name}\n")
# Prepare iterator
if config.sim_config.general['progressbars']:

View file

@@ -167,7 +167,7 @@ class MPIExecutor(object):
self.busy = [False] * len(self.workers)
if self.is_master():
logger.basic(f'\nMPIExecutor on comm: {self.comm.name}, Master: {self.master}, Workers: {self.workers}')
logger.basic(f'\nMPIExecutor with comm: {self.comm.name}, Master: {self.master}, Workers: {self.workers}')
def __enter__(self):
"""Context manager enter.

View file

@@ -68,7 +68,7 @@ def setup_logging(level=logging.INFO, logfile=False):
logger.setLevel(level)
# Logging to console
mh = logging.StreamHandler()
mh = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(message)s')
mh.setLevel(level)
mh.setFormatter(formatter)
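
logging.StreamHandler() writes to sys.stderr by default; passing sys.stdout routes console log output to stdout, so it shares a stream with ordinary prints and with the sys.stdout.flush() call added in MPIContext (the hunk also implies utilities.py needs an import sys, not shown here). A cut-down sketch of the resulting behaviour; setup_logging below is a stand-in for the gprMax utility, not the full function, and the logger name is illustrative.

# Sketch: console handler writing to stdout rather than the default stderr.
import logging
import sys


def setup_logging(level=logging.INFO):
    logger = logging.getLogger('gprMax')  # illustrative logger name
    logger.setLevel(level)

    # StreamHandler() defaults to sys.stderr; use sys.stdout so log lines
    # interleave predictably with stdout under MPI launchers.
    mh = logging.StreamHandler(sys.stdout)
    mh.setLevel(level)
    mh.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(mh)
    return logger


logger = setup_logging()
logger.info('console logging goes to stdout now')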