Rename existing internal MPI objects to Taskfarm

This commit is contained in:
nmannall
2024-02-01 17:13:51 +00:00
Parent 2fd8fcb662
Commit 3b942de2be
3 changed files with 17 additions and 17 deletions

View file

@@ -113,7 +113,7 @@ class Context:
logger.basic(f"{s} {'=' * (get_terminal_width() - 1 - len(s))}\n")
class MPIContext(Context):
class TaskfarmContext(Context):
"""Mixed mode MPI/OpenMP/CUDA context - MPI task farm is used to distribute
models, and each model parallelised using either OpenMP (CPU),
CUDA (GPU), or OpenCL (CPU/GPU).
@@ -123,11 +123,11 @@ class MPIContext(Context):
super().__init__()
from mpi4py import MPI
from gprMax.mpi import MPIExecutor
from gprMax.taskfarm import TaskfarmExecutor
self.comm = MPI.COMM_WORLD
self.rank = self.comm.rank
self.MPIExecutor = MPIExecutor
self.TaskfarmExecutor = TaskfarmExecutor
def _run_model(self, **work):
"""Process for running a single model.
@@ -184,8 +184,8 @@ class MPIContext(Context):
sys.stdout.flush()
# Contruct MPIExecutor
executor = self.MPIExecutor(self._run_model, comm=self.comm)
# Construct TaskfarmExecutor
executor = self.TaskfarmExecutor(self._run_model, comm=self.comm)
# Check GPU resources versus number of MPI tasks
if (

View file

@@ -20,7 +20,7 @@ import argparse
import gprMax.config as config
from .contexts import Context, MPIContext
from .contexts import Context, TaskfarmContext
from .utilities.logging import logging_config
# Arguments (used for API) and their default values (used for API and CLI)
@@ -212,7 +212,7 @@ def run_main(args):
# MPI running with (OpenMP/CUDA/OpenCL)
if config.sim_config.args.mpi:
context = MPIContext()
context = TaskfarmContext()
# Standard running (OpenMP/CUDA/OpenCL)
else:
context = Context()

View file

@@ -40,17 +40,17 @@ EXIT
Tags = IntEnum("Tags", "READY START DONE EXIT")
class MPIExecutor(object):
"""A generic parallel executor based on MPI.
class TaskfarmExecutor(object):
"""A generic parallel executor (taskfarm) based on MPI.
This executor can be used to run generic jobs on multiple
processes based on a master/worker pattern with MPI being used for
communication between the master and the workers.
Examples
--------
A basic example of how to use the `MPIExecutor` to run
A basic example of how to use the `TaskfarmExecutor` to run
`gprMax` models in parallel is given below.
>>> from mpi4py import MPI
>>> from gprMax.mpi import MPIExecutor
>>> from gprMax.taskfarm import TaskfarmExecutor
>>> from gprMax.model_build_run import run_model
>>> # choose an MPI.Intracomm for communication (MPI.COMM_WORLD by default)
>>> comm = MPI.COMM_WORLD
@@ -68,7 +68,7 @@ class MPIExecutor(object):
>>> 'modelend': n_traces,
>>> 'numbermodelruns': n_traces
>>> })
>>> gpr = MPIExecutor(func, comm=comm)
>>> gpr = TaskfarmExecutor(func, comm=comm)
>>> # send the workers to their work loop
>>> gpr.start()
>>> if gpr.is_master():
@@ -78,10 +78,10 @@ class MPIExecutor(object):
>>> # and join the main loop again
>>> gpr.join()
A slightly more concise way is to use the context manager
interface of `MPIExecutor` that automatically takes care
interface of `TaskfarmExecutor` that automatically takes care
of calling `start()` and `join()` at the beginning and end
of the execution, respectively.
>>> with MPIExecutor(func, comm=comm) as executor:
>>> with TaskfarmExecutor(func, comm=comm) as executor:
>>> # executor will be None on all ranks except for the master
>>> if executor is not None:
>>> results = executor.submit(jobs)
@@ -89,7 +89,7 @@ class MPIExecutor(object):
Limitations
-----------
Because some popular MPI implementations (especially on HPC machines) do not
support concurrent MPI calls from multiple threads yet, the `MPIExecutor` does
support concurrent MPI calls from multiple threads yet, the `TaskfarmExecutor` does
not use a separate thread in the master to do the communication between the
master and the workers. Hence, the lowest thread level of MPI_THREAD_SINGLE
(no multi-threading) is enough.
@@ -143,7 +143,7 @@ class MPIExecutor(object):
self.rank = self.comm.rank
self.size = self.comm.size
if self.size < 2:
raise RuntimeError("MPIExecutor must run with at least 2 processes")
raise RuntimeError("TaskfarmExecutor must run with at least 2 processes")
self._up = False
@@ -214,7 +214,7 @@ class MPIExecutor(object):
raise RuntimeError("Start has already been called")
self._up = True
logger.debug(f"({self.comm.name}) - Starting up MPIExecutor master/workers...")
logger.debug(f"({self.comm.name}) - Starting up TaskfarmExecutor master/workers...")
if self.is_worker():
self.__wait()