Get global grid position from MPI rank

This commit is contained in:
nmannall
2024-03-11 17:03:26 +00:00
Parent 3380a19e3f
Commit 86393cf242
2 changed files with 43 additions and 14 deletions

View file

@@ -116,19 +116,37 @@ class Context:
 class MPIContext(Context):
-    def __init__(self):
+    def __init__(self, x_dim: int, y_dim: int, z_dim: int):
         super().__init__()
         from mpi4py import MPI
 
         self.comm = MPI.COMM_WORLD
         self.rank = self.comm.rank
 
-    def run(self):
-        if self.rank == 0:
-            super().run()
+        if self.rank >= x_dim * y_dim * z_dim:
+            logger.warn(
+                (
+                    f"Rank {self.rank}: Only {x_dim * y_dim * z_dim} MPI ranks required for the"
+                    " dimensions specified. Either increase your MPI dimension size, or request"
+                    " fewer MPI tasks."
+                )
+            )
+            self.x = -1
+            self.y = -1
+            self.z = -1
         else:
-            grid = create_G()
-            solver = create_solver(grid)
+            self.x = self.rank % x_dim
+            self.y = (self.rank // x_dim) % y_dim
+            self.z = (self.rank // (x_dim * y_dim)) % z_dim
+
+    def run(self):
+        print(f"I am rank {self.rank} and I will run at grid position {self.x}, {self.y}, {self.z}")
+        if self.rank == 0:
+            print("Rank 0 is running the simulation")
+            return super().run()
+        else:
+            pass
 
 class TaskfarmContext(Context):
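The __init__ above decomposes the linear MPI rank into (x, y, z) grid coordinates in row-major order, with x varying fastest. A minimal standalone sketch of the same arithmetic (the function name and the demo loop are illustrative, not part of the commit):

def grid_position(rank: int, x_dim: int, y_dim: int, z_dim: int) -> tuple:
    # Surplus ranks (beyond x_dim * y_dim * z_dim) get the sentinel
    # (-1, -1, -1), mirroring the warning branch in MPIContext.__init__.
    if rank >= x_dim * y_dim * z_dim:
        return (-1, -1, -1)
    x = rank % x_dim                       # x varies fastest
    y = (rank // x_dim) % y_dim            # then y
    z = (rank // (x_dim * y_dim)) % z_dim  # z varies slowest
    return (x, y, z)

# For a 2 x 2 x 1 grid, ranks 0-3 map to (0,0,0), (1,0,0), (0,1,0),
# (1,1,0); rank 4 is surplus and maps to (-1, -1, -1).
for rank in range(5):
    print(rank, grid_position(rank, 2, 2, 1))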

View file

@@ -31,7 +31,7 @@ args_defaults = {
"n": 1,
"i": None,
"taskfarm": False,
"mpi": False,
"mpi": None,
"gpu": None,
"opencl": None,
"subgrid": False,
@@ -64,8 +64,9 @@ help_msg = {
" further details see the performance section of the User Guide."
),
"mpi": (
"(bool, opt): Flag to use Message Passing Interface (MPI) to divide the model between MPI"
"ranks."
"(list, opt): Flag to use Message Passing Interface (MPI) to divide the model between MPI"
" ranks. Three integers should be provided to define the number of MPI processes (min 1) in"
" the x, y, and z dimensions."
),
"gpu": (
"(list/bool, opt): Flag to use NVIDIA GPU or list of NVIDIA GPU device ID(s) for specific"
@@ -138,8 +139,10 @@ def run(
             task farm, e.g. to create a B-scan with 60 traces and use
             MPI to farm out each trace. For further details see the
             performance section of the User Guide.
-        mpi: optional boolean flag to use Message Passing Interface
-            (MPI) to divide the model between MPI ranks.
+        mpi: optional flag to use Message Passing Interface (MPI) to
+            divide the model between MPI ranks. Three integers should be
+            provided to define the number of MPI processes (min 1) in
+            the x, y, and z dimensions.
         gpu: optional list/boolean to use NVIDIA GPU or list of NVIDIA
             GPU device ID(s) for specific GPU card(s).
         opencl: optional list/boolean to use OpenCL or list of OpenCL
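Following the updated docstring, a call through the Python API would pass a three-element list. A hypothetical invocation (the input file name is invented, and run() is assumed to accept it as its first argument):

import gprMax

# Hypothetical: divide "my_model.in" across a 2 x 2 x 1 grid of MPI ranks.
gprMax.run("my_model.in", mpi=[2, 2, 1])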
@@ -203,7 +206,12 @@ def cli():
help=help_msg["taskfarm"],
)
parser.add_argument(
"-mpi", action="store_true", default=args_defaults["mpi"], help=help_msg["mpi"]
"-mpi",
type=int,
action="store",
nargs=3,
default=args_defaults["mpi"],
help=help_msg["mpi"],
)
parser.add_argument("-gpu", type=int, action="append", nargs="*", help=help_msg["gpu"])
parser.add_argument("-opencl", type=int, action="append", nargs="*", help=help_msg["opencl"])
@@ -261,8 +269,11 @@ def run_main(args):
     if config.sim_config.args.taskfarm:
         context = TaskfarmContext()
     # MPI running to divide model between ranks
-    elif config.sim_config.args.mpi:
-        context = MPIContext()
+    elif config.sim_config.args.mpi is not None:
+        x = config.sim_config.args.mpi[0]
+        y = config.sim_config.args.mpi[1]
+        z = config.sim_config.args.mpi[2]
+        context = MPIContext(x, y, z)
     # Standard running (OpenMP/CUDA/OpenCL)
     else:
         context = Context()
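The three indexed reads are equivalent to a single sequence unpacking; a stylistic sketch, not part of the commit:

# Equivalent to the three indexed assignments above (sketch only).
x, y, z = config.sim_config.args.mpi
context = MPIContext(x, y, z)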