Mirrored from https://gitee.com/sunhf/gprMax.git
Synced 2025-08-07 04:56:51 +08:00
Remove unnecessary functions and add doc strings
@@ -157,6 +157,15 @@ class MPIGrid(FDTDGrid):
     def get_grid_coord_from_local_coordinate(
         self, local_coord: npt.NDArray[np.int32]
     ) -> npt.NDArray[np.int32]:
+        """Get the MPI grid coordinate for a local grid coordinate.
+
+        Args:
+            local_coord: Local grid coordinate.
+
+        Returns:
+            grid_coord: Coordinate of the MPI rank containing the local
+                grid coordinate.
+        """
         coord = self.local_to_global_coordinate(local_coord)
         return self.get_grid_coord_from_coordinate(coord)

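The new method composes two existing helpers: the local coordinate is first shifted into the global index space and the result is then mapped to the owning rank. The standalone sketch below shows one way such a composition can work for a uniform block decomposition; rank_offset, cells_per_rank and the division rule are illustrative assumptions, not names or logic taken from the gprMax source.

# Standalone sketch (not gprMax code): one way a lookup like
# get_grid_coord_from_local_coordinate can be composed for a uniform
# block decomposition. rank_offset and cells_per_rank are assumed values.
import numpy as np
import numpy.typing as npt

rank_offset = np.array([32, 0, 64], dtype=np.int32)      # this rank's global origin
cells_per_rank = np.array([32, 32, 32], dtype=np.int32)  # block size per rank

def local_to_global(local_coord: npt.NDArray[np.int32]) -> npt.NDArray[np.int32]:
    # Shift the rank-local index into the global index space.
    return local_coord + rank_offset

def grid_coord_from_global(coord: npt.NDArray[np.int32]) -> npt.NDArray[np.int32]:
    # Integer division by the block size gives the owning rank's Cartesian coordinate.
    return coord // cells_per_rank

local = np.array([5, 7, 9], dtype=np.int32)
print(grid_coord_from_global(local_to_global(local)))  # -> [1 0 2]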
@@ -277,73 +286,24 @@ class MPIGrid(FDTDGrid):
     def local_bounds_overlap_grid(
         self, local_start: npt.NDArray[np.int32], local_stop: npt.NDArray[np.int32]
     ) -> bool:
-        return all(local_start < self.size) and all(local_stop > self.negative_halo_offset)
+        """Check if local bounds overlap with the grid.

-    def limit_global_bounds_to_within_local_grid(
-        self,
-        start: npt.NDArray[np.int32],
-        stop: npt.NDArray[np.int32],
-        step: npt.NDArray[np.int32] = np.ones(3, dtype=np.int32),
-    ) -> Tuple[npt.NDArray[np.int32], npt.NDArray[np.int32], npt.NDArray[np.int32]]:
-        local_start = self.global_to_local_coordinate(start)
-
-        # Bring start into the local grid (and not in the negative halo)
-        # local_start must still be aligned with the provided step.
-        local_start = np.where(
-            local_start < self.negative_halo_offset,
-            self.negative_halo_offset + ((local_start - self.negative_halo_offset) % step),
-            local_start,
-        )
-
-        local_stop = self.global_to_local_coordinate(stop)
-
-        # Limit local_stop such that it is at most one step beyond the
-        # max index of the grid. As local_stop is the upper bound, it is
-        # exclusive, meaning when used to slice an array (with the
-        # provided step), the last element accessed will one step below
-        # local_stop.
-        # Note: using self.size as an index in any dimension would fall
-        # in the positive halo (this counts as outside the local grid).
-        local_stop = np.where(
-            local_stop > self.size,
-            self.size + ((local_stop - self.size) % step),
-            local_stop,
-        )
-
-        offset = self.local_to_global_coordinate(local_start) - start
-
-        return local_start, local_stop, offset
-
-    def scatter_coord_objects(self, objects: List[CoordType]) -> List[CoordType]:
-        """Scatter coord objects to the correct MPI rank.
-
-        Coord objects (sources and receivers) are scattered to the MPI
-        rank based on their location in the grid. The receiving MPI rank
-        converts the object locations to its own local grid.
+        The bounds overlap if any of the 3D box as defined by the lower
+        and upper bounds overlaps with the local grid (excluding the
+        halo).

         Args:
-            objects: Coord objects to be scattered.
+            local_start: Lower bound in the local grid coordinate space.
+            local_stop: Upper bound in the local grid coordinate space.

         Returns:
-            scattered_objects: List of Coord objects belonging to the
-                current MPI rank.
+            overlaps_grid: True if the box generated by the lower and
+                upper bound overlaps with the local grid.
         """
-        if self.is_coordinator():
-            objects_by_rank: List[List[CoordType]] = [[] for _ in range(self.comm.size)]
-            for o in objects:
-                objects_by_rank[self.get_rank_from_coordinate(o.coord)].append(o)
-        else:
-            objects_by_rank = None
-
-        objects = self.comm.scatter(objects_by_rank, root=self.COORDINATOR_RANK)
-
-        for o in objects:
-            o.coord = self.global_to_local_coordinate(o.coord)
-
-        return objects
+        return all(local_start < self.size) and all(local_stop > self.negative_halo_offset)

     def gather_coord_objects(self, objects: List[CoordType]) -> List[CoordType]:
-        """Scatter coord objects to the correct MPI rank.
+        """Gather coord objects on the coordinator MPI rank.

         The sending MPI rank converts the object locations to the global
         grid. The coord objects (sources and receivers) are all sent to
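scatter_coord_objects in the hunk above follows a common mpi4py idiom: the coordinator buckets objects into one list per rank, and a single comm.scatter delivers each rank its own list. Below is a minimal, self-contained sketch of that idiom; the object tuples and the hash-based owner rule are placeholders standing in for gprMax's sources/receivers and get_rank_from_coordinate, not the project's actual logic.

# Minimal sketch of the bucket-then-scatter pattern (run under mpiexec).
from mpi4py import MPI

comm = MPI.COMM_WORLD
COORDINATOR_RANK = 0

if comm.rank == COORDINATOR_RANK:
    # Pretend these are source/receiver positions; owner chosen by a toy rule.
    objects = [(1, 2, 3), (40, 2, 3), (80, 2, 3)]
    objects_by_rank = [[] for _ in range(comm.size)]
    for obj in objects:
        owner = hash(obj) % comm.size  # stand-in for get_rank_from_coordinate()
        objects_by_rank[owner].append(obj)
else:
    objects_by_rank = None

# Every rank receives only the objects assigned to it.
mine = comm.scatter(objects_by_rank, root=COORDINATOR_RANK)
print(comm.rank, mine)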
@@ -359,6 +319,7 @@ class MPIGrid(FDTDGrid):
         """
         for o in objects:
             o.coord = self.local_to_global_coordinate(o.coord)
+
         gathered_objects: Optional[List[List[CoordType]]] = self.comm.gather(
             objects, root=self.COORDINATOR_RANK
         )
@@ -368,152 +329,6 @@ class MPIGrid(FDTDGrid):
         else:
             return objects

-    def scatter_snapshots(self):
-        """Scatter snapshots to the correct MPI rank.
-
-        Each snapshot is sent by the coordinator to the MPI ranks
-        containing the snapshot. A new communicator is created for each
-        snapshot, and each rank bounds the snapshot to within its own
-        local grid.
-        """
-        if self.is_coordinator():
-            snapshots_by_rank = [[] for _ in range(self.comm.size)]
-            for snapshot in self.snapshots:
-                ranks = self.get_ranks_between_coordinates(snapshot.start, snapshot.stop)
-                for rank in range(
-                    self.comm.size
-                ): # TODO: Loop over ranks in snapshot, not all ranks
-                    if rank in ranks:
-                        snapshots_by_rank[rank].append(snapshot)
-                    else:
-                        # All ranks need the same number of 'snapshots'
-                        # (which may be None) to ensure snapshot
-                        # communicators are setup correctly and to avoid
-                        # deadlock.
-                        snapshots_by_rank[rank].append(None)
-        else:
-            snapshots_by_rank = None
-
-        snapshots = self.comm.scatter(snapshots_by_rank, root=self.COORDINATOR_RANK)
-
-        for snapshot in snapshots:
-            if snapshot is None:
-                self.comm.Split(MPI.UNDEFINED)
-            else:
-                comm = self.comm.Split()
-                assert isinstance(comm, MPI.Intracomm)
-                start = self.get_grid_coord_from_coordinate(snapshot.start)
-                stop = self.get_grid_coord_from_coordinate(snapshot.stop) + 1
-                snapshot.comm = comm.Create_cart((stop - start).tolist())
-
-                snapshot.start = self.global_to_local_coordinate(snapshot.start)
-                # Calculate number of steps needed to bring the start
-                # into the local grid (and not in the negative halo)
-                snapshot.offset = np.where(
-                    snapshot.start < self.negative_halo_offset,
-                    np.abs((snapshot.start - self.negative_halo_offset) // snapshot.step),
-                    snapshot.offset,
-                )
-                snapshot.start += snapshot.step * snapshot.offset
-
-                snapshot.stop = self.global_to_local_coordinate(snapshot.stop)
-                snapshot.stop = np.where(
-                    snapshot.stop > self.size,
-                    self.size + ((snapshot.stop - self.size) % snapshot.step),
-                    snapshot.stop,
-                )
-
-        self.snapshots = [s for s in snapshots if s is not None]
-
-    def scatter_3d_array(self, array: npt.NDArray) -> npt.NDArray:
-        """Scatter a 3D array to each MPI rank
-
-        Use to distribute a 3D array across MPI ranks. Each rank will
-        receive its own segment of the array including a negative halo,
-        but NOT a positive halo.
-
-        Args:
-            array: Array to be scattered
-
-        Returns:
-            scattered_array: Local extent of the array for the current
-                MPI rank.
-        """
-        # TODO: Use Scatter instead of Bcast
-        self.comm.Bcast(array, root=self.COORDINATOR_RANK)
-
-        return array[
-            self.lower_extent[Dim.X] : self.upper_extent[Dim.X],
-            self.lower_extent[Dim.Y] : self.upper_extent[Dim.Y],
-            self.lower_extent[Dim.Z] : self.upper_extent[Dim.Z],
-        ].copy(order="C")
-
-    def scatter_4d_array(self, array: npt.NDArray) -> npt.NDArray:
-        """Scatter a 4D array to each MPI rank
-
-        Use to distribute a 4D array across MPI ranks. The first
-        dimension is ignored when partitioning the array. Each rank will
-        receive its own segment of the array including a negative halo,
-        but NOT a positive halo.
-
-        Args:
-            array: Array to be scattered
-
-        Returns:
-            scattered_array: Local extent of the array for the current
-                MPI rank.
-        """
-        # TODO: Use Scatter instead of Bcast
-        self.comm.Bcast(array, root=self.COORDINATOR_RANK)
-
-        return array[
-            :,
-            self.lower_extent[Dim.X] : self.upper_extent[Dim.X],
-            self.lower_extent[Dim.Y] : self.upper_extent[Dim.Y],
-            self.lower_extent[Dim.Z] : self.upper_extent[Dim.Z],
-        ].copy(order="C")
-
-    def scatter_4d_array_with_positive_halo(self, array: npt.NDArray) -> npt.NDArray:
-        """Scatter a 4D array to each MPI rank
-
-        Use to distribute a 4D array across MPI ranks. The first
-        dimension is ignored when partitioning the array. Each rank will
-        receive its own segment of the array including both a negative
-        and positive halo.
-
-        Args:
-            array: Array to be scattered
-
-        Returns:
-            scattered_array: Local extent of the array for the current
-                MPI rank.
-        """
-        # TODO: Use Scatter instead of Bcast
-        self.comm.Bcast(array, root=self.COORDINATOR_RANK)
-
-        return array[
-            :,
-            self.lower_extent[Dim.X] : self.upper_extent[Dim.X] + 1,
-            self.lower_extent[Dim.Y] : self.upper_extent[Dim.Y] + 1,
-            self.lower_extent[Dim.Z] : self.upper_extent[Dim.Z] + 1,
-        ].copy(order="C")
-
-    def distribute_grid(self):
-        """Distribute grid properties and objects to all MPI ranks.
-
-        Global properties/objects are broadcast to all ranks whereas
-        local properties/objects are scattered to the relevant ranks.
-        """
-        pass
-        # self.scatter_snapshots()
-
-        # self._halo_swap_array(self.ID[0])
-        # self._halo_swap_array(self.ID[1])
-        # self._halo_swap_array(self.ID[2])
-        # self._halo_swap_array(self.ID[3])
-        # self._halo_swap_array(self.ID[4])
-        # self._halo_swap_array(self.ID[5])
-
     def gather_grid_objects(self):
         """Gather sources and receivers."""

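The removed scatter_3d_array/scatter_4d_array helpers broadcast the whole array and then slice out each rank's extent, which is why the code carries a TODO to use Scatter instead of Bcast. Below is a minimal sketch of that broadcast-then-slice pattern, assuming mpi4py; the even split along x stands in for the class's lower_extent/upper_extent bookkeeping and ignores halos.

# Sketch only: Bcast the whole array, then keep this rank's slab (run under mpiexec).
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
COORDINATOR_RANK = 0

full = np.zeros((8, 4, 4), dtype=np.float64)
if comm.rank == COORDINATOR_RANK:
    full[:] = np.arange(full.size, dtype=np.float64).reshape(full.shape)

# Collective broadcast: afterwards every rank holds the complete array.
comm.Bcast(full, root=COORDINATOR_RANK)

# Split the x axis evenly across ranks (illustrative; no halo handling here).
nx = full.shape[0]
lower = (comm.rank * nx) // comm.size
upper = ((comm.rank + 1) * nx) // comm.size
local = full[lower:upper, :, :].copy(order="C")
print(comm.rank, local.shape)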
@@ -670,7 +485,6 @@ class MPIGrid(FDTDGrid):
             raise ValueError

         self.set_halo_map()
-        self.distribute_grid()

         # TODO: Check PML is not thicker than the grid size

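The scatter_snapshots method removed above relies on comm.Split() being a collective call: every rank must take part, and ranks with no stake in a given snapshot pass MPI.UNDEFINED, otherwise the program deadlocks (the comment about all ranks needing the same number of 'snapshots' makes the same point). Below is a minimal sketch of that pattern with a placeholder membership test instead of the snapshot bookkeeping.

# Sketch of the collective-Split pattern (run under mpiexec).
from mpi4py import MPI

comm = MPI.COMM_WORLD

participates = comm.rank % 2 == 0           # placeholder membership test
color = 0 if participates else MPI.UNDEFINED

subcomm = comm.Split(color)                  # collective: called by every rank
if subcomm != MPI.COMM_NULL:
    print(f"rank {comm.rank} is rank {subcomm.rank} of {subcomm.size} in the sub-communicator")
    subcomm.Free()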
@@ -242,6 +242,7 @@ class MPIModel(Model):
         if self.G.snapshots:
             save_snapshots(self.G.snapshots)

+        # TODO: Output sources and receivers using parallel I/O
         self.G.gather_grid_objects()

         # Write output data to file if they are any receivers in any grids