Mirrored from https://gitee.com/sunhf/gprMax.git
Synced 2025-08-08 15:27:57 +08:00
More adjustments to MPI print messages.
@@ -384,7 +384,7 @@ def run_mpi_sim(args, inputfile, usernamespace, optparams=None):
 size = comm.Get_size() # total number of processes
 rank = comm.Get_rank() # rank of this process
 tsimstart = perf_counter()
-print('MPI master ({}, rank {}) on {} using {} workers\n'.format(comm, rank, hostname, numworkers))
+print('MPI master ({}, rank {}) on {} using {} workers\n'.format(comm.name, rank, hostname, numworkers))

 # Assemble a sys.argv replacement to pass to spawned worker
 # N.B This is required as sys.argv not available when gprMax is called via api()
@@ -421,6 +421,7 @@ def run_mpi_sim(args, inputfile, usernamespace, optparams=None):

 # Spawn workers
 newcomm = comm.Spawn(sys.executable, args=['-m', 'gprMax'] + myargv + [workerflag], maxprocs=numworkers)
+newcomm.Set_name(comm.name)

 # Reply to whoever asks until done
 for work in worklist:
@@ -470,7 +471,7 @@ def run_mpi_sim(args, inputfile, usernamespace, optparams=None):
 modelusernamespace = usernamespace

 # Run the model
-print('MPI worker ({}, rank {}) starting model {}/{}{} on {}\n'.format(comm, rank, currentmodelrun, numbermodelruns, gpuinfo, hostname))
+print('MPI worker ({}, rank {}) starting model {}/{}{} on {}\n'.format(comm.name, rank, currentmodelrun, numbermodelruns, gpuinfo, hostname))
 run_model(args, currentmodelrun, modelend - 1, numbermodelruns, inputfile, modelusernamespace)

 # Shutdown
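For context on the diff above: printing an mpi4py communicator object gives an opaque string such as '<mpi4py.MPI.Intracomm object at 0x...>', so the commit switches both print calls to comm.name and also names the intercommunicator returned by Spawn. Below is a minimal standalone sketch of that pattern, assuming mpi4py and an MPI implementation that supports dynamic process spawning; the inline worker code and worker count are illustrative placeholders, not gprMax code.

# A minimal sketch of the pattern above (not gprMax itself): print the MPI
# name of a communicator rather than the object, and give the spawned
# intercommunicator a readable name. The inline worker code and maxprocs=2
# are illustrative assumptions; run under e.g. 'mpirun -np 1 python sketch.py'.
import sys

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# repr(comm) is an opaque '<mpi4py.MPI.Intracomm object at 0x...>' string;
# comm.name is the readable MPI name, typically 'MPI_COMM_WORLD'.
print('MPI master ({}, rank {})'.format(comm.name, rank))

# Spawn two workers running a tiny inline script, then label the resulting
# intercommunicator with the parent communicator's name.
workercode = ("from mpi4py import MPI; "
              "parent = MPI.Comm.Get_parent(); "
              "print('MPI worker, rank', MPI.COMM_WORLD.Get_rank()); "
              "parent.Disconnect()")
newcomm = comm.Spawn(sys.executable, args=['-c', workercode], maxprocs=2)
newcomm.Set_name(comm.name)
print('spawned intercommunicator is now named {!r}'.format(newcomm.name))

newcomm.Disconnect()

Note that, per the MPI standard, MPI_Comm_set_name is a local operation: the name set on newcomm applies on the calling (master) side and is not automatically propagated to the workers' view of their parent intercommunicator.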