Mirrored from https://gitee.com/sunhf/gprMax.git
Synced 2025-08-07 15:10:13 +08:00
Formatting and comments update.
@@ -33,7 +33,7 @@ from ._version import __version__
 from .constants import c, e0, m0, z0
 from .exceptions import GeneralError
 from .fields_update import update_electric, update_magnetic, update_electric_dispersive_multipole_A, update_electric_dispersive_multipole_B, update_electric_dispersive_1pole_A, update_electric_dispersive_1pole_B
-from .grid import FDTDGrid, dispersion_check, Ix, Iy, Iz
+from .grid import FDTDGrid, dispersion_check
 from .input_cmds_geometry import process_geometrycmds
 from .input_cmds_file import process_python_include_code, write_processed_file, check_cmd_names
 from .input_cmds_multiuse import process_multicmds
@@ -125,7 +125,7 @@ def run_main(args):
     else:
         run_std_sim(args, numbermodelruns, inputfile, usernamespace)

-    print('\nSimulation completed.\n{}\n'.format(68*'*'))
+    print('\nSimulation completed.\n{}\n'.format(68 * '*'))


 def run_std_sim(args, numbermodelruns, inputfile, usernamespace, optparams=None):
@@ -165,13 +165,14 @@ def run_benchmark_sim(args, inputfile, usernamespace):
     # Number of threads to test - start from max physical CPU cores and divide in half until 1
     thread = psutil.cpu_count(logical=False)
     threads = [thread]
-    while not thread%2:
+    while not thread % 2:
         thread /= 2
         threads.append(int(thread))

     benchtimes = np.zeros(len(threads))

     numbermodelruns = len(threads)
     usernamespace['number_model_runs'] = numbermodelruns
     tsimstart = perf_counter()
     for modelrun in range(1, numbermodelruns + 1):
         os.environ['OMP_NUM_THREADS'] = str(threads[modelrun - 1])
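The thread sweep above halves the physical core count until an odd value is reached; the benchmark then runs one model per entry in that list. A minimal standalone sketch of just that sweep, reusing the names from the hunk (the final print is illustrative only):

import psutil

# Start from the number of physical CPU cores and keep halving while the
# count is even, e.g. 8 -> [8, 4, 2, 1] or 6 -> [6, 3].
thread = psutil.cpu_count(logical=False)
threads = [thread]
while not thread % 2:
    thread /= 2
    threads.append(int(thread))

print(threads)  # thread counts the benchmark loop will test, one model run per entry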
@@ -181,7 +182,7 @@ def run_benchmark_sim(args, inputfile, usernamespace):

     # Save number of threads and benchmarking times to NumPy archive
     threads = np.array(threads)
-    np.savez(os.path.splitext(inputfile)[0], threads=threads, benchtimes=benchtimes)
+    np.savez(os.path.splitext(inputfile)[0], threads=threads, benchtimes=benchtimes, version=__version__)

     print('\nTotal simulation time [HH:MM:SS]: {}'.format(datetime.timedelta(seconds=int(tsimend - tsimstart))))

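With this change the benchmark archive (written next to the input file as <basename>.npz) also records the gprMax version. A hedged sketch of reading it back (the file name is hypothetical):

import numpy as np

# Load the archive written by np.savez above; the keys match the keyword arguments.
results = np.load('benchmark.npz')  # hypothetical file name
threads = results['threads']
benchtimes = results['benchtimes']
version = results['version']        # only present in archives written after this change
print('gprMax {}: fastest run {:.2f} s with {} threads'.format(
    version, benchtimes.min(), threads[benchtimes.argmin()]))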
@@ -215,7 +216,7 @@ def run_mpi_sim(args, numbermodelruns, inputfile, usernamespace, optparams=None)
         closedworkers = 0
         print('Master: PID {} on {} using {} workers.'.format(os.getpid(), name, numworkers))
         while closedworkers < numworkers:
-            data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
+            # data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)  # Check if this line is really needed
             source = status.Get_source()
             tag = status.Get_tag()

@@ -238,7 +239,7 @@ def run_mpi_sim(args, numbermodelruns, inputfile, usernamespace, optparams=None)
         print('Worker {}: PID {} on {} requesting {} OpenMP threads.'.format(rank, os.getpid(), name, os.environ.get('OMP_NUM_THREADS')))
         while True:
             comm.send(None, dest=0, tag=tags.READY.value)
-            modelrun = comm.recv(source=0, tag=MPI.ANY_TAG, status=status) # Receive a model number to run from the master
+            modelrun = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)  # Receive a model number to run from the master
             tag = status.Get_tag()

             # Run a model
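The two MPI hunks above touch the tag-based master/worker exchange. For orientation, a self-contained sketch of that general pattern with mpi4py (this is not gprMax's code; the Tags enum, the model count and the worker's print are placeholders for the real work dispatch and run_model call):

from enum import Enum
from mpi4py import MPI

class Tags(Enum):
    READY = 0
    START = 1
    EXIT = 2

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
status = MPI.Status()

if rank == 0:
    # Master: hand out model numbers until none remain, then tell workers to exit.
    nummodels, nextmodel = 4, 1
    numworkers, closedworkers = comm.Get_size() - 1, 0
    while closedworkers < numworkers:
        comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        source, tag = status.Get_source(), status.Get_tag()
        if tag == Tags.READY.value:
            if nextmodel <= nummodels:
                comm.send(nextmodel, dest=source, tag=Tags.START.value)
                nextmodel += 1
            else:
                comm.send(None, dest=source, tag=Tags.EXIT.value)
        elif tag == Tags.EXIT.value:
            closedworkers += 1
else:
    # Worker: request work, run it, and stop when told to exit.
    while True:
        comm.send(None, dest=0, tag=Tags.READY.value)
        modelrun = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
        if status.Get_tag() == Tags.EXIT.value:
            comm.send(None, dest=0, tag=Tags.EXIT.value)
            break
        print('Worker {} running model {}'.format(rank, modelrun))  # placeholder for run_model()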
@@ -281,8 +282,8 @@ def run_model(args, modelrun, numbermodelruns, inputfile, usernamespace):
     global G

     # Normal model reading/building process; bypassed if geometry information to be reused
-    if not 'G' in globals():
-        print('\n{}\n\nModel input file: {}\n'.format(68*'*', inputfile))
+    if 'G' not in globals():
+        print('\n{}\n\nModel input file: {}\n'.format(68 * '*', inputfile))

         # Add the current model run to namespace that can be accessed by user in any Python code blocks in input file
         usernamespace['current_model_run'] = modelrun
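The globals() check rewritten above is what lets a later model run reuse an already-built grid: the body only executes when no G object is cached from a previous run. A minimal sketch of that caching pattern (build_grid and the dictionary it returns are placeholders, not gprMax code):

def build_grid():
    # Stand-in for the normal model reading/building process.
    return {'nx': 100, 'ny': 100, 'nz': 100}

def run_model(modelrun):
    global G
    # Rebuild only when no grid is cached from an earlier run (geometry reuse).
    if 'G' not in globals():
        G = build_grid()
    return G

print(run_model(1) is run_model(2))  # True: the second call reuses the cached object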
@@ -355,7 +356,7 @@ def run_model(args, modelrun, numbermodelruns, inputfile, usernamespace):
         if G.messages:
             print('\nMaterials:\n')
             print('ID\tName\t\tProperties')
-            print('{}'.format('-'*50))
+            print('{}'.format('-' * 50))
         for material in G.materials:

             # Calculate update coefficients for material
@@ -370,7 +371,7 @@ def run_model(args, modelrun, numbermodelruns, inputfile, usernamespace):
             if Material.maxpoles != 0:
                 z = 0
                 for pole in range(Material.maxpoles):
-                    G.updatecoeffsdispersive[material.numID, z:z+3] = e0 * material.eqt2[pole], material.eqt[pole], material.zt[pole]
+                    G.updatecoeffsdispersive[material.numID, z:z + 3] = e0 * material.eqt2[pole], material.eqt[pole], material.zt[pole]
                     z += 3

             if G.messages:
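The dispersive-update array addressed here packs three coefficients per pole into each material's row, which is why the slice advances in steps of three. A tiny illustration of that layout only (shapes and values are made up):

import numpy as np

maxpoles, nummaterials = 2, 4
# One row per material, three coefficients per pole, matching the z:z + 3 slice above.
updatecoeffsdispersive = np.zeros((nummaterials, 3 * maxpoles))

numID, z = 1, 0
for pole in range(maxpoles):
    updatecoeffsdispersive[numID, z:z + 3] = 0.1, 0.2, 0.3  # placeholder coefficient values
    z += 3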
@@ -423,7 +424,7 @@ def run_model(args, modelrun, numbermodelruns, inputfile, usernamespace):
         tgeostart = perf_counter()
         for geometryview in G.geometryviews:
             geometryview.write_vtk(modelrun, numbermodelruns, G)
-            #geometryview.write_xdmf(modelrun, numbermodelruns, G)
+            # geometryview.write_xdmf(modelrun, numbermodelruns, G)
         tgeoend = perf_counter()
         print('\nGeometry file(s) written in [HH:MM:SS]: {}'.format(datetime.timedelta(seconds=int(tgeoend - tgeostart))))

@@ -519,8 +520,8 @@ def run_model(args, modelrun, numbermodelruns, inputfile, usernamespace):
         write_hdf5(outputfile, G.Ex, G.Ey, G.Ez, G.Hx, G.Hy, G.Hz, G)

         tsolveend = perf_counter()
-        print('\n\nSolving took [HH:MM:SS]: {}'.format(datetime.timedelta(seconds=int(tsolveend - tsolvestart))))
-        print('Peak memory (approx) used: {}'.format(human_size(p.memory_info().rss)))
+        print('\n\nSolving took [HH:MM:SS]: {} @ {:g} cells/s'.format(datetime.timedelta(seconds=int(tsolveend - tsolvestart)), (G.nx * G.ny * G.nz) / (tsolveend - tsolvestart)))
+        print('Memory (RAM) usage: ~{}'.format(human_size(p.memory_info().rss)))

         ##################################
         # End - Main FDTD calculations #
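The reworked summary line adds a cells-per-second figure alongside the solve time, and the memory line reports the process RSS. A hedged sketch of how those numbers can be produced (grid size is illustrative, and a plain megabyte conversion stands in for gprMax's human_size helper):

import datetime
from time import perf_counter
import psutil

tsolvestart = perf_counter()
# ... the main FDTD loop would run here ...
tsolveend = perf_counter()

nx, ny, nz = 100, 100, 100              # illustrative grid size
solvetime = tsolveend - tsolvestart
rate = (nx * ny * nz) / solvetime       # cells processed per second of solve time
rss = psutil.Process().memory_info().rss

print('Solving took [HH:MM:SS]: {} @ {:g} cells/s'.format(
    datetime.timedelta(seconds=int(solvetime)), rate))
print('Memory (RAM) usage: ~{:.0f} MB'.format(rss / 1024 ** 2))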
@@ -531,7 +532,3 @@ def run_model(args, modelrun, numbermodelruns, inputfile, usernamespace):
     del G

     return int(tsolveend - tsolvestart)
-
-
-
-