Moved run_model function into its own separate module.

This commit is contained in:
Craig Warren
2017-02-21 12:34:01 +00:00
Parent 46577b0763
Commit a80045b18e


@@ -21,33 +21,16 @@
import argparse
import datetime
from enum import Enum
import itertools
import os
import psutil
import sys
from time import perf_counter
from colorama import init, Fore, Style
init()
import numpy as np
from terminaltables import AsciiTable
from tqdm import tqdm
from ._version import __version__
from .constants import c, e0, m0, z0
from .exceptions import GeneralError
from .fields_outputs import write_hdf5_outputfile
from .fields_update import update_electric, update_magnetic, update_electric_dispersive_multipole_A, update_electric_dispersive_multipole_B, update_electric_dispersive_1pole_A, update_electric_dispersive_1pole_B
from .grid import FDTDGrid, dispersion_analysis
from .input_cmds_geometry import process_geometrycmds
from .input_cmds_file import process_python_include_code, write_processed_file, check_cmd_names
from .input_cmds_multiuse import process_multicmds
from .input_cmds_singleuse import process_singlecmds
from .materials import Material, process_materials
from .pml import PML, build_pmls
from .receivers import store_outputs
from .utilities import logo, human_size, get_host_info, get_terminal_width, round_value
from .yee_cell_build import build_electric_components, build_magnetic_components
from gprMax._version import __version__
from gprMax.constants import c, e0, m0, z0
from gprMax.exceptions import GeneralError
from gprMax.model_build_run import run_model
from gprMax.utilities import logo, human_size, get_host_info, get_terminal_width
def main():
@@ -57,7 +40,7 @@ def main():
logo(__version__ + ' (Bowmore)')
# Parse command line arguments
parser = argparse.ArgumentParser(prog='gprMax')
parser = argparse.ArgumentParser(prog='gprMax', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('inputfile', help='path to, and name of inputfile')
parser.add_argument('-n', default=1, type=int, help='number of times to run the input file, e.g. to create a B-scan')
parser.add_argument('-mpi', action='store_true', default=False, help='flag to switch on MPI task farm')
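A note on the new formatter_class: argparse.ArgumentDefaultsHelpFormatter makes --help print each argument's default automatically, so help strings no longer need to repeat values like the default of 1 for -n. A minimal standalone sketch (not gprMax code):

    import argparse

    # ArgumentDefaultsHelpFormatter appends "(default: ...)" to every
    # non-empty help string, keeping displayed defaults in sync with code.
    parser = argparse.ArgumentParser(prog='demo', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-n', default=1, type=int, help='number of times to run the input file')
    parser.print_help()
    # prints: -n N  number of times to run the input file (default: 1)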
@@ -114,34 +97,40 @@ def run_main(args):
# Create a separate namespace that users can access in any Python code blocks in the input file
usernamespace = {'c': c, 'e0': e0, 'm0': m0, 'z0': z0, 'number_model_runs': numbermodelruns, 'input_directory': inputdirectory}
# Process for Taguchi optimisation
if args.opt_taguchi:
#######################################
# Process for benchmarking simulation #
#######################################
if args.benchmark:
run_benchmark_sim(args, inputfile, usernamespace)
####################################################
# Process for simulation with Taguchi optimisation #
####################################################
elif args.opt_taguchi:
if args.benchmark:
raise GeneralError('Taguchi optimisation should not be used with benchmarking mode')
from gprMax.optimisation_taguchi import run_opt_sim
run_opt_sim(args, numbermodelruns, inputfile, usernamespace)
# Process for benchmarking simulation
elif args.benchmark:
run_benchmark_sim(args, inputfile, usernamespace)
# Process for standard simulation
################################################
# Process for standard simulation (CPU or GPU) #
################################################
else:
# Mixed mode MPI/OpenMP - MPI task farm for models with each model parallelised with OpenMP
# Mixed mode MPI with OpenMP or CUDA - MPI task farm for models with each model parallelised with OpenMP (CPU) or CUDA (GPU)
if args.mpi:
if args.benchmark:
raise GeneralError('MPI should not be used with benchmarking mode')
if numbermodelruns == 1:
raise GeneralError('MPI is not beneficial when there is only one model to run')
run_mpi_sim(args, numbermodelruns, inputfile, usernamespace)
# Standard behaviour - part of a job array on Open Grid Scheduler/Grid Engine with each model parallelised with OpenMP
# Standard behaviour - part of a job array on Open Grid Scheduler/Grid Engine with each model parallelised with OpenMP (CPU) or CUDA (GPU)
elif args.taskid:
if args.benchmark:
raise GeneralError('A job array should not be used with benchmarking mode')
if numbermodelruns == 1:
raise GeneralError('A job array is not beneficial when there is only one model to run')
run_job_array_sim(args, numbermodelruns, inputfile, usernamespace)
# Standard behaviour - models run serially with each model parallelised with OpenMP
# Standard behaviour - models run serially with each model parallelised with OpenMP (CPU) or CUDA (GPU)
else:
run_std_sim(args, numbermodelruns, inputfile, usernamespace)
@@ -158,17 +147,17 @@ def run_std_sim(args, numbermodelruns, inputfile, usernamespace, optparams=None)
"""
tsimstart = perf_counter()
for modelrun in range(1, numbermodelruns + 1):
for currentmodelrun in range(1, numbermodelruns + 1):
if optparams: # If Taguchi optimisation, add specific value for each parameter to optimise for each experiment to user-accessible namespace
tmp = {}
tmp.update((key, value[modelrun - 1]) for key, value in optparams.items())
tmp.update((key, value[currentmodelrun - 1]) for key, value in optparams.items())
modelusernamespace = usernamespace.copy()
modelusernamespace.update({'optparams': tmp})
else:
modelusernamespace = usernamespace
run_model(args, modelrun, numbermodelruns, inputfile, modelusernamespace)
run_model(args, currentmodelrun, numbermodelruns, inputfile, modelusernamespace)
tsimend = perf_counter()
simcompletestr = '\n=== Simulation completed in [HH:MM:SS]: {}'.format(datetime.timedelta(seconds=int(tsimend - tsimstart)))
simcompletestr = '\n=== Simulation completed in [HH:MM:SS]: {}'.format(datetime.timedelta(seconds=tsimend - tsimstart))
print('{} {}\n'.format(simcompletestr, '=' * (get_terminal_width() - 1 - len(simcompletestr))))
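The optparams bookkeeping above is easier to follow with concrete values: each key holds one value per experiment, and run N picks out index N - 1. A small sketch with a hypothetical parameter name ('depth' is invented for illustration):

    # Hypothetical Taguchi parameter: one value per model run.
    optparams = {'depth': [0.10, 0.15, 0.20]}
    usernamespace = {'number_model_runs': 3}

    currentmodelrun = 2  # second of three runs
    tmp = {}
    tmp.update((key, value[currentmodelrun - 1]) for key, value in optparams.items())
    modelusernamespace = usernamespace.copy()
    modelusernamespace.update({'optparams': tmp})
    # modelusernamespace['optparams'] == {'depth': 0.15}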
@@ -183,19 +172,19 @@ def run_job_array_sim(args, numbermodelruns, inputfile, usernamespace, optparams
optparams (dict): Optional argument. For Taguchi optimisation it provides the parameters to optimise and their values.
"""
modelrun = args.taskid
currentmodelrun = args.taskid
tsimstart = perf_counter()
if optparams: # If Taguchi optimisation, add specific value for each parameter to optimise for each experiment to user-accessible namespace
tmp = {}
tmp.update((key, value[modelrun - 1]) for key, value in optparams.items())
tmp.update((key, value[currentmodelrun - 1]) for key, value in optparams.items())
modelusernamespace = usernamespace.copy()
modelusernamespace.update({'optparams': tmp})
else:
modelusernamespace = usernamespace
run_model(args, modelrun, numbermodelruns, inputfile, modelusernamespace)
run_model(args, currentmodelrun, numbermodelruns, inputfile, modelusernamespace)
tsimend = perf_counter()
simcompletestr = '\n=== Simulation completed in [HH:MM:SS]: {}'.format(datetime.timedelta(seconds=int(tsimend - tsimstart)))
simcompletestr = '\n=== Simulation completed in [HH:MM:SS]: {}'.format(datetime.timedelta(seconds=tsimend - tsimstart))
print('{} {}\n'.format(simcompletestr, '=' * (get_terminal_width() - 1 - len(simcompletestr))))
@@ -226,10 +215,10 @@ def run_benchmark_sim(args, inputfile, usernamespace):
numbermodelruns = len(threads)
usernamespace['number_model_runs'] = numbermodelruns
for modelrun in range(1, numbermodelruns + 1):
os.environ['OMP_NUM_THREADS'] = str(threads[modelrun - 1])
tsolve = run_model(args, modelrun, numbermodelruns, inputfile, usernamespace)
benchtimes[modelrun - 1] = tsolve
for currentmodelrun in range(1, numbermodelruns + 1):
os.environ['OMP_NUM_THREADS'] = str(threads[currentmodelrun - 1])
tsolve = run_model(args, currentmodelrun, numbermodelruns, inputfile, usernamespace)
benchtimes[currentmodelrun - 1] = tsolve
# Save number of threads and benchmarking times to NumPy archive
threads = np.array(threads)
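The hunk ends just as the benchmark results are being collected; a sketch of the kind of NumPy archive save this feeds into (the file and key names here are assumptions, not necessarily what gprMax writes):

    import numpy as np

    # Assumed shape of the archive write that follows the code above.
    threads = np.array([8, 4, 2, 1])                 # OpenMP thread counts tried
    benchtimes = np.array([12.3, 18.7, 31.2, 55.9])  # solve time (s) per run
    np.savez('benchmark_results.npz', threads=threads, benchtimes=benchtimes)

    with np.load('benchmark_results.npz') as data:
        for t, s in zip(data['threads'], data['benchtimes']):
            print('{} threads: {:.1f} s'.format(t, s))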
@@ -266,7 +255,7 @@ def run_mpi_sim(args, numbermodelruns, inputfile, usernamespace, optparams=None)
# Master process
if rank == 0:
modelrun = 1
currentmodelrun = 1
numworkers = size - 1
closedworkers = 0
print('Master: PID {} on {} using {} workers.'.format(os.getpid(), name, numworkers))
@@ -276,10 +265,10 @@ def run_mpi_sim(args, numbermodelruns, inputfile, usernamespace, optparams=None)
tag = status.Get_tag()
if tag == tags.READY.value: # Worker is ready, so send it a task
if modelrun < numbermodelruns + 1:
comm.send(modelrun, dest=source, tag=tags.START.value)
print('Master: sending model {} to worker {}.'.format(modelrun, source))
modelrun += 1
if currentmodelrun < numbermodelruns + 1:
comm.send(currentmodelrun, dest=source, tag=tags.START.value)
print('Master: sending model {} to worker {}.'.format(currentmodelrun, source))
currentmodelrun += 1
else:
comm.send(None, dest=source, tag=tags.EXIT.value)
@@ -295,20 +284,20 @@ def run_mpi_sim(args, numbermodelruns, inputfile, usernamespace, optparams=None)
print('Worker {}: PID {} on {}.'.format(rank, os.getpid(), name))
while True:
comm.send(None, dest=0, tag=tags.READY.value)
modelrun = comm.recv(source=0, tag=MPI.ANY_TAG, status=status) #  Receive a model number to run from the master
currentmodelrun = comm.recv(source=0, tag=MPI.ANY_TAG, status=status) #  Receive a model number to run from the master
tag = status.Get_tag()
# Run a model
if tag == tags.START.value:
if optparams: # If Taguchi optimisation, add specific value for each parameter to optimise for each experiment to user-accessible namespace
tmp = {}
tmp.update((key, value[modelrun - 1]) for key, value in optparams.items())
tmp.update((key, value[currentmodelrun - 1]) for key, value in optparams.items())
modelusernamespace = usernamespace.copy()
modelusernamespace.update({'optparams': tmp})
else:
modelusernamespace = usernamespace
run_model(args, modelrun, numbermodelruns, inputfile, modelusernamespace)
run_model(args, currentmodelrun, numbermodelruns, inputfile, modelusernamespace)
comm.send(None, dest=0, tag=tags.DONE.value)
elif tag == tags.EXIT.value:
@@ -317,282 +306,5 @@ def run_mpi_sim(args, numbermodelruns, inputfile, usernamespace, optparams=None)
comm.send(None, dest=0, tag=tags.EXIT.value)
tsimend = perf_counter()
simcompletestr = '\n=== Simulation completed in [HH:MM:SS]: {}'.format(datetime.timedelta(seconds=int(tsimend - tsimstart)))
simcompletestr = '\n=== Simulation completed in [HH:MM:SS]: {}'.format(datetime.timedelta(seconds=tsimend - tsimstart))
print('{} {}\n'.format(simcompletestr, '=' * (get_terminal_width() - 1 - len(simcompletestr))))
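The master/worker exchange above is a standard MPI task-farm pattern. A minimal, self-contained sketch of the same READY/START/DONE/EXIT handshake (requires mpi4py; run with e.g. mpiexec -n 4 python taskfarm.py, where the file name is arbitrary):

    from enum import Enum
    from mpi4py import MPI

    class tags(Enum):
        READY = 0
        START = 1
        DONE = 2
        EXIT = 3

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    status = MPI.Status()
    numbermodelruns = 10

    if rank == 0:  # Master: hand out model numbers until none remain
        currentmodelrun, closedworkers = 1, 0
        numworkers = comm.Get_size() - 1
        while closedworkers < numworkers:
            comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
            source, tag = status.Get_source(), status.Get_tag()
            if tag == tags.READY.value:  # Worker wants a task
                if currentmodelrun <= numbermodelruns:
                    comm.send(currentmodelrun, dest=source, tag=tags.START.value)
                    currentmodelrun += 1
                else:  # Nothing left: tell the worker to shut down
                    comm.send(None, dest=source, tag=tags.EXIT.value)
            elif tag == tags.EXIT.value:
                closedworkers += 1
    else:  # Worker: ask for tasks until told to exit
        while True:
            comm.send(None, dest=0, tag=tags.READY.value)
            task = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
            if status.Get_tag() == tags.EXIT.value:
                break
            # run_model would be called here with the received model number
            comm.send(None, dest=0, tag=tags.DONE.value)
        comm.send(None, dest=0, tag=tags.EXIT.value)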
def run_model(args, modelrun, numbermodelruns, inputfile, usernamespace):
"""Runs a model - processes the input file; builds the Yee cells; calculates update coefficients; runs main FDTD loop.
Args:
args (dict): Namespace with command line arguments.
modelrun (int): Current model run number.
numbermodelruns (int): Total number of model runs.
inputfile (str): Name of the input file to open.
usernamespace (dict): Namespace that can be accessed by user in any Python code blocks in input file.
Returns:
tsolve (int): Length of time (seconds) of main FDTD calculations
"""
# Monitor memory usage
p = psutil.Process()
# Declare variable to hold FDTDGrid class
global G
# Normal model reading/building process; bypassed if geometry information to be reused
if 'G' not in globals():
inputfilestr = '\n--- Model {}/{}, input file: {}'.format(modelrun, numbermodelruns, inputfile)
print(Fore.GREEN + '{} {}\n'.format(inputfilestr, '-' * (get_terminal_width() - 1 - len(inputfilestr))) + Style.RESET_ALL)
# Add the current model run to namespace that can be accessed by user in any Python code blocks in input file
usernamespace['current_model_run'] = modelrun
# Read input file and process any Python or include commands
processedlines = process_python_include_code(inputfile, usernamespace)
# Print constants/variables in user-accessible namespace
uservars = ''
for key, value in sorted(usernamespace.items()):
if key != '__builtins__':
uservars += '{}: {}, '.format(key, value)
print('Constants/variables used/available for Python scripting: {{{}}}\n'.format(uservars[:-2]))
# Write a file containing the input commands after Python or include commands have been processed
if args.write_processed:
write_processed_file(inputfile, modelrun, numbermodelruns, processedlines)
# Check validity of command names and that essential commands are present
singlecmds, multicmds, geometry = check_cmd_names(processedlines)
# Initialise an instance of the FDTDGrid class
G = FDTDGrid()
G.inputfilename = os.path.split(inputfile)[1]
G.inputdirectory = os.path.dirname(os.path.abspath(inputfile))
# Create built-in materials
m = Material(0, 'pec')
m.se = float('inf')
m.type = 'builtin'
m.averagable = False
G.materials.append(m)
m = Material(1, 'free_space')
m.type = 'builtin'
G.materials.append(m)
# Process parameters for commands that can only occur once in the model
process_singlecmds(singlecmds, G)
# Process parameters for commands that can occur multiple times in the model
print()
process_multicmds(multicmds, G)
# Initialise an array for volumetric material IDs (solid), boolean arrays for specifying materials not to be averaged (rigid),
# an array for cell edge IDs (ID)
G.initialise_geometry_arrays()
# Initialise arrays for the field components
G.initialise_field_arrays()
# Process geometry commands in the order they were given
process_geometrycmds(geometry, G)
# Build the PMLs and calculate initial coefficients
print()
if all(value == 0 for value in G.pmlthickness.values()):
if G.messages:
print('PML boundaries: switched off')
pass # If all the PMLs are switched off don't need to build anything
else:
if G.messages:
if all(value == G.pmlthickness['x0'] for value in G.pmlthickness.values()):
pmlinfo = str(G.pmlthickness['x0']) + ' cells'
else:
pmlinfo = ''
for key, value in G.pmlthickness.items():
pmlinfo += '{}: {} cells, '.format(key, value)
pmlinfo = pmlinfo[:-2]
print('PML boundaries: {}'.format(pmlinfo))
pbar = tqdm(total=sum(1 for value in G.pmlthickness.values() if value > 0), desc='Building PML boundaries', ncols=get_terminal_width() - 1, file=sys.stdout, disable=G.tqdmdisable)
build_pmls(G, pbar)
pbar.close()
# Build the model, i.e. set the material properties (ID) for every edge of every Yee cell
print()
pbar = tqdm(total=2, desc='Building main grid', ncols=get_terminal_width() - 1, file=sys.stdout, disable=G.tqdmdisable)
build_electric_components(G.solid, G.rigidE, G.ID, G)
pbar.update()
build_magnetic_components(G.solid, G.rigidH, G.ID, G)
pbar.update()
pbar.close()
# Process any voltage sources (that have resistance) to create a new material at the source location
for voltagesource in G.voltagesources:
voltagesource.create_material(G)
# Initialise arrays of update coefficients to pass to update functions
G.initialise_std_update_coeff_arrays()
# Initialise arrays of update coefficients and temporary values if there are any dispersive materials
if Material.maxpoles != 0:
G.initialise_dispersive_arrays()
# Process complete list of materials - calculate update coefficients, store in arrays, and build text list of materials/properties
materialsdata = process_materials(G)
if G.messages:
materialstable = AsciiTable(materialsdata)
materialstable.outer_border = False
materialstable.justify_columns[0] = 'right'
print(materialstable.table)
# Check to see if numerical dispersion might be a problem
results = dispersion_analysis(G)
if all(value == False for value in results.values()):
print('\nNumerical dispersion analysis: No waveform present in model')
elif results['N'] < G.mingridsampling:
raise GeneralError("Non-physical wave propagation: Material '{}' has a wavelength sampled by {} cells, less than required minimum for physical wave propagation. Maximum significant frequency estimated as {:g}Hz".format(results['material'].ID, results['N'], results['maxfreq']))
elif results['deltavp'] and np.abs(results['deltavp']) > G.maxnumericaldisp and G.messages:
print(Fore.RED + "\nWARNING: Potentially significant numerical dispersion. Estimated largest physical phase-velocity error is {:.2f}% in material '{}' whose wavelength is sampled by {} cells. Maximum significant frequency estimated as {:g}Hz".format(results['deltavp'], results['material'].ID, results['N'], results['maxfreq']) + Style.RESET_ALL)
elif results['deltavp'] and G.messages:
print("\nNumerical dispersion analysis: estimated largest physical phase-velocity error is {:.2f}% in material '{}' whose wavelength is sampled by {} cells. Maximum significant frequency estimated as {:g}Hz".format(results['deltavp'], results['material'].ID, results['N'], results['maxfreq']))
# If geometry information to be reused between model runs
else:
inputfilestr = '\n--- Model {}/{}, input file not re-processed, i.e. geometry fixed: {}'.format(modelrun, numbermodelruns, inputfile)
print(Fore.GREEN + '{} {}\n'.format(inputfilestr, '-' * (get_terminal_width() - 1 - len(inputfilestr))) + Style.RESET_ALL)
# Clear arrays for field components
G.initialise_field_arrays()
# Clear arrays for fields in PML
for pml in G.pmls:
pml.initialise_field_arrays()
# Adjust position of simple sources and receivers if required
if G.srcsteps[0] != 0 or G.srcsteps[1] != 0 or G.srcsteps[2] != 0:
for source in itertools.chain(G.hertziandipoles, G.magneticdipoles):
if modelrun == 1:
if source.xcoord + G.srcsteps[0] * (numbermodelruns - 1) < 0 or source.xcoord + G.srcsteps[0] * (numbermodelruns - 1) > G.nx or source.ycoord + G.srcsteps[1] * (numbermodelruns - 1) < 0 or source.ycoord + G.srcsteps[1] * (numbermodelruns - 1) > G.ny or source.zcoord + G.srcsteps[2] * (numbermodelruns - 1) < 0 or source.zcoord + G.srcsteps[2] * (numbermodelruns - 1) > G.nz:
raise GeneralError('Source(s) will be stepped to a position outside the domain.')
source.xcoord = source.xcoordorigin + (modelrun - 1) * G.srcsteps[0]
source.ycoord = source.ycoordorigin + (modelrun - 1) * G.srcsteps[1]
source.zcoord = source.zcoordorigin + (modelrun - 1) * G.srcsteps[2]
if G.rxsteps[0] != 0 or G.rxsteps[1] != 0 or G.rxsteps[2] != 0:
for receiver in G.rxs:
if modelrun == 1:
if receiver.xcoord + G.rxsteps[0] * (numbermodelruns - 1) < 0 or receiver.xcoord + G.rxsteps[0] * (numbermodelruns - 1) > G.nx or receiver.ycoord + G.rxsteps[1] * (numbermodelruns - 1) < 0 or receiver.ycoord + G.rxsteps[1] * (numbermodelruns - 1) > G.ny or receiver.zcoord + G.rxsteps[2] * (numbermodelruns - 1) < 0 or receiver.zcoord + G.rxsteps[2] * (numbermodelruns - 1) > G.nz:
raise GeneralError('Receiver(s) will be stepped to a position outside the domain.')
receiver.xcoord = receiver.xcoordorigin + (modelrun - 1) * G.rxsteps[0]
receiver.ycoord = receiver.ycoordorigin + (modelrun - 1) * G.rxsteps[1]
receiver.zcoord = receiver.zcoordorigin + (modelrun - 1) * G.rxsteps[2]
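The stepping logic computes every position from the run-1 origin rather than incrementing the previous run's value, so rounding never accumulates, and the bounds check on run 1 can verify where the final run will land in advance. A small numeric illustration of the same arithmetic:

    srcsteps = (2, 0, 0)   # step 2 cells in x per model run
    xcoordorigin = 10      # source x cell in model run 1
    numbermodelruns = 5
    nx = 100               # domain size in cells

    # Bounds check done once on run 1: will the final run stay in the domain?
    assert 0 <= xcoordorigin + srcsteps[0] * (numbermodelruns - 1) <= nx

    for currentmodelrun in range(1, numbermodelruns + 1):
        xcoord = xcoordorigin + (currentmodelrun - 1) * srcsteps[0]
        print('Run {}: source at x cell {}'.format(currentmodelrun, xcoord))
    # Run 1: 10, Run 2: 12, ..., Run 5: 18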
# Write files for any geometry views and geometry object outputs
if not (G.geometryviews or G.geometryobjectswrite) and args.geometry_only:
print(Fore.RED + '\nWARNING: No geometry views or geometry objects to output found.' + Style.RESET_ALL)
if G.geometryviews:
print()
for i, geometryview in enumerate(G.geometryviews):
geometryview.set_filename(modelrun, numbermodelruns, G)
pbar = tqdm(total=geometryview.datawritesize, unit='byte', unit_scale=True, desc='Writing geometry view file {}/{}, {}'.format(i + 1, len(G.geometryviews), os.path.split(geometryview.filename)[1]), ncols=get_terminal_width() - 1, file=sys.stdout, disable=G.tqdmdisable)
geometryview.write_vtk(modelrun, numbermodelruns, G, pbar)
pbar.close()
if G.geometryobjectswrite:
for i, geometryobject in enumerate(G.geometryobjectswrite):
pbar = tqdm(total=geometryobject.datawritesize, unit='byte', unit_scale=True, desc='Writing geometry object file {}/{}, {}'.format(i + 1, len(G.geometryobjectswrite), os.path.split(geometryobject.filename)[1]), ncols=get_terminal_width() - 1, file=sys.stdout, disable=G.tqdmdisable)
geometryobject.write_hdf5(G, pbar)
pbar.close()
# Run simulation (if not doing geometry only)
if not args.geometry_only:
# Prepare any snapshot files
for snapshot in G.snapshots:
snapshot.prepare_vtk_imagedata(modelrun, numbermodelruns, G)
# Output filename
inputfileparts = os.path.splitext(inputfile)
if numbermodelruns == 1:
outputfile = inputfileparts[0] + '.out'
else:
outputfile = inputfileparts[0] + str(modelrun) + '.out'
print('\nOutput file: {}\n'.format(outputfile))
####################################
# Start - Main FDTD calculations #
####################################
tsolvestart = perf_counter()
# Absolute time
abstime = 0
for timestep in tqdm(range(G.iterations), desc='Running simulation, model ' + str(modelrun) + '/' + str(numbermodelruns), ncols=get_terminal_width() - 1, file=sys.stdout, disable=G.tqdmdisable):
# Store field component values for every receiver and transmission line
store_outputs(timestep, G.Ex, G.Ey, G.Ez, G.Hx, G.Hy, G.Hz, G)
# Write any snapshots to file
for i, snap in enumerate(G.snapshots):
if snap.time == timestep + 1:
snapiters = 36 * (((snap.xf - snap.xs) / snap.dx) * ((snap.yf - snap.ys) / snap.dy) * ((snap.zf - snap.zs) / snap.dz))
pbar = tqdm(total=snapiters, leave=False, unit='byte', unit_scale=True, desc=' Writing snapshot file {} of {}, {}'.format(i + 1, len(G.snapshots), os.path.split(snap.filename)[1]), ncols=get_terminal_width() - 1, file=sys.stdout, disable=G.tqdmdisable)
snap.write_vtk_imagedata(G.Ex, G.Ey, G.Ez, G.Hx, G.Hy, G.Hz, G, pbar)
pbar.close()
# Update electric field components
if Material.maxpoles == 0: # All materials are non-dispersive so do standard update
update_electric(G.nx, G.ny, G.nz, G.nthreads, G.updatecoeffsE, G.ID, G.Ex, G.Ey, G.Ez, G.Hx, G.Hy, G.Hz)
elif Material.maxpoles == 1: # If there are any dispersive materials do 1st part of dispersive update (it is split into two parts as it requires present and updated electric field values).
update_electric_dispersive_1pole_A(G.nx, G.ny, G.nz, G.nthreads, G.updatecoeffsE, G.updatecoeffsdispersive, G.ID, G.Tx, G.Ty, G.Tz, G.Ex, G.Ey, G.Ez, G.Hx, G.Hy, G.Hz)
elif Material.maxpoles > 1:
update_electric_dispersive_multipole_A(G.nx, G.ny, G.nz, G.nthreads, Material.maxpoles, G.updatecoeffsE, G.updatecoeffsdispersive, G.ID, G.Tx, G.Ty, G.Tz, G.Ex, G.Ey, G.Ez, G.Hx, G.Hy, G.Hz)
# Update electric field components with the PML correction
for pml in G.pmls:
pml.update_electric(G)
# Update electric field components from sources (update any Hertzian dipole sources last)
for source in G.voltagesources + G.transmissionlines + G.hertziandipoles:
source.update_electric(abstime, G.updatecoeffsE, G.ID, G.Ex, G.Ey, G.Ez, G)
# If there are any dispersive materials do 2nd part of dispersive update (it is split into two parts as it requires present and updated electric field values). Therefore it can only be completely updated after the electric field has been updated by the PML and source updates.
if Material.maxpoles == 1:
update_electric_dispersive_1pole_B(G.nx, G.ny, G.nz, G.nthreads, G.updatecoeffsdispersive, G.ID, G.Tx, G.Ty, G.Tz, G.Ex, G.Ey, G.Ez)
elif Material.maxpoles > 1:
update_electric_dispersive_multipole_B(G.nx, G.ny, G.nz, G.nthreads, Material.maxpoles, G.updatecoeffsdispersive, G.ID, G.Tx, G.Ty, G.Tz, G.Ex, G.Ey, G.Ez)
# Increment absolute time value
abstime += 0.5 * G.dt
# Update magnetic field components
update_magnetic(G.nx, G.ny, G.nz, G.nthreads, G.updatecoeffsH, G.ID, G.Ex, G.Ey, G.Ez, G.Hx, G.Hy, G.Hz)
# Update magnetic field components with the PML correction
for pml in G.pmls:
pml.update_magnetic(G)
# Update magnetic field components from sources
for source in G.transmissionlines + G.magneticdipoles:
source.update_magnetic(abstime, G.updatecoeffsH, G.ID, G.Hx, G.Hy, G.Hz, G)
# Increment absolute time value
abstime += 0.5 * G.dt
tsolve = int(perf_counter() - tsolvestart)
# Write an output file in HDF5 format
write_hdf5_outputfile(outputfile, G.Ex, G.Ey, G.Ez, G.Hx, G.Hy, G.Hz, G)
##################################
# End - Main FDTD calculations #
##################################
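One detail of the loop worth spelling out: abstime advances by dt/2 twice per iteration because E and H live on a staggered (leapfrog) time grid, so electric sources are injected at integer multiples of dt and magnetic sources half a step later. This staggering is also why the dispersive electric update is split into parts A and B around the PML and source corrections. A minimal timing sketch:

    # Skeleton of the leapfrog timing used in the loop above.
    dt = 1.0e-12   # illustrative time step (s)
    abstime = 0.0
    for timestep in range(3):
        print('E update/sources at t = {:g} s'.format(abstime))
        abstime += 0.5 * dt   # half step: H sits between E updates
        print('H update/sources at t = {:g} s'.format(abstime))
        abstime += 0.5 * dt   # complete the full time step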
if G.messages:
print('Memory (RAM) used: ~{}'.format(human_size(p.memory_info().rss)))
# If geometry information to be reused between model runs then FDTDGrid class instance must be global so that it persists
if not args.geometry_fixed:
del G
# Return time to complete solving if in benchmarking mode
if args.benchmark:
return tsolve