More docstring cleaning.

这个提交包含在:
Craig Warren
2022-11-08 13:28:31 +00:00
父节点 1c020ee71a
当前提交 d4520b281e
共有 20 个文件被更改,包括 525 次插入和 577 次删除

查看文件

@@ -39,12 +39,12 @@ def process_python_include_code(inputfile, usernamespace):
and insert the contents of the included file at that location.
Args:
inputfile (object): File object for input file.
usernamespace (dict): Namespace that can be accessed by user
in any Python code blocks in input file.
inputfile: file object for input file.
usernamespace: namespace that can be accessed by user in any Python code
blocks in input file.
Returns:
processedlines (list): Input commands after Python processing.
processedlines: list of input commands after Python processing.
"""
# Strip out any newline characters and comments that must begin with double hashes
@@ -61,7 +61,9 @@ def process_python_include_code(inputfile, usernamespace):
# Process any Python code
if(inputlines[x].startswith('#python:')):
logger.warning('#python blocks are deprecated and will be removed in the next release of gprMax. Please convert your model to use our Python API instead.\n')
logger.warning('#python blocks are deprecated and will be removed in ' +
'the next release of gprMax. Please convert your ' +
'model to use our Python API instead.\n')
# String to hold Python code to be executed
pythoncode = ''
x += 1
@@ -70,7 +72,8 @@ def process_python_include_code(inputfile, usernamespace):
pythoncode += inputlines[x] + '\n'
x += 1
if x == len(inputlines):
logger.exception('Cannot find the end of the Python code block, i.e. missing #end_python: command.')
logger.exception('Cannot find the end of the Python code ' +
'block, i.e. missing #end_python: command.')
raise SyntaxError
# Compile code for faster execution
pythoncompiledcode = compile(pythoncode, '<string>', 'exec')
@@ -110,22 +113,21 @@ def process_python_include_code(inputfile, usernamespace):
x += 1
# Process any include file commands
processedlines = process_include_files(processedlines, inputfile)
processedlines = process_include_files(processedlines)
return processedlines
def process_include_files(hashcmds, inputfile):
def process_include_files(hashcmds):
"""Looks for and processes any include file commands and insert
the contents of the included file at that location.
Args:
hashcmds (list): Input commands.
inputfile (object): File object for input file.
hashcmds: list of input commands.
Returns:
processedincludecmds (list): Input commands after processing
any include file commands.
processedincludecmds: list of input commands after processing any
include file commands.
"""
processedincludecmds = []
@@ -160,14 +162,13 @@ def process_include_files(hashcmds, inputfile):
return processedincludecmds
def write_processed_file(processedlines, G):
def write_processed_file(processedlines):
"""Writes an input file after any Python code and include commands
in the original input file have been processed.
Args:
processedlines (list): Input commands after after processing any
Python code and include commands.
G (FDTDGrid): Parameters describing a grid in a model.
processedlines: list of input commands after after processing any
Python code and include commands.
"""
parts = config.get_model_config().output_file_path.parts
@@ -177,7 +178,8 @@ def write_processed_file(processedlines, G):
for item in processedlines:
f.write(f'{item}')
logger.info(f'Written input commands, after processing any Python code and include commands, to file: {processedfile}\n')
logger.info(f'Written input commands, after processing any Python code and ' +
f'include commands, to file: {processedfile}\n')
def check_cmd_names(processedlines, checkessential=True):
@@ -185,13 +187,14 @@ def check_cmd_names(processedlines, checkessential=True):
and that all essential commands are present.
Args:
processedlines (list): Input commands after Python processing.
checkessential (boolean): Perform check to see that all essential commands are present.
processedlines: list of input commands after Python processing.
checkessential: boolean to check for essential commands or not.
Returns:
singlecmds (dict): Commands that can only occur once in the model.
multiplecmds (dict): Commands that can have multiple instances in the model.
geometry (list): Geometry commands in the model.
singlecmds: dict of commands that can only occur once in the model.
multiplecmds: dict of commands that can have multiple instances in the
model.
geometry: list of geometry commands in the model.
"""
# Dictionaries of available commands
@@ -240,12 +243,17 @@ def check_cmd_names(processedlines, checkessential=True):
# check first character of parameter string. Ignore case when there
# are no parameters for a command, e.g. for #taguchi:
if ' ' not in cmdparams[0] and len(cmdparams.strip('\n')) != 0:
logger.exception('There must be a space between the command name and parameters in ' + processedlines[lindex])
logger.exception('There must be a space between the command name ' +
'and parameters in ' + processedlines[lindex])
raise SyntaxError
# Check if command name is valid
if cmdname not in essentialcmds and cmdname not in singlecmds and cmdname not in multiplecmds and cmdname not in geometrycmds:
logger.exception('Your input file contains an invalid command: ' + cmdname)
if (cmdname not in essentialcmds and
cmdname not in singlecmds and
cmdname not in multiplecmds and
cmdname not in geometrycmds):
logger.exception('Your input file contains an invalid command: ' +
cmdname)
raise SyntaxError
# Count essential commands
@@ -257,7 +265,8 @@ def check_cmd_names(processedlines, checkessential=True):
if singlecmds[cmdname] is None:
singlecmds[cmdname] = cmd[1].strip(' \t\n')
else:
logger.exception('You can only have a single instance of ' + cmdname + ' in your model')
logger.exception('You can only have a single instance of ' +
cmdname + ' in your model')
raise SyntaxError
elif cmdname in multiplecmds:
@@ -270,25 +279,27 @@ def check_cmd_names(processedlines, checkessential=True):
if checkessential:
if (countessentialcmds < len(essentialcmds)):
logger.exception('Your input file is missing essential commands required to run a model. Essential commands are: ' + ', '.join(essentialcmds))
logger.exception('Your input file is missing essential commands ' +
'required to run a model. Essential commands are: ' +
', '.join(essentialcmds))
raise SyntaxError
return singlecmds, multiplecmds, geometry
def get_user_objects(processedlines, check=True):
def get_user_objects(processedlines, checkessential=True):
"""Make a list of all user objects.
Args:
processedlines (list): Input commands after Python processing.
check (bool): Whether to check for essential commands or not.
processedlines: list of input commands after Python processing.
checkessential: boolean to check for essential commands or not.
Returns:
user_objs (list): All user objects.
user_objs: list of all user objects.
"""
# Check validity of command names and that essential commands are present
parsed_commands = check_cmd_names(processedlines, checkessential=check)
parsed_commands = check_cmd_names(processedlines, checkessential=checkessential)
# Process parameters for commands that can only occur once in the model
single_user_objs = process_singlecmds(parsed_commands[0])
@@ -305,15 +316,14 @@ def get_user_objects(processedlines, check=True):
return user_objs
def parse_hash_commands(scene, G):
def parse_hash_commands(scene):
"""Parse user hash commands and add them to the scene.
Args:
scene (Scene): Scene object.
G (FDTDGrid): Parameters describing a grid in a model.
scene: Scene object.
Returns:
scene (Scene): Scene object.
scene: Scene object.
"""
with open(config.sim_config.input_file_path) as inputfile:
@@ -328,14 +338,15 @@ def parse_hash_commands(scene, G):
for key, value in sorted(usernamespace.items()):
if key != '__builtins__':
uservars += f'{key}: {value}, '
logger.info(f'Constants/variables used/available for Python scripting: {{{uservars[:-2]}}}\n')
logger.info(f'Constants/variables used/available for Python scripting: ' +
f'{{{uservars[:-2]}}}\n')
# Write a file containing the input commands after Python or include
# file commands have been processed
if config.sim_config.args.write_processed:
write_processed_file(processedlines, G)
write_processed_file(processedlines)
user_objs = get_user_objects(processedlines, check=True)
user_objs = get_user_objects(processedlines, checkessential=True)
for user_obj in user_objs:
scene.add(user_obj)
@@ -365,6 +376,6 @@ def user_libs_fn_to_scene_obj(f, *args, **kwargs):
with Capturing() as str_cmds:
f(*args, **kwargs)
user_objects = get_user_objects(str_cmds, check=False)
user_objects = get_user_objects(str_cmds, checkessential=False)
return user_objects

查看文件

@@ -39,11 +39,11 @@ def check_averaging(averaging):
"""Check and set material averaging value.
Args:
averaging (string): Input value from hash command - should be 'y'
or 'n'
averaging: string for input value from hash command - should be 'y'
or 'n'.
Returns:
averaging (bool): geometry object material averaging
averaging: boolean for geometry object material averaging.
"""
if averaging == 'y':
@@ -51,22 +51,21 @@ def check_averaging(averaging):
elif averaging == 'n':
averaging = False
else:
logger.exception(self.__str__() + f' requires averaging to be either y or n')
logger.exception('Averaging should be either y or n')
return averaging
def process_geometrycmds(geometry):
"""
This function checks the validity of command parameters, creates instances
of classes of parameters, and calls functions to directly set arrays
solid, rigid and ID.
"""Checks the validity of command parameters, creates instances of classes
of parameters, and calls functions to directly set arrays solid, rigid
and ID.
Args:
geometry (list): Geometry commands in the model,
geometry: list of geometry commands in the model.
Returns:
scene_objects (list): Holds objects in scene.
scene_objects: list that holds objects in scene.
"""
scene_objects = []
@@ -78,7 +77,8 @@ def process_geometrycmds(geometry):
from .cmds_geometry.geometry_objects_read import GeometryObjectsRead
if len(tmp) != 6:
logger.exception("'" + ' '.join(tmp) + "'" + ' requires exactly five parameters')
logger.exception("'" + ' '.join(tmp) + "'" +
' requires exactly five parameters')
raise ValueError
p1 = (float(tmp[1]), float(tmp[2]), float(tmp[3]))
@@ -88,7 +88,8 @@ def process_geometrycmds(geometry):
elif tmp[0] == '#edge:':
if len(tmp) != 8:
logger.exception("'" + ' '.join(tmp) + "'" + ' requires exactly seven parameters')
logger.exception("'" + ' '.join(tmp) + "'" +
' requires exactly seven parameters')
raise ValueError
edge = Edge(p1=(float(tmp[1]), float(tmp[2]), float(tmp[3])),
@@ -99,7 +100,8 @@ def process_geometrycmds(geometry):
elif tmp[0] == '#plate:':
if len(tmp) < 8:
logger.exception("'" + ' '.join(tmp) + "'" + ' requires at least seven parameters')
logger.exception("'" + ' '.join(tmp) + "'" +
' requires at least seven parameters')
raise ValueError
# Isotropic case
@@ -115,14 +117,16 @@ def process_geometrycmds(geometry):
material_ids=tmp[7:])
else:
logger.exception("'" + ' '.join(tmp) + "'" + ' too many parameters have been given')
logger.exception("'" + ' '.join(tmp) + "'" +
' too many parameters have been given')
raise ValueError
scene_objects.append(plate)
elif tmp[0] == '#triangle:':
if len(tmp) < 12:
logger.exception("'" + ' '.join(tmp) + "'" + ' requires at least eleven parameters')
logger.exception("'" + ' '.join(tmp) + "'" +
' requires at least eleven parameters')
raise ValueError
p1 = (float(tmp[1]), float(tmp[2]), float(tmp[3]))
@@ -132,26 +136,31 @@ def process_geometrycmds(geometry):
# Isotropic case with no user specified averaging
if len(tmp) == 12:
triangle = Triangle(p1=p1, p2=p2, p3=p3, thickness=thickness, material_id=tmp[11])
triangle = Triangle(p1=p1, p2=p2, p3=p3, thickness=thickness,
material_id=tmp[11])
# Isotropic case with user specified averaging
elif len(tmp) == 13:
averaging = check_averaging(tmp[12].lower())
triangle = Triangle(p1=p1, p2=p2, p3=p3, thickness=thickness, material_id=tmp[11], averaging=averaging)
triangle = Triangle(p1=p1, p2=p2, p3=p3, thickness=thickness,
material_id=tmp[11], averaging=averaging)
# Uniaxial anisotropic case
elif len(tmp) == 14:
triangle = Triangle(p1=p1, p2=p2, p3=p3, thickness=thickness, material_ids=tmp[11:])
triangle = Triangle(p1=p1, p2=p2, p3=p3, thickness=thickness,
material_ids=tmp[11:])
else:
logger.exception("'" + ' '.join(tmp) + "'" + ' too many parameters have been given')
logger.exception("'" + ' '.join(tmp) + "'" +
' too many parameters have been given')
raise ValueError
scene_objects.append(triangle)
elif tmp[0] == '#box:':
if len(tmp) < 8:
logger.exception("'" + ' '.join(tmp) + "'" + ' requires at least seven parameters')
logger.exception("'" + ' '.join(tmp) + "'" +
' requires at least seven parameters')
raise ValueError
p1 = (float(tmp[1]), float(tmp[2]), float(tmp[3]))
@@ -171,14 +180,16 @@ def process_geometrycmds(geometry):
box = Box(p1=p1, p2=p2, material_ids=tmp[7:])
else:
logger.exception("'" + ' '.join(tmp) + "'" + ' too many parameters have been given')
logger.exception("'" + ' '.join(tmp) + "'" +
' too many parameters have been given')
raise ValueError
scene_objects.append(box)
elif tmp[0] == '#cylinder:':
if len(tmp) < 9:
logger.exception("'" + ' '.join(tmp) + "'" + ' requires at least eight parameters')
logger.exception("'" + ' '.join(tmp) + "'" +
' requires at least eight parameters')
raise ValueError
p1 = (float(tmp[1]), float(tmp[2]), float(tmp[3]))
@@ -192,21 +203,24 @@ def process_geometrycmds(geometry):
# Isotropic case with user specified averaging
elif len(tmp) == 10:
averaging = check_averaging(tmp[9].lower())
cylinder = Cylinder(p1=p1, p2=p2, r=r, material_id=tmp[8], averaging=averaging)
cylinder = Cylinder(p1=p1, p2=p2, r=r, material_id=tmp[8],
averaging=averaging)
# Uniaxial anisotropic case
elif len(tmp) == 11:
cylinder = Cylinder(p1=p1, p2=p2, r=r, material_ids=tmp[8:])
else:
logger.exception("'" + ' '.join(tmp) + "'" + ' too many parameters have been given')
logger.exception("'" + ' '.join(tmp) + "'" +
' too many parameters have been given')
raise ValueError
scene_objects.append(cylinder)
elif tmp[0] == '#cylindrical_sector:':
if len(tmp) < 10:
logger.exception("'" + ' '.join(tmp) + "'" + ' requires at least nine parameters')
logger.exception("'" + ' '.join(tmp) + "'" +
' requires at least nine parameters')
raise ValueError
normal = tmp[1].lower()
@@ -220,29 +234,41 @@ def process_geometrycmds(geometry):
# Isotropic case with no user specified averaging
if len(tmp) == 10:
cylindrical_sector = CylindricalSector(normal=normal, ctr1=ctr1, ctr2=ctr2, extent1=extent1,
extent2=extent2, r=r, start=start, end=end, msterial_id=tmp[9])
cylindrical_sector = CylindricalSector(normal=normal, ctr1=ctr1,
ctr2=ctr2, extent1=extent1,
extent2=extent2, r=r,
start=start, end=end,
material_id=tmp[9])
# Isotropic case with user specified averaging
elif len(tmp) == 11:
averaging = check_averaging(tmp[10].lower())
cylindrical_sector = CylindricalSector(normal=normal, ctr1=ctr1, ctr2=ctr2, extent1=extent1, extent2=extent2,
r=r, start=start, end=end, averaging=averaging, material_id=tmp[9])
cylindrical_sector = CylindricalSector(normal=normal, ctr1=ctr1,
ctr2=ctr2, extent1=extent1,
extent2=extent2, r=r,
start=start, end=end,
averaging=averaging,
material_id=tmp[9])
# Uniaxial anisotropic case
elif len(tmp) == 12:
cylindrical_sector = CylindricalSector(normal=normal, ctr1=ctr1, ctr2=ctr2, extent1=extent1,
extent2=extent2, r=r, start=start, end=end, material_ids=tmp[9:])
cylindrical_sector = CylindricalSector(normal=normal, ctr1=ctr1,
ctr2=ctr2, extent1=extent1,
extent2=extent2, r=r,
start=start, end=end,
material_ids=tmp[9:])
else:
logger.exception("'" + ' '.join(tmp) + "'" + ' too many parameters have been given')
logger.exception("'" + ' '.join(tmp) + "'" +
' too many parameters have been given')
raise ValueError
scene_objects.append(cylindrical_sector)
elif tmp[0] == '#sphere:':
if len(tmp) < 6:
logger.exception("'" + ' '.join(tmp) + "'" + ' requires at least five parameters')
logger.exception("'" + ' '.join(tmp) + "'" +
' requires at least five parameters')
raise ValueError
p1 = (float(tmp[1]), float(tmp[2]), float(tmp[3]))
@@ -255,14 +281,16 @@ def process_geometrycmds(geometry):
# Isotropic case with user specified averaging
elif len(tmp) == 7:
averaging = check_averaging(tmp[6].lower())
sphere = Sphere(p1=p1, r=r, material_id=tmp[5], averaging=averaging)
sphere = Sphere(p1=p1, r=r, material_id=tmp[5],
averaging=averaging)
# Uniaxial anisotropic case
elif len(tmp) == 8:
sphere = Sphere(p1=p1, r=r, material_id=tmp[5:])
else:
logger.exception("'" + ' '.join(tmp) + "'" + ' too many parameters have been given')
logger.exception("'" + ' '.join(tmp) + "'" +
' too many parameters have been given')
raise ValueError
scene_objects.append(sphere)
@@ -271,7 +299,8 @@ def process_geometrycmds(geometry):
# Default is no dielectric smoothing for a fractal box
if len(tmp) < 14:
logger.exception("'" + ' '.join(tmp) + "'" + ' requires at least thirteen parameters')
logger.exception("'" + ' '.join(tmp) + "'" +
' requires at least thirteen parameters')
raise ValueError
p1 = (float(tmp[1]), float(tmp[2]), float(tmp[3]))
@@ -283,13 +312,21 @@ def process_geometrycmds(geometry):
ID = tmp[13]
if len(tmp) == 14:
fb = FractalBox(p1=p1, p2=p2, frac_dim=frac_dim, weighting=weighting, mixing_model_id=mixing_model_id, id=ID, n_materials=n_materials)
fb = FractalBox(p1=p1, p2=p2, frac_dim=frac_dim,
weighting=weighting, mixing_model_id=mixing_model_id,
id=ID, n_materials=n_materials)
elif len(tmp) == 15:
fb = FractalBox(p1=p1, p2=p2, frac_dim=frac_dim, weighting=weighting, mixing_model_id=mixing_model_id, id=ID, n_materials=n_materials, seed=tmp[14])
fb = FractalBox(p1=p1, p2=p2, frac_dim=frac_dim,
weighting=weighting, mixing_model_id=mixing_model_id,
id=ID, n_materials=n_materials, seed=tmp[14])
elif len(tmp) == 16:
fb = FractalBox(p1=p1, p2=p2, frac_dim=frac_dim, weighting=weighting, mixing_model_id=mixing_model_id, id=ID, n_materials=n_materials, seed=tmp[14], averaging=tmp[15].lower())
fb = FractalBox(p1=p1, p2=p2, frac_dim=frac_dim,
weighting=weighting, mixing_model_id=mixing_model_id,
id=ID, n_materials=n_materials, seed=tmp[14],
averaging=tmp[15].lower())
else:
logger.exception("'" + ' '.join(tmp) + "'" + ' too many parameters have been given')
logger.exception("'" + ' '.join(tmp) + "'" +
' too many parameters have been given')
raise ValueError
scene_objects.append(fb)
@@ -300,7 +337,8 @@ def process_geometrycmds(geometry):
if tmp[0] == '#add_surface_roughness:':
if len(tmp) < 13:
logger.exception("'" + ' '.join(tmp) + "'" + ' requires at least twelve parameters')
logger.exception("'" + ' '.join(tmp) + "'" +
' requires at least twelve parameters')
raise ValueError
p1 = (float(tmp[1]), float(tmp[2]), float(tmp[3]))
@@ -311,18 +349,25 @@ def process_geometrycmds(geometry):
fractal_box_id = tmp[12]
if len(tmp) == 13:
asr = AddSurfaceRoughness(p1=p1, p2=p2, frac_dim=frac_dim, weighting=weighting, limits=limits, fractal_box_id=fractal_box_id)
asr = AddSurfaceRoughness(p1=p1, p2=p2, frac_dim=frac_dim,
weighting=weighting, limits=limits,
fractal_box_id=fractal_box_id)
elif len(tmp) == 14:
asr = AddSurfaceRoughness(p1=p1, p2=p2, frac_dim=frac_dim, weighting=weighting, limits=limits, fractal_box_id=fractal_box_id, seed=int(tmp[13]))
asr = AddSurfaceRoughness(p1=p1, p2=p2, frac_dim=frac_dim,
weighting=weighting, limits=limits,
fractal_box_id=fractal_box_id,
seed=int(tmp[13]))
else:
logger.exception("'" + ' '.join(tmp) + "'" + ' too many parameters have been given')
logger.exception("'" + ' '.join(tmp) + "'" +
' too many parameters have been given')
raise ValueError
scene_objects.append(asr)
if tmp[0] == '#add_surface_water:':
if len(tmp) != 9:
logger.exception("'" + ' '.join(tmp) + "'" + ' requires exactly eight parameters')
logger.exception("'" + ' '.join(tmp) + "'" +
' requires exactly eight parameters')
raise ValueError
p1 = (float(tmp[1]), float(tmp[2]), float(tmp[3]))
@@ -330,12 +375,14 @@ def process_geometrycmds(geometry):
depth = float(tmp[7])
fractal_box_id = tmp[8]
asf = AddSurfaceWater(p1=p1, p2=p2, depth=depth, fractal_box_id=fractal_box_id)
asf = AddSurfaceWater(p1=p1, p2=p2, depth=depth,
fractal_box_id=fractal_box_id)
scene_objects.append(asf)
if tmp[0] == '#add_grass:':
if len(tmp) < 12:
logger.exception("'" + ' '.join(tmp) + "'" + ' requires at least eleven parameters')
logger.exception("'" + ' '.join(tmp) + "'" +
' requires at least eleven parameters')
raise ValueError
p1 = (float(tmp[1]), float(tmp[2]), float(tmp[3]))
@@ -346,11 +393,17 @@ def process_geometrycmds(geometry):
fractal_box_id = tmp[11]
if len(tmp) == 12:
grass = AddGrass(p1=p1, p2=p2, frac_dim=frac_dim, limits=limits, n_blades=n_blades, fractal_box_id=fractal_box_id)
grass = AddGrass(p1=p1, p2=p2, frac_dim=frac_dim,
limits=limits, n_blades=n_blades,
fractal_box_id=fractal_box_id)
elif len(tmp) == 13:
grass = AddGrass(p1=p1, p2=p2, frac_dim=frac_dim, limits=limits, n_blades=n_blades, fractal_box_id=fractal_box_id, seed=int(tmp[12]))
grass = AddGrass(p1=p1, p2=p2, frac_dim=frac_dim,
limits=limits, n_blades=n_blades,
fractal_box_id=fractal_box_id,
seed=int(tmp[12]))
else:
logger.exception("'" + ' '.join(tmp) + "'" + ' too many parameters have been given')
logger.exception("'" + ' '.join(tmp) + "'" +
' too many parameters have been given')
raise ValueError
scene_objects.append(grass)

查看文件

@@ -28,15 +28,14 @@ logger = logging.getLogger(__name__)
def process_multicmds(multicmds):
"""
Checks the validity of command parameters and creates instances of
"""Checks the validity of command parameters and creates instances of
classes of parameters.
Args:
multicmds (dict): Commands that can have multiple instances in the model.
multicmds: dict of commands that can have multiple instances in the model.
Returns:
scene_objects (list): Holds objects in scene.
scene_objects: list that holds objects in scene.
"""
scene_objects = []
@@ -46,10 +45,12 @@ def process_multicmds(multicmds):
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) != 4:
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires exactly four parameters')
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" +
' requires exactly four parameters')
raise ValueError
waveform = Waveform(wave_type=tmp[0], amp=float(tmp[1]), freq=float(tmp[2]), id=tmp[3])
waveform = Waveform(wave_type=tmp[0], amp=float(tmp[1]),
freq=float(tmp[2]), id=tmp[3])
scene_objects.append(waveform)
cmdname = '#voltage_source'
@@ -57,11 +58,19 @@ def process_multicmds(multicmds):
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) == 6:
voltage_source = VoltageSource(polarisation=tmp[0].lower(), p1=(float(tmp[1]), float(tmp[2]), float(tmp[3])), resistance=float(tmp[4]), waveform_id=tmp[5])
voltage_source = VoltageSource(polarisation=tmp[0].lower(),
p1=(float(tmp[1]), float(tmp[2]),
float(tmp[3])), resistance=float(tmp[4]),
waveform_id=tmp[5])
elif len(tmp) == 8:
voltage_source = VoltageSource(polarisation=tmp[0].lower(), p1=(float(tmp[1]), float(tmp[2]), float(tmp[3])), resistance=float(tmp[4]), waveform_id=tmp[5], start=float(tmp[6]), end=float(tmp[7]))
voltage_source = VoltageSource(polarisation=tmp[0].lower(),
p1=(float(tmp[1]), float(tmp[2]),
float(tmp[3])), resistance=float(tmp[4]),
waveform_id=tmp[5], start=float(tmp[6]),
end=float(tmp[7]))
else:
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires at least six parameters')
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" +
' requires at least six parameters')
raise ValueError
scene_objects.append(voltage_source)
@@ -71,14 +80,21 @@ def process_multicmds(multicmds):
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) < 5:
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires at least five parameters')
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" +
' requires at least five parameters')
raise ValueError
if len(tmp) == 5:
hertzian_dipole = HertzianDipole(polarisation=tmp[0], p1=(float(tmp[1]), float(tmp[2]), float(tmp[3])), waveform_id=tmp[4])
hertzian_dipole = HertzianDipole(polarisation=tmp[0], p1=(float(tmp[1]),
float(tmp[2]), float(tmp[3])),
waveform_id=tmp[4])
elif len(tmp) == 7:
hertzian_dipole = HertzianDipole(polarisation=tmp[0], p1=(float(tmp[1]), float(tmp[2]), float(tmp[3])), waveform_id=tmp[4], start=float(tmp[5]), end=float(tmp[6]))
hertzian_dipole = HertzianDipole(polarisation=tmp[0], p1=(float(tmp[1]),
float(tmp[2]), float(tmp[3])),
waveform_id=tmp[4], start=float(tmp[5]),
end=float(tmp[6]))
else:
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' too many parameters')
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" +
' too many parameters')
raise ValueError
scene_objects.append(hertzian_dipole)
@@ -88,14 +104,21 @@ def process_multicmds(multicmds):
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) < 5:
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires at least five parameters')
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" +
' requires at least five parameters')
raise ValueError
if len(tmp) == 5:
magnetic_dipole = MagneticDipole(polarisation=tmp[0], p1=(float(tmp[1]), float(tmp[2]), float(tmp[3])), waveform_id=tmp[4])
magnetic_dipole = MagneticDipole(polarisation=tmp[0], p1=(float(tmp[1]),
float(tmp[2]), float(tmp[3])),
waveform_id=tmp[4])
elif len(tmp) == 7:
magnetic_dipole = MagneticDipole(polarisation=tmp[0], p1=(float(tmp[1]), float(tmp[2]), float(tmp[3])), waveform_id=tmp[4], start=float(tmp[5]), end=float(tmp[6]))
magnetic_dipole = MagneticDipole(polarisation=tmp[0], p1=(float(tmp[1]),
float(tmp[2]), float(tmp[3])),
waveform_id=tmp[4], start=float(tmp[5]),
end=float(tmp[6]))
else:
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' too many parameters')
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" +
' too many parameters')
raise ValueError
scene_objects.append(magnetic_dipole)
@@ -105,15 +128,22 @@ def process_multicmds(multicmds):
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) < 6:
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires at least six parameters')
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" +
' requires at least six parameters')
raise ValueError
if len(tmp) == 6:
tl = TransmissionLine(polarisation=tmp[0], p1=(float(tmp[1]), float(tmp[2]), float(tmp[3])), resistance=float(tmp[4]), waveform_id=tmp[5])
tl = TransmissionLine(polarisation=tmp[0], p1=(float(tmp[1]),
float(tmp[2]), float(tmp[3])),
resistance=float(tmp[4]), waveform_id=tmp[5])
elif len(tmp) == 8:
tl = TransmissionLine(polarisation=tmp[0], p1=(float(tmp[1]), float(tmp[2]), float(tmp[3])), resistance=float(tmp[4]), waveform_id=tmp[5], start=tmp[6], end=tmp[7])
tl = TransmissionLine(polarisation=tmp[0], p1=(float(tmp[1]),
float(tmp[2]), float(tmp[3])),
resistance=float(tmp[4]), waveform_id=tmp[5],
start=tmp[6], end=tmp[7])
else:
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' too many parameters')
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" +
' too many parameters')
raise ValueError
scene_objects.append(tl)
@@ -123,12 +153,14 @@ def process_multicmds(multicmds):
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) != 3 and len(tmp) < 5:
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' has an incorrect number of parameters')
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" +
' has an incorrect number of parameters')
raise ValueError
if len(tmp) == 3:
rx = Rx(p1=(float(tmp[0]), float(tmp[1]), float(tmp[2])))
else:
rx = Rx(p1=(float(tmp[0]), float(tmp[1]), float(tmp[2])), id=tmp[3], outputs=' '.join(tmp[4:]))
rx = Rx(p1=(float(tmp[0]), float(tmp[1]), float(tmp[2])),
id=tmp[3], outputs=' '.join(tmp[4:]))
scene_objects.append(rx)
@@ -137,7 +169,8 @@ def process_multicmds(multicmds):
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) != 9:
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires exactly nine parameters')
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" +
' requires exactly nine parameters')
raise ValueError
p1 = (float(tmp[0]), float(tmp[1]), float(tmp[2]))
@@ -152,7 +185,8 @@ def process_multicmds(multicmds):
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) != 11:
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires exactly eleven parameters')
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" +
' requires exactly eleven parameters')
raise ValueError
p1 = (float(tmp[0]), float(tmp[1]), float(tmp[2]))
@@ -162,11 +196,13 @@ def process_multicmds(multicmds):
try:
iterations = int(tmp[9])
snapshot = Snapshot(p1=p1, p2=p2, dl=dl, iterations=iterations, filename=filename)
snapshot = Snapshot(p1=p1, p2=p2, dl=dl, iterations=iterations,
filename=filename)
except ValueError:
time = float(tmp[9])
snapshot = Snapshot(p1=p1, p2=p2, dl=dl, time=time, filename=filename)
snapshot = Snapshot(p1=p1, p2=p2, dl=dl, time=time,
filename=filename)
scene_objects.append(snapshot)
@@ -175,10 +211,12 @@ def process_multicmds(multicmds):
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) != 5:
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires exactly five parameters')
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" +
' requires exactly five parameters')
raise ValueError
material = Material(er=float(tmp[0]), se=float(tmp[1]), mr=float(tmp[2]), sm=float(tmp[3]), id=tmp[4])
material = Material(er=float(tmp[0]), se=float(tmp[1]),
mr=float(tmp[2]), sm=float(tmp[3]), id=tmp[4])
scene_objects.append(material)
cmdname = '#add_dispersion_debye'
@@ -187,7 +225,8 @@ def process_multicmds(multicmds):
tmp = cmdinstance.split()
if len(tmp) < 4:
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires at least four parameters')
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" +
' requires at least four parameters')
raise ValueError
poles = int(tmp[0])
@@ -199,7 +238,8 @@ def process_multicmds(multicmds):
er_delta.append(float(tmp[pole]))
tau.append(float(tmp[pole + 1]))
debye_dispersion = AddDebyeDispersion(poles=poles, er_delta=er_delta, tau=tau, material_ids=material_ids)
debye_dispersion = AddDebyeDispersion(poles=poles, er_delta=er_delta,
tau=tau, material_ids=material_ids)
scene_objects.append(debye_dispersion)
cmdname = '#add_dispersion_lorentz'
@@ -208,7 +248,8 @@ def process_multicmds(multicmds):
tmp = cmdinstance.split()
if len(tmp) < 5:
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires at least five parameters')
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" +
' requires at least five parameters')
raise ValueError
poles = int(tmp[0])
@@ -222,7 +263,10 @@ def process_multicmds(multicmds):
tau.append(float(tmp[pole + 1]))
alpha.append(float(tmp[pole + 2]))
lorentz_dispersion = AddLorentzDispersion(poles=poles, material_ids=material_ids, er_delta=er_delta, tau=tau, alpha=alpha)
lorentz_dispersion = AddLorentzDispersion(poles=poles,
material_ids=material_ids,
er_delta=er_delta, tau=tau,
alpha=alpha)
scene_objects.append(lorentz_dispersion)
cmdname = '#add_dispersion_drude'
@@ -231,7 +275,8 @@ def process_multicmds(multicmds):
tmp = cmdinstance.split()
if len(tmp) < 5:
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires at least five parameters')
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" +
' requires at least five parameters')
raise ValueError
poles = int(tmp[0])
@@ -243,7 +288,9 @@ def process_multicmds(multicmds):
tau.append(float(tmp[pole]))
alpha.append(float(tmp[pole + 1]))
drude_dispersion = AddDrudeDispersion(poles=poles, material_ids=material_ids, tau=tau, alpha=alpha)
drude_dispersion = AddDrudeDispersion(poles=poles,
material_ids=material_ids,
tau=tau, alpha=alpha)
scene_objects.append(drude_dispersion)
cmdname = '#soil_peplinski'
@@ -252,7 +299,8 @@ def process_multicmds(multicmds):
tmp = cmdinstance.split()
if len(tmp) != 7:
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires at exactly seven parameters')
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" +
' requires exactly seven parameters')
raise ValueError
soil = SoilPeplinski(sand_fraction=float(tmp[0]),
clay_fraction=float(tmp[1]),
@@ -268,14 +316,16 @@ def process_multicmds(multicmds):
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) != 11:
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires exactly eleven parameters')
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" +
' requires exactly eleven parameters')
raise ValueError
p1 = float(tmp[0]), float(tmp[1]), float(tmp[2])
p2 = float(tmp[3]), float(tmp[4]), float(tmp[5])
dl = float(tmp[6]), float(tmp[7]), float(tmp[8])
geometry_view = GeometryView(p1=p1, p2=p2, dl=dl, filename=tmp[9], output_type=tmp[10])
geometry_view = GeometryView(p1=p1, p2=p2, dl=dl, filename=tmp[9],
output_type=tmp[10])
scene_objects.append(geometry_view)
cmdname = '#geometry_objects_write'
@@ -283,7 +333,8 @@ def process_multicmds(multicmds):
for cmdinstance in multicmds[cmdname]:
tmp = cmdinstance.split()
if len(tmp) != 7:
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" + ' requires exactly seven parameters')
logger.exception("'" + cmdname + ': ' + ' '.join(tmp) + "'" +
' requires exactly seven parameters')
raise ValueError
p1 = float(tmp[0]), float(tmp[1]), float(tmp[2])

查看文件

@@ -27,15 +27,14 @@ logger = logging.getLogger(__name__)
def process_singlecmds(singlecmds):
"""
Checks the validity of command parameters and creates instances of
"""Checks the validity of command parameters and creates instances of
classes of parameters.
Args:
singlecmds (dict): Commands that can only occur once in the model.
singlecmds: dict of commands that can only occur once in the model.
Returns:
scene_objects (list): Holds objects in scene.
scene_objects: list that holds objects in scene.
"""
scene_objects = []
@@ -56,7 +55,8 @@ def process_singlecmds(singlecmds):
if singlecmds[cmd] is not None:
tmp = tuple(int(x) for x in singlecmds[cmd].split())
if len(tmp) != 1:
logger.exception(cmd + ' requires exactly one parameter to specify the number of CPU OpenMP threads to use')
logger.exception(cmd + ' requires exactly one parameter to specify ' +
'the number of CPU OpenMP threads to use')
raise ValueError
omp_threads = OMPThreads(n=tmp[0])
@@ -94,7 +94,8 @@ def process_singlecmds(singlecmds):
if singlecmds[cmd] is not None:
tmp = singlecmds[cmd].split()
if len(tmp) != 1:
logger.exception(cmd + ' requires exactly one parameter to specify the time window. Either in seconds or number of iterations.')
logger.exception(cmd + ' requires exactly one parameter to specify ' +
'the time window. Either in seconds or number of iterations.')
raise ValueError
tmp = tmp[0].lower()

查看文件

@@ -29,8 +29,8 @@ class Material:
def __init__(self, numID, ID):
"""
Args:
numID (int): Numeric identifier of the material.
ID (str): Name of the material.
numID: int for numeric ID of the material.
ID: string for name of the material.
"""
self.numID = numID
@@ -49,7 +49,7 @@ class Material:
"""Calculates the magnetic update coefficients of the material.
Args:
G (FDTDGrid): Parameters describing a grid in a model.
G: FDTDGrid class describing a grid in a model.
"""
HA = (config.m0 * self.mr / G.dt) + 0.5 * self.sm
@@ -64,7 +64,7 @@ class Material:
"""Calculates the electric update coefficients of the material.
Args:
G (FDTDGrid): Parameters describing a grid in a model.
G: FDTDGrid class describing a grid in a model.
"""
EA = (config.sim_config.em_consts['e0'] * self.er / G.dt) + 0.5 * self.se
@@ -88,11 +88,11 @@ class Material:
specific frequency.
Args:
freq (float): Frequency used to calculate complex relative
permittivity.
freq: float for frequency used to calculate complex relative
permittivity.
Returns:
er (float): Complex relative permittivity.
er: float for complex relative permittivity.
"""
return self.er
@@ -126,7 +126,7 @@ class DispersiveMaterial(Material):
"""Calculates the electric update coefficients of the material.
Args:
G (FDTDGrid): Parameters describing a grid in a model.
G: FDTDGrid class describing a grid in a model.
"""
# The implementation of the dispersive material modelling comes from the
@@ -183,11 +183,11 @@ class DispersiveMaterial(Material):
specific frequency.
Args:
freq (float): Frequency used to calculate complex relative
permittivity.
freq: float for frequency used to calculate complex relative
permittivity.
Returns:
er (float): Complex relative permittivity.
er: float for complex relative permittivity.
"""
# Permittivity at infinite frequency if the material is dispersive
@@ -212,15 +212,15 @@ class DispersiveMaterial(Material):
def process_materials(G):
"""Process complete list of materials - calculate update coefficients,
store in arrays, and build text list of materials/properties
"""Processes complete list of materials - calculates update coefficients,
stores in arrays, and builds text list of materials/properties
Args:
G (FDTDGrid): Parameters describing a grid in a model.
G: FDTDGrid class describing a grid in a model.
Returns:
materialsdata (list): List of material IDs, names, and properties to
print a table.
materialsdata: list of material IDs, names, and properties to
print a table.
"""
if config.get_model_config().materials['maxpoles'] == 0:
@@ -290,14 +290,15 @@ class PeplinskiSoil:
def __init__(self, ID, sandfraction, clayfraction, bulkdensity, sandpartdensity, watervolfraction):
"""
Args:
ID (str): Name of the soil.
sandfraction (float): Sand fraction of the soil.
clayfraction (float): Clay fraction of the soil.
bulkdensity (float): Bulk density of the soil (g/cm3).
sandpartdensity (float): Density of the sand particles in the
soil (g/cm3).
watervolfraction (float): Two numbers that specify a range for the
volumetric water fraction of the soil.
ID: string for name of the soil.
sandfraction: float of sand fraction of the soil.
clayfraction: float of clay fraction of the soil.
bulkdensity: float of bulk density of the soil (g/cm3).
sandpartdensity: float of density of the sand particles in the
soil (g/cm3).
watervolfraction: tuple of floats of two numbers that specify a
range for the volumetric water fraction of the
soil.
"""
self.ID = ID
@@ -314,8 +315,8 @@ class PeplinskiSoil:
model (http://dx.doi.org/10.1109/36.387598).
Args:
nbins (int): Number of bins to use to create the different materials.
G (FDTDGrid): Parameters describing a grid in a model.
nbins: int for number of bins to use to create the different materials.
G: FDTDGrid class describing a grid in a model.
"""
# Debye model properties of water at 25C & zero salinity
@@ -384,7 +385,7 @@ def create_built_in_materials(G):
"""Create pre-defined (built-in) materials.
Args:
G (FDTDGrid): Parameters describing a grid in a model.
G: FDTDGrid class describing a grid in a model.
"""
G.n_built_in_materials = len(G.materials)
@@ -406,23 +407,25 @@ def calculate_water_properties(T=25, S=0):
"""Get extended Debye model properties for water.
Args:
T (float): Temperature of water (degrees centigrade)
S (float): Salinity of water (part per thousand)
T: float for temperature of water (degrees centigrade).
S: float for salinity of water (part per thousand).
Returns:
eri (float): Relative permittivity at infinite frequency.
er (float): Static relative permittivity.
tau (float): Relaxation time (s).
sig (float): Conductivity (S/m)
eri: float for relative permittivity at infinite frequency.
er: float for static relative permittivity.
tau: float for relaxation time (s).
sig: float for conductivity (Siemens/m).
"""
# Properties of water from: https://doi.org/10.1109/JOE.1977.1145319
eri = 4.9
er = 88.045 - 0.4147 * T + 6.295e-4 * T**2 + 1.075e-5 * T**3
tau = (1 / (2 * np.pi)) * (1.1109e-10 - 3.824e-12 * T + 6.938e-14 * T**2 - 5.096e-16 * T**3)
tau = (1 / (2 * np.pi)) * (1.1109e-10 - 3.824e-12 * T + 6.938e-14 * T**2 -
5.096e-16 * T**3)
delta = 25 - T
beta = 2.033e-2 + 1.266e-4 * delta + 2.464e-6 * delta**2 - S * (1.849e-5 - 2.551e-7 * delta + 2.551e-8 * delta**2)
beta = (2.033e-2 + 1.266e-4 * delta + 2.464e-6 * delta**2 - S *
(1.849e-5 - 2.551e-7 * delta + 2.551e-8 * delta**2))
sig_25s = S * (0.182521 - 1.46192e-3 * S + 2.09324e-5 * S**2 - 1.28205e-7 * S**3)
sig = sig_25s * np.exp(-delta * beta)
@@ -434,9 +437,9 @@ def create_water(G, T=25, S=0):
salinity.
Args:
T (float): Temperature of water (degrees centigrade)
S (float): Salinity of water (part per thousand)
G (FDTDGrid): Parameters describing a grid in a model.
T: float for temperature of water (degrees centigrade).
S: float for salinity of water (part per thousand).
G: FDTDGrid class describing a grid in a model.
"""
eri, er, tau, sig = calculate_water_properties(T, S)
@@ -462,7 +465,7 @@ def create_grass(G):
"""Create single-pole Debye model for grass
Args:
G (FDTDGrid): Parameters describing a grid in a model.
G: FDTDGrid class describing a grid in a model.
"""
# Properties of grass from: http://dx.doi.org/10.1007/BF00902994

查看文件

@@ -117,7 +117,8 @@ class ModelBuildRun:
logger.info('')
for i, go in enumerate(G.geometryobjectswrite):
pbar = tqdm(total=go.datawritesize, unit='byte', unit_scale=True,
desc=f'Writing geometry object file {i + 1}/{len(G.geometryobjectswrite)}, {go.filename_hdf5.name}',
desc=f'Writing geometry object file {i + 1}/{len(G.geometryobjectswrite)}, ' +
f'{go.filename_hdf5.name}',
ncols=get_terminal_width() - 1, file=sys.stdout,
disable=not config.sim_config.general['progressbars'])
go.write_hdf5(G, pbar)

查看文件

@@ -122,18 +122,16 @@ class MPIExecutor(object):
def __init__(self, func, master=0, comm=None):
"""Initializes a new executor instance.
Parameters
----------
func: callable
The worker function. Jobs will be passed as keyword arguments,
so `func` must support this. This is usually the case, but
can be a problem when builtin functions are used, e.g. `abs()`.
master: int
The rank of the master. Must be in `comm`. All other
ranks in `comm` will be treated as workers.
comm: MPI.Intracomm
The MPI communicator used for communication between the
master and workers.
Attributes:
func: callable worker function. Jobs will be passed as keyword
arguments, so `func` must support this. This is usually the
case, but can be a problem when builtin functions are used,
e.g. `abs()`.
master: int of the rank of the master. Must be in `comm`. All other
ranks in `comm` will be treated as workers.
comm: MPI.Intracomm communicator used for communication between the
master and workers.
"""
if comm is None:
self.comm = MPI.COMM_WORLD
@@ -170,9 +168,8 @@ class MPIExecutor(object):
logger.basic(f'\n({self.comm.name}) - Master: {self.master}, Workers: {self.workers}')
def __enter__(self):
"""Context manager enter.
Only the master returns an executor,
all other ranks return None.
"""Context manager enter. Only the master returns an executor, all other
ranks return None.
"""
self.start()
if self.is_master():
@@ -180,8 +177,7 @@ class MPIExecutor(object):
return None
def __exit__(self, exc_type, exc_val, exc_tb):
"""Context manager exit.
"""
"""Context manager exit."""
if exc_type is not None:
logger.exception(exc_val)
return False
@@ -192,30 +188,26 @@ class MPIExecutor(object):
return True
def is_idle(self):
"""Returns a bool indicating whether the executor is idle.
The executor is considered to be not idle if *any* worker
process is busy with a job. That means, it is idle only
if *all* workers are idle.
Note: This member must not be called on a worker.
"""Returns a bool indicating whether the executor is idle. The executor
is considered to be not idle if *any* worker process is busy with a
job. That means, it is idle only if *all* workers are idle.
Note: This member must not be called on a worker.
"""
assert self.is_master()
return not any(self.busy)
def is_master(self):
"""Returns a bool indicating whether `self` is the master.
"""
"""Returns a bool indicating whether `self` is the master."""
return self.rank == self.master
def is_worker(self):
"""Returns a bool indicating whether `self` is a worker.
"""
"""Returns a bool indicating whether `self` is a worker."""
return not self.is_master()
def start(self):
"""Starts up workers.
A check is performed on the master whether the executor
has already been terminated, in which case a RuntimeError
is raised on the master.
"""Starts up workers. A check is performed on the master whether the
executor has already been terminated, in which case a RuntimeError
is raised on the master.
"""
if self.is_master():
if self._up:
@@ -227,8 +219,7 @@ class MPIExecutor(object):
self.__wait()
def join(self):
"""Joins the workers.
"""
"""Joins the workers."""
if self.is_master():
logger.debug(f'({self.comm.name}) - Terminating. Sending sentinel to all workers.')
@@ -252,21 +243,18 @@ class MPIExecutor(object):
def submit(self, jobs, sleep=0.0):
"""Submits a list of jobs to the workers and returns the results.
Parameters
----------
jobs: list
A list of keyword argument dicts. Each dict describes
a job and will be unpacked and supplied to the work function.
sleep: float
The number of seconds the master will sleep for when trying
to find an idle worker. The default value is 0.0, which means
the master will not sleep at all.
Returns
-------
results: list
A list of results, i.e. the return values of the work function,
received from the workers. The order of results is identical to
the order of `jobs`.
Args:
jobs: list of keyword argument dicts. Each dict describes a job and
will be unpacked and supplied to the work function.
sleep: float of number of seconds the master will sleep for when
trying to find an idle worker. The default value is 0.0,
which means the master will not sleep at all.
Returns:
results: list of results, i.e. the return values of the work
function, received from the workers. The order of
results is identical to the order of `jobs`.
"""
if not self._up:
raise RuntimeError('Cannot run jobs without a call to start()')
@@ -303,11 +291,10 @@ class MPIExecutor(object):
return results
def __wait(self):
"""The worker main loop.
The worker will enter the loop after `start()` has been called
and stay here until it receives the sentinel, e.g. by calling
`join()` on the master. In the mean time, the worker is
accepting work.
"""The worker main loop. The worker will enter the loop after `start()`
has been called and stay here until it receives the sentinel,
e.g. by calling `join()` on the master. In the mean time, the worker
is accepting work.
"""
assert self.is_worker()
@@ -337,16 +324,13 @@ class MPIExecutor(object):
def __guarded_work(self, work):
"""Executes work safely on the workers.
Parameters
----------
work: dict
Keyword arguments that are unpacked and given to the
work function.
Notes
-----
All exceptions that occur in the work function `func` are caught
and logged. The worker returns `None` to the master in that case
instead of the actual result.
N.B. All exceptions that occur in the work function `func` are caught
and logged. The worker returns `None` to the master in that case
instead of the actual result.
Args:
work: dict of keyword arguments that are unpacked and given to the
work function.
"""
assert self.is_worker()
try:

查看文件

@@ -77,8 +77,7 @@ class CFS:
d: float for dx, dy, or dz in direction of PML.
er: float for average permittivity of underlying material.
mr: float for average permeability of underlying material.
G: FDTDGrid object that holds essential parameters describing the
model.
G: FDTDGrid class describing a grid in a model.
"""
# Calculation of the maximum value of sigma from http://dx.doi.org/10.1109/8.546249
@@ -184,8 +183,7 @@ class PML:
def __init__(self, G, ID=None, direction=None, xs=0, xf=0, ys=0, yf=0, zs=0, zf=0):
"""
Args:
G: FDTDGrid object that holds essential parameters describing the
model.
G: FDTDGrid class describing a grid in a model.
ID: string identifier for PML slab.
direction: string for direction of increasing absorption.
xs, xf, ys, yf, zs, zf: floats of extent of the PML slab.
@@ -577,7 +575,7 @@ def print_pml_info(G):
"""Information about PMLs.
Args:
G: FDTDGrid objects that holds parameters describing a grid in a model.
G: FDTDGrid class describing a grid in a model.
"""
# No PML
if all(value == 0 for value in G.pmlthickness.values()):
@@ -601,7 +599,7 @@ def build_pml(G, key, value):
(based on underlying material er and mr from solid array).
Args:
G: FDTDGrid objects that holds parameters describing a grid in a model.
G: FDTDGrid class describing a grid in a model.
key: string identifier of PML slab.
value: int with thickness of PML slab in cells.
"""

查看文件

@@ -46,7 +46,7 @@ def htod_rx_arrays(G, queue=None):
components for receivers.
Args:
G: FDTDGrid object that holds essential parameters describing the model.
G: FDTDGrid class describing a grid in a model.
queue: pyopencl queue.
Returns:
@@ -92,7 +92,7 @@ def dtoh_rx_array(rxs_dev, rxcoords_dev, G):
rxcoords_dev: int array of receiver coordinates on compute device.
rxs_dev: float array of receiver data on compute device - rows are field
components; columns are iterations; pages are receivers.
G: FDTDGrid object that holds essential parameters describing the model.
G: FDTDGrid class describing a grid in a model.
"""

查看文件

@@ -42,8 +42,9 @@ class Scene:
def add(self, user_object):
"""Add the user object to the scene.
:param user_object: User object to add to the scene. For example, :class:`gprMax.cmds_single_use.Domain`
:type user_object: UserObjectMulti/UserObjectGeometry/UserObjectSingle
Args:
user_object: user object to add to the scene. For example,
:class:`gprMax.cmds_single_use.Domain`
"""
if isinstance(user_object, UserObjectMulti):
self.multiple_cmds.append(user_object)
@@ -55,7 +56,7 @@ class Scene:
logger.exception('This object is unknown to gprMax')
raise ValueError
def process_subgrid_commands(self, subgrids):
def process_subgrid_commands(self):
# Check for subgrid user objects
def func(obj):
if isinstance(obj, SubGridUserBase):
@@ -106,7 +107,9 @@ class Scene:
for cmd_type in self.essential_cmds:
d = any([isinstance(cmd, cmd_type) for cmd in cmds_unique])
if not d:
logger.exception('Your input file is missing essential commands required to run a model. Essential commands are: Domain, Discretisation, Time Window')
logger.exception('Your input file is missing essential commands ' +
'required to run a model. Essential commands ' +
'are: Domain, Discretisation, Time Window')
raise ValueError
self.process_cmds(cmds_unique, G)
@@ -139,6 +142,6 @@ class Scene:
self.process_cmds(self.geometry_cmds, G, sort=False)
# Process all the commands for the subgrid
self.process_subgrid_commands(G.subgrids)
self.process_subgrid_commands()
return self

查看文件

@@ -49,11 +49,11 @@ class Snapshot:
dx=None, dy=None, dz=None, time=None, filename=None, fileext=None):
"""
Args:
xs, xf, ys, yf, zs, zf (int): Extent of the volume in cells.
dx, dy, dz (int): Spatial discretisation in cells.
time (int): Iteration number to take the snapshot on.
filename (str): Filename to save to.
fileext (str): File extension.
xs, xf, ys, yf, zs, zf: ints for the extent of the volume in cells.
dx, dy, dz: ints for the spatial discretisation in cells.
time: int for the iteration number to take the snapshot on.
filename: string for the filename to save to.
fileext: string for the file extension.
"""
self.fileext = fileext
@@ -90,7 +90,7 @@ class Snapshot:
"""Store (in memory) electric and magnetic field values for snapshot.
Args:
G (FDTDGrid): Parameters describing a grid in a model.
G: FDTDGrid class describing a grid in a model.
"""
# Memory views of field arrays to dimensions required for the snapshot
@@ -132,8 +132,8 @@ class Snapshot:
or HDF5 format (.h5) files
Args:
pbar (class): Progress bar class instance.
G (FDTDGrid): Parameters describing a grid in a model.
pbar: Progress bar class instance.
G: FDTDGrid class describing a grid in a model.
"""
if self.fileext == '.vti':
@@ -147,8 +147,8 @@ class Snapshot:
N.B. No Python 3 support for VTK at time of writing (03/2015)
Args:
pbar (class): Progress bar class instance.
G (FDTDGrid): Parameters describing a grid in a model.
pbar: Progress bar class instance.
G: FDTDGrid class describing a grid in a model.
"""
hfield_offset = (3 * np.dtype(config.sim_config.dtypes['float_or_double']).itemsize
@@ -197,8 +197,8 @@ class Snapshot:
"""Write snapshot file in HDF5 (.h5) format.
Args:
pbar (class): Progress bar class instance.
G (FDTDGrid): Parameters describing a grid in a model.
pbar: Progress bar class instance.
G: FDTDGrid class describing a grid in a model.
"""
f = h5py.File(self.filename, 'w')
@@ -227,7 +227,7 @@ def htod_snapshot_array(G, queue=None):
"""Initialise array on compute device for to store field data for snapshots.
Args:
G: FDTDGrid object with parameters describing a grid in a model.
G: FDTDGrid class describing a grid in a model.
queue: pyopencl queue.
Returns:

查看文件

@@ -27,7 +27,7 @@ def create_G():
"""Create grid object according to solver.
Returns:
G: FDTDGrid that holds essential parameters describing the model.
G: FDTDGrid class describing a grid in a model.
"""
if config.sim_config.general['solver'] == 'cpu':
@@ -44,7 +44,7 @@ def create_solver(G):
"""Create configured solver object.
Args:
G: FDTDGrid that holds essential parameters describing the model.
G: FDTDGrid class describing a grid in a model.
Returns:
solver: Solver object.
@@ -80,7 +80,7 @@ class Solver:
"""
Args:
updates: Updates contains methods to run FDTD algorithm.
hsg: bool to use sub-gridding.
hsg: boolean to use sub-gridding.
"""
self.updates = updates

查看文件

@@ -45,8 +45,7 @@ class Source:
"""Calculates all waveform values for source for duration of simulation.
Args:
G: FDTDGrid object that olds essential parameters describing the
model.
G: FDTDGrid class describing a grid in a model.
"""
# Waveform values for electric sources - calculated half a timestep later
self.waveformvaluesJ = np.zeros((G.iterations),
@@ -89,8 +88,7 @@ class VoltageSource(Source):
ID: memory view of array of numeric IDs corresponding to materials
in the model.
Ex, Ey, Ez: memory view of array of electric field values.
G: FDTDGrid object that olds essential parameters describing the
model.
G: FDTDGrid class describing a grid in a model.
"""
if iteration * G.dt >= self.start and iteration * G.dt <= self.stop:
@@ -128,8 +126,7 @@ class VoltageSource(Source):
voltage source conductivity to the underlying parameters.
Args:
G: FDTDGrid object that olds essential parameters describing the
model.
G: FDTDGrid class describing a grid in a model.
"""
if self.resistance != 0:
@@ -175,8 +172,7 @@ class HertzianDipole(Source):
ID: memory view of array of numeric IDs corresponding to materials
in the model.
Ex, Ey, Ez: memory view of array of electric field values.
G: FDTDGrid object that olds essential parameters describing the
model.
G: FDTDGrid class describing a grid in a model.
"""
if iteration * G.dt >= self.start and iteration * G.dt <= self.stop:
@@ -213,8 +209,7 @@ class MagneticDipole(Source):
ID: memory view of array of numeric IDs corresponding to materials
in the model.
Hx, Hy, Hz: memory view of array of magnetic field values.
G: FDTDGrid object that olds essential parameters describing the
model.
G: FDTDGrid class describing a grid in a model.
"""
if iteration * G.dt >= self.start and iteration * G.dt <= self.stop:
@@ -245,7 +240,7 @@ def htod_src_arrays(sources, G, queue=None):
Args:
sources: list of sources of one type, e.g. HertzianDipole
G: FDTDGrid object that holds essential parameters describing the model.
G: FDTDGrid class describing a grid in a model.
queue: pyopencl queue.
Returns:
@@ -303,8 +298,7 @@ class TransmissionLine(Source):
def __init__(self, G):
"""
Args:
G: FDTDGrid object that holds essential parameters describing the
model.
G: FDTDGrid class describing a grid in a model.
"""
super().__init__()
@@ -341,8 +335,7 @@ class TransmissionLine(Source):
from: http://dx.doi.org/10.1002/mop.10415
Args:
G: FDTDGrid object that holds essential parameters describing the
model.
G: FDTDGrid class describing a grid in a model.
"""
for iteration in range(G.iterations):
@@ -358,8 +351,7 @@ class TransmissionLine(Source):
"""Updates absorbing boundary condition at end of the transmission line.
Args:
G: FDTDGrid object that holds essential parameters describing the
model.
G: FDTDGrid class describing a grid in a model.
"""
h = (config.c * G.dt - self.dl) / (config.c * G.dt + self.dl)
@@ -373,8 +365,7 @@ class TransmissionLine(Source):
Args:
iteration: int of current iteration (timestep).
G: FDTDGrid object that holds essential parameters describing the
model.
G: FDTDGrid class describing a grid in a model.
"""
# Update all the voltage values along the line
@@ -392,8 +383,7 @@ class TransmissionLine(Source):
Args:
iteration: int of current iteration (timestep).
G: FDTDGrid object that holds essential parameters describing the
model.
G: FDTDGrid class describing a grid in a model.
"""
# Update all the current values along the line
@@ -416,8 +406,7 @@ class TransmissionLine(Source):
ID: memory view of array of numeric IDs corresponding to materials
in the model.
Ex, Ey, Ez: memory view of array of electric field values.
G: FDTDGrid object that olds essential parameters describing the
model.
G: FDTDGrid class describing a grid in a model.
"""
if iteration * G.dt >= self.start and iteration * G.dt <= self.stop:
@@ -447,8 +436,7 @@ class TransmissionLine(Source):
ID: memory view of array of numeric IDs corresponding to materials
in the model.
Hx, Hy, Hz: memory view of array of magnetic field values.
G: FDTDGrid object that olds essential parameters describing the
model.
G: FDTDGrid class describing a grid in a model.
"""
if iteration * G.dt >= self.start and iteration * G.dt <= self.stop:

查看文件

@@ -45,7 +45,7 @@ class CPUUpdates:
def __init__(self, G):
"""
Args:
G (FDTDGrid): Parameters describing a grid in a model.
G: FDTDGrid class describing a grid in a model.
"""
self.grid = G
self.dispersive_update_a = None
@@ -201,7 +201,7 @@ class CPUUpdates:
"""Set dispersive update functions.
Args:
props (Props): Dispersive material properties.
props: dispersive material properties.
"""
update_f = 'update_electric_dispersive_{}pole_{}_{}_{}'
disp_a = update_f.format(props.poles, 'A', props.precision, props.dispersion_type)
@@ -234,7 +234,7 @@ class CUDAUpdates:
def __init__(self, G):
"""
Args:
G: CUDAGrid of parameters describing a grid in a model.
G: CUDAGrid class describing a grid in a model.
"""
self.grid = G
@@ -754,7 +754,7 @@ class OpenCLUpdates:
def __init__(self, G):
"""
Args:
G: OpenCLGrid of parameters describing a grid in a model.
G: OpenCLGrid class describing a grid in a model.
"""
self.grid = G

查看文件

@@ -37,7 +37,7 @@ logger = logging.getLogger(__name__)
def create_user_input_points(grid, user_obj):
"""Return a point checker class based on the grid supplied."""
"""Returns a point checker class based on the grid supplied."""
if isinstance(grid, SubGridBase):
# Local object configuration trumps. User can turn off autotranslate for
@@ -54,7 +54,7 @@ def create_user_input_points(grid, user_obj):
class UserInput:
"""Base class to handle (x, y, z) points supplied by the user."""
"""Handles (x, y, z) points supplied by the user."""
def __init__(self, grid):
self.grid = grid
@@ -69,34 +69,38 @@ class UserInput:
# Incorrect index
i = p[v.index(err.args[0])]
if name:
s = f"\n'{cmd_str}' {err.args[0]} {name}-coordinate {i * dl:g} is not within the model domain"
s = (f"\n'{cmd_str}' {err.args[0]} {name}-coordinate {i * dl:g} " +
"is not within the model domain")
else:
s = f"\n'{cmd_str}' {err.args[0]}-coordinate {i * dl:g} is not within the model domain"
s = (f"\n'{cmd_str}' {err.args[0]}-coordinate {i * dl:g} is not " +
"within the model domain")
logger.exception(s)
raise
def discretise_point(self, p):
"""Function to get the index of a continuous point with the grid."""
"""Gets the index of a continuous point with the grid."""
rv = np.vectorize(round_value)
return rv(p / self.grid.dl)
def round_to_grid(self, p):
"""Function to get the nearest continuous point on the grid from a continuous point in space."""
"""Gets the nearest continuous point on the grid from a continuous point
in space.
"""
return self.discretise_point(p) * self.grid.dl
def descretised_to_continuous(self, p):
"""Function to return a point given as indices to a continous point in the real space."""
"""Returns a point given as indices to a continous point in the real space."""
return p * self.grid.dl
class MainGridUserInput(UserInput):
"""Class to handle (x, y, z) points supplied by the user in the main grid."""
"""Handles (x, y, z) points supplied by the user in the main grid."""
def __init__(self, grid):
super().__init__(grid)
def check_point(self, p, cmd_str, name=''):
"""Discretise point and check its within the domain"""
"""Discretises point and check its within the domain"""
p = self.discretise_point(p)
self.point_within_bounds(p, cmd_str, name)
return p
@@ -105,7 +109,8 @@ class MainGridUserInput(UserInput):
p = self.check_point(p, cmd_str, name)
if self.grid.within_pml(p):
logger.warning(f"'{cmd_str}' sources and receivers should not normally be positioned within the PML.")
logger.warning(f"'{cmd_str}' sources and receivers should not " +
"normally be positioned within the PML.")
return p
@@ -114,7 +119,8 @@ class MainGridUserInput(UserInput):
p2 = self.check_point(p2, cmd_str, name='upper')
if np.greater(p1, p2).any():
logger.exception(f"'{cmd_str}' the lower coordinates should be less than the upper coordinates.")
logger.exception(f"'{cmd_str}' the lower coordinates should be less " +
"than the upper coordinates.")
raise ValueError
return p1, p2
@@ -127,16 +133,20 @@ class MainGridUserInput(UserInput):
return p1, p2, p3
def discretise_static_point(self, p):
"""Function to get the index of a continuous point regardless of the point of origin of the grid."""
"""Gets the index of a continuous point regardless of the point of
origin of the grid.
"""
return super().discretise_point(p)
def round_to_grid_static_point(self, p):
"""Function to get the index of a continuous point regardless of the point of origin of the grid."""
"""Gets the index of a continuous point regardless of the point of
origin of the grid.
"""
return super().discretise_point(p) * self.grid.dl
class SubgridUserInput(MainGridUserInput):
"""Class to handle (x, y, z) points supplied by the user in the sub grid.
"""Handles (x, y, z) points supplied by the user in the sub grid.
This class autotranslates points from main grid to subgrid equivalent
(within IS). Useful if material traverse is not required.
"""
@@ -153,9 +163,7 @@ class SubgridUserInput(MainGridUserInput):
self.inner_bound)
def translate_to_gap(self, p):
"""Function to translate the user input point to the real point in the
subgrid.
"""
"""Translates the user input point to the real point in the subgrid."""
p1 = (p[0] - self.grid.i0 * self.grid.ratio) + self.grid.n_boundary_cells_x
p2 = (p[1] - self.grid.j0 * self.grid.ratio) + self.grid.n_boundary_cells_y
@@ -164,9 +172,9 @@ class SubgridUserInput(MainGridUserInput):
return np.array([p1, p2, p3])
def discretise_point(self, p):
"""Function to discretise a point. Does not provide any checks. The
user enters coordinates relative to self.inner_bound. This function
translate the user point to the correct index for building objects.
"""Discretises a point. Does not provide any checks. The user enters
coordinates relative to self.inner_bound. This function translates
the user point to the correct index for building objects.
"""
p = super().discretise_point(p)
@@ -185,15 +193,18 @@ class SubgridUserInput(MainGridUserInput):
# the OS non-working region.
if (np.less(p_t, self.inner_bound).any() or
np.greater(p_t, self.outer_bound).any()):
logger.warning(f"'{cmd_str}' this object traverses the Outer Surface. This is an advanced feature.")
logger.warning(f"'{cmd_str}' this object traverses the Outer " +
"Surface. This is an advanced feature.")
return p_t
def discretise_static_point(self, p):
"""Function to get the index of a continuous point regardless of the point of origin of the grid."""
"""Gets the index of a continuous point regardless of the point of
origin of the grid."""
return super().discretise_point(p)
def round_to_grid_static_point(self, p):
"""Function to get the index of a continuous point regardless of the point of origin of the grid."""
"""Gets the index of a continuous point regardless of the point of
origin of the grid."""
return super().discretise_point(p) * self.grid.dl

查看文件

@@ -54,10 +54,13 @@ class Waveform:
waveforms.
"""
if self.type == 'gaussian' or self.type == 'gaussiandot' or self.type == 'gaussiandotnorm' or self.type == 'gaussianprime' or self.type == 'gaussiandoubleprime':
if (self.type == 'gaussian' or self.type == 'gaussiandot' or
self.type == 'gaussiandotnorm' or self.type == 'gaussianprime' or
self.type == 'gaussiandoubleprime'):
self.chi = 1 / self.freq
self.zeta = 2 * np.pi**2 * self.freq**2
elif self.type == 'gaussiandotdot' or self.type == 'gaussiandotdotnorm' or self.type == 'ricker':
elif (self.type == 'gaussiandotdot' or
self.type == 'gaussiandotdotnorm' or self.type == 'ricker'):
self.chi = np.sqrt(2) / self.freq
self.zeta = np.pi**2 * self.freq**2
@@ -65,11 +68,11 @@ class Waveform:
"""Calculates value of the waveform at a specific time.
Args:
time (float): Absolute time.
dt (float): Absolute time discretisation.
time: float for absolute time.
dt: float for absolute time discretisation.
Returns:
ampvalue (float): Calculated value for waveform.
ampvalue: float for calculated value for waveform.
"""
self.calculate_coefficients()
@@ -130,7 +133,3 @@ class Waveform:
ampvalue *= self.amp
return ampvalue
def __str__(self):
logger.debug('Do we need this?')
return f'Waveform: ID={self.ID}, type={self.type}, amp{self.amp}, freq={self.freq}'

查看文件

@@ -23,16 +23,18 @@ from gprMax.waveforms import Waveform
def hertzian_dipole_fs(iterations, dt, dxdydz, rx):
"""Analytical solution of a z-directed Hertzian dipole in free space with a Gaussian current waveform (http://dx.doi.org/10.1016/0021-9991(83)90103-1).
"""Analytical solution of a z-directed Hertzian dipole in free space with a
Gaussian current waveform (http://dx.doi.org/10.1016/0021-9991(83)90103-1).
Args:
iterations (int): Number of time steps.
dt (float): Time step (seconds).
dxdydz (float): Tuple of spatial resolution (metres).
rx (float): Tuple of coordinates of receiver position relative to transmitter position (metres).
iterations: int for number of time steps.
dt: float for time step (seconds).
dxdydz: tuple of floats for spatial resolution (metres).
rx: tuple of floats for coordinates of receiver position relative to
transmitter position (metres).
Returns:
fields (float): Array contain electric and magnetic field components.
fields: float array containing electric and magnetic field components.
"""
# Waveform
@@ -111,7 +113,8 @@ def hertzian_dipole_fs(iterations, dt, dxdydz, rx):
# Calculate fields
for timestep in range(iterations):
# Calculate values for waveform, I * dl (current multiplied by dipole length) to match gprMax behaviour
# Calculate values for waveform, I * dl (current multiplied by dipole
# length) to match gprMax behaviour
fint_Ex = wint.calculate_value((timestep * dt) - tau_Ex, dt) * dl
f_Ex = w.calculate_value((timestep * dt) - tau_Ex, dt) * dl
fdot_Ex = wdot.calculate_value((timestep * dt) - tau_Ex, dt) * dl
@@ -131,17 +134,21 @@ def hertzian_dipole_fs(iterations, dt, dxdydz, rx):
fdot_Hy = wdot.calculate_value((timestep * dt) - tau_Hy, dt) * dl
# Ex
fields[timestep, 0] = ((Ex_x * Ex_z) / (4 * np.pi * config.sim_config.em_consts['e0'] * Er_x**5)) * (3 * (fint_Ex + (tau_Ex * f_Ex)) + (tau_Ex**2 * fdot_Ex))
fields[timestep, 0] = (((Ex_x * Ex_z) / (4 * np.pi * config.sim_config.em_consts['e0'] * Er_x**5)) *
(3 * (fint_Ex + (tau_Ex * f_Ex)) + (tau_Ex**2 * fdot_Ex)))
# Ey
try:
tmp = Ey_y / Ey_x
except ZeroDivisionError:
tmp = 0
fields[timestep, 1] = tmp * ((Ey_x * Ey_z) / (4 * np.pi * config.sim_config.em_consts['e0'] * Er_y**5)) * (3 * (fint_Ey + (tau_Ey * f_Ey)) + (tau_Ey**2 * fdot_Ey))
fields[timestep, 1] = (tmp * ((Ey_x * Ey_z) / (4 * np.pi * config.sim_config.em_consts['e0'] * Er_y**5)) *
(3 * (fint_Ey + (tau_Ey * f_Ey)) + (tau_Ey**2 * fdot_Ey)))
# Ez
fields[timestep, 2] = (1 / (4 * np.pi * config.sim_config.em_consts['e0'] * Er_z**5)) * ((2 * Ez_z**2 - (Ez_x**2 + Ez_y**2)) * (fint_Ez + (tau_Ez * f_Ez)) - (Ez_x**2 + Ez_y**2) * tau_Ez**2 * fdot_Ez)
fields[timestep, 2] = ((1 / (4 * np.pi * config.sim_config.em_consts['e0'] * Er_z**5)) *
((2 * Ez_z**2 - (Ez_x**2 + Ez_y**2)) * (fint_Ez + (tau_Ez * f_Ez)) -
(Ez_x**2 + Ez_y**2) * tau_Ez**2 * fdot_Ez))
# Hx
fields[timestep, 3] = - (Hx_y / (4 * np.pi * Hr_x**3)) * (f_Hx + (tau_Hx * fdot_Hx))

查看文件

@@ -1,211 +0,0 @@
# Copyright (C) 2015-2022: The University of Edinburgh, United Kingdom
# Authors: Craig Warren, Antonis Giannopoulos, and John Hartley
#
# This file is part of gprMax.
#
# gprMax is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gprMax is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gprMax. If not, see <http://www.gnu.org/licenses/>.
import argparse
import itertools
import os
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
from gprMax._version import __version__
from gprMax.utilities.host_info import get_host_info
from gprMax.utilities.utilities import human_size
"""Plots execution times and speedup factors from benchmarking models run with different numbers of CPU (OpenMP) threads. Can also benchmark GPU(s) if required. Results are read from a NumPy archive."""
# Parse command line arguments
parser = argparse.ArgumentParser(description='Plots execution times and speedup factors from benchmarking models run with different numbers of CPU (OpenMP) threads. Can also benchmark GPU(s) if required. Results are read from a NumPy archive.', usage='cd gprMax; python -m tests.benchmarking.plot_benchmark numpyfile')
parser.add_argument('baseresult', help='name of NumPy archive file including path')
parser.add_argument('--otherresults', default=None, help='list of NumPy archives file including path', nargs='+')
args = parser.parse_args()
# Load base result
baseresult = dict(np.load(args.baseresult))
# Get machine/CPU/OS details
hostinfo = get_host_info()
try:
machineIDlong = str(baseresult['machineID'])
# machineIDlong = 'Dell PowerEdge R630; Intel(R) Xeon(R) CPU E5-2630 v3 @ 2.40GHz; Linux (3.10.0-327.18.2.el7.x86_64)' # Use to manually describe machine
machineID = machineIDlong.split(';')[0]
cpuID = machineIDlong.split(';')[1]
cpuID = cpuID.split('GHz')[0].split('x')[1][1::] + 'GHz'
except KeyError:
hyperthreading = ', {} cores with Hyper-Threading'.format(hostinfo['logicalcores']) if hostinfo['hyperthreading'] else ''
machineIDlong = '{}; {} x {} ({} cores{}); {} RAM; {}'.format(hostinfo['machineID'], hostinfo['sockets'], hostinfo['cpuID'], hostinfo['physicalcores'], hyperthreading, human_size(hostinfo['ram'], a_kilobyte_is_1024_bytes=True), hostinfo['osversion'])
print('Host: {}'.format(machineIDlong))
# Base result - general info
print('Model: {}'.format(args.baseresult))
cells = np.array([baseresult['numcells'][0]]) # Length of cubic model side for cells per second metric
baseplotlabel = os.path.splitext(os.path.split(args.baseresult)[1])[0] + '.in'
# Base result - CPU threads and times info from Numpy archive
if baseresult['cputhreads'].size != 0:
for i in range(len(baseresult['cputhreads'])):
print('{} CPU (OpenMP) thread(s): {:g} s'.format(baseresult['cputhreads'][i], baseresult['cputimes'][i]))
cpucellspersec = np.array([(baseresult['numcells'][0] * baseresult['numcells'][1] * baseresult['numcells'][2] * baseresult['iterations']) / baseresult['cputimes'][0]])
# Base result - GPU time info
gpuIDs = baseresult['gpuIDs'].tolist()
if gpuIDs:
gpucellspersec = np.zeros((len(gpuIDs), 1))
for i in range(len(gpuIDs)):
print('NVIDIA {}: {:g} s'.format(gpuIDs[i], baseresult['gputimes'][i]))
gpucellspersec[i] = (baseresult['numcells'][0] * baseresult['numcells'][1] * baseresult['numcells'][2] * baseresult['iterations']) / baseresult['gputimes'][i]
# Load any other results and info
otherresults = []
otherplotlabels = []
if args.otherresults is not None:
for i, result in enumerate(args.otherresults):
otherresults.append(dict(np.load(result)))
print('\nModel: {}'.format(result))
cells = np.append(cells, otherresults[i]['numcells'][0]) # Length of cubic model side for cells per second metric
otherplotlabels.append(os.path.splitext(os.path.split(result)[1])[0] + '.in')
# CPU
if otherresults[i]['cputhreads'].size != 0:
for thread in range(len(otherresults[i]['cputhreads'])):
print('{} CPU (OpenMP) thread(s): {:g} s'.format(otherresults[i]['cputhreads'][thread], otherresults[i]['cputimes'][thread]))
cpucellspersec = np.append(cpucellspersec, (otherresults[i]['numcells'][0] * otherresults[i]['numcells'][1] * otherresults[i]['numcells'][2] * otherresults[i]['iterations']) / otherresults[i]['cputimes'][0])
# GPU
othergpuIDs = otherresults[i]['gpuIDs'].tolist()
if othergpuIDs:
# Array for cells per second metric
tmp = np.zeros((len(gpuIDs), len(args.otherresults) + 1))
tmp[:gpucellspersec.shape[0],:gpucellspersec.shape[1]] = gpucellspersec
gpucellspersec = tmp
for j in range(len(othergpuIDs)):
print('NVIDIA {}: {:g} s'.format(othergpuIDs[j], otherresults[i]['gputimes'][j]))
gpucellspersec[j,i+1] = (otherresults[i]['numcells'][0] * otherresults[i]['numcells'][1] * otherresults[i]['numcells'][2] * otherresults[i]['iterations']) / otherresults[i]['gputimes'][j]
# Get gprMax version
try:
version = str(baseresult['version'])
except KeyError:
version = __version__
# Create/setup plot figure
#colors = ['#E60D30', '#5CB7C6', '#A21797', '#A3B347'] # Plot colours from http://tools.medialab.sciences-po.fr/iwanthue/index.php
colorIDs = ['#015dbb', '#c23100', '#00a15a', '#c84cd0', '#ff9aa0']
colors = itertools.cycle(colorIDs)
lines = itertools.cycle(('--', ':', '-.', '-'))
markers = ['o', 'd', '^', 's', '*']
fig, ax = plt.subplots(num=machineID, figsize=(30, 10), facecolor='w', edgecolor='w')
fig.suptitle(machineIDlong + '\ngprMax v' + version)
gs = gridspec.GridSpec(1, 3, hspace=0.5)
plotcount = 0
###########################################
# Subplot of CPU (OpenMP) threads vs time #
###########################################
if baseresult['cputhreads'].size != 0:
ax = plt.subplot(gs[0, plotcount])
ax.plot(baseresult['cputhreads'], baseresult['cputimes'], color=next(colors), marker=markers[0], markeredgecolor='none', ms=8, lw=2, label=baseplotlabel)
if args.otherresults is not None:
for i, result in enumerate(otherresults):
ax.plot(result['cputhreads'], result['cputimes'], color=next(colors), marker=markers[0], markeredgecolor='none', ms=8, lw=2, ls=next(lines), label=otherplotlabels[i])
ax.set_xlabel('Number of CPU (OpenMP) threads')
ax.set_ylabel('Time [s]')
ax.grid()
legend = ax.legend(loc=1)
frame = legend.get_frame()
frame.set_edgecolor('white')
ax.set_xlim([0, baseresult['cputhreads'][0] * 1.1])
ax.set_xticks(np.append(baseresult['cputhreads'], 0))
ax.set_ylim(0, top=ax.get_ylim()[1] * 1.1)
plotcount += 1
######################################################
# Subplot of CPU (OpenMP) threads vs speed-up factor #
######################################################
colors = itertools.cycle(colorIDs) # Reset color iterator
if baseresult['cputhreads'].size != 0:
ax = plt.subplot(gs[0, plotcount])
ax.plot(baseresult['cputhreads'], baseresult['cputimes'][-1] / baseresult['cputimes'], color=next(colors), marker=markers[0], markeredgecolor='none', ms=8, lw=2, label=baseplotlabel)
if args.otherresults is not None:
for i, result in enumerate(otherresults):
ax.plot(result['cputhreads'], result['cputimes'][-1] / result['cputimes'], color=next(colors), marker=markers[0], markeredgecolor='none', ms=8, lw=2, ls=next(lines), label=otherplotlabels[i])
ax.set_xlabel('Number of CPU (OpenMP) threads')
ax.set_ylabel('Speed-up factor')
ax.grid()
legend = ax.legend(loc=2)
frame = legend.get_frame()
frame.set_edgecolor('white')
ax.set_xlim([0, baseresult['cputhreads'][0] * 1.1])
ax.set_xticks(np.append(baseresult['cputhreads'], 0))
ax.set_ylim(bottom=1, top=ax.get_ylim()[1] * 1.1)
plotcount += 1
###########################################
# Subplot of simulation size vs cells/sec #
###########################################
def autolabel(rects):
"""Attach a text label above each bar on a matplotlib bar chart displaying its height.
Args:
rects: Handle to bar chart
"""
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height,
'%d' % int(height),
ha='center', va='bottom', fontsize=10, rotation=90)
colors = itertools.cycle(colorIDs) # Reset color iterator
ax = plt.subplot(gs[0, plotcount])
barwidth = 8 # the width of the bars
if baseresult['cputhreads'].size != 0:
cpu = ax.bar(cells - (1/2) * barwidth, cpucellspersec / 1e6, barwidth, color=next(colors), edgecolor='none', label=cpuID)
autolabel(cpu)
if gpuIDs:
positions = np.arange(-gpucellspersec.shape[0] / 2, gpucellspersec.shape[0] / 2, 1)
for i in range(gpucellspersec.shape[0]):
gpu = ax.bar(cells + positions[i] * barwidth, gpucellspersec[i,:] / 1e6, barwidth, color=next(colors), edgecolor='none', label='NVIDIA ' + gpuIDs[i])
autolabel(gpu)
ax.set_xlabel('Side length of cubic domain [cells]')
ax.set_ylabel('Performance [Mcells/s]')
ax.grid()
legend = ax.legend(loc=2)
frame = legend.get_frame()
frame.set_edgecolor('white')
ax.set_xticks(cells)
ax.set_xticklabels(cells)
ax.set_xlim([0, cells[-1] * 1.1])
ax.set_ylim(bottom=0, top=ax.get_ylim()[1] * 1.1)
##########################
# Save a png of the plot #
##########################
fig.savefig(os.path.join(os.path.dirname(args.baseresult), machineID.replace(' ', '_') + '.png'), dpi=150, format='png', bbox_inches='tight', pad_inches=0.1)
#fig.savefig(os.path.join(os.path.dirname(args.baseresult), machineID.replace(' ', '_') + '.pdf'), dpi='none', format='pdf', bbox_inches='tight', pad_inches=0.1)
plt.show()

查看文件

@@ -17,16 +17,23 @@
# along with gprMax. If not, see <http://www.gnu.org/licenses/>.
import argparse
import logging
from pathlib import Path
import h5py
import matplotlib.pyplot as plt
import numpy as np
"""Plots a comparison of fields between given simulation output and experimental data files."""
logger = logging.getLogger(__name__)
"""Plots a comparison of fields between given simulation output and experimental
data files.
"""
# Parse command line arguments
parser = argparse.ArgumentParser(description='Plots a comparison of fields between given simulation output and experimental data files.', usage='cd gprMax; python -m tests.test_experimental modelfile realfile output')
parser = argparse.ArgumentParser(description='Plots a comparison of fields between ' +
'given simulation output and experimental data files.',
usage='cd gprMax; python -m tests.test_experimental modelfile realfile output')
parser.add_argument('modelfile', help='name of model output file including path')
parser.add_argument('realfile', help='name of file containing experimental data including path')
parser.add_argument('output', help='output to be plotted, i.e. Ex Ey Ez', nargs='+')
@@ -48,7 +55,8 @@ else:
polarity = 1
if args.output[0] not in availablecomponents:
logger.exception(f"{args.output[0]} output requested to plot, but the available output for receiver 1 is {', '.join(availablecomponents)}")
logger.exception(f"{args.output[0]} output requested to plot, but the " +
f"available output for receiver 1 is {', '.join(availablecomponents)}")
raise ValueError
floattype = f[path + args.output[0]].dtype
@@ -73,7 +81,8 @@ realmax = np.where(np.abs(real[:, 1]) == 1)[0][0]
difftime = - (timemodel[modelmax] - real[realmax, 0])
# Plot modelled and real data
fig, ax = plt.subplots(num=modelfile.stem + '_vs_' + realfile.stem, figsize=(20, 10), facecolor='w', edgecolor='w')
fig, ax = plt.subplots(num=modelfile.stem + '_vs_' + realfile.stem,
figsize=(20, 10), facecolor='w', edgecolor='w')
ax.plot(timemodel + difftime, model, 'r', lw=2, label='Model')
ax.plot(real[:, 0], real[:, 1], 'r', ls='--', lw=2, label='Experiment')
ax.set_xlabel('Time [s]')
@@ -86,7 +95,9 @@ ax.grid()
# Save a PDF/PNG of the figure
savename = modelfile.stem + '_vs_' + realfile.stem
savename = modelfile.parent / savename
# fig.savefig(savename.with_suffix('.pdf'), dpi=None, format='pdf', bbox_inches='tight', pad_inches=0.1)
# fig.savefig(savename.with_suffix('.png'), dpi=150, format='png', bbox_inches='tight', pad_inches=0.1)
# fig.savefig(savename.with_suffix('.pdf'), dpi=None, format='pdf',
# bbox_inches='tight', pad_inches=0.1)
# fig.savefig(savename.with_suffix('.png'), dpi=150, format='png',
# bbox_inches='tight', pad_inches=0.1)
plt.show()

查看文件

@@ -24,8 +24,7 @@ import gprMax
import h5py
import matplotlib.pyplot as plt
import numpy as np
from colorama import Fore, Style, init
init()
from tests.analytical_solutions import hertzian_dipole_fs
logger = logging.getLogger(__name__)
@@ -50,7 +49,9 @@ basepath = Path(__file__).parents[0] / modelset
# List of available basic test models
testmodels = ['hertzian_dipole_fs_analytical', '2D_ExHyHz', '2D_EyHxHz', '2D_EzHxHy', 'cylinder_Ascan_2D', 'hertzian_dipole_fs', 'hertzian_dipole_hs', 'hertzian_dipole_dispersive', 'magnetic_dipole_fs']
testmodels = ['hertzian_dipole_fs_analytical', '2D_ExHyHz', '2D_EyHxHz', '2D_EzHxHy',
'cylinder_Ascan_2D', 'hertzian_dipole_fs', 'hertzian_dipole_hs',
'hertzian_dipole_dispersive', 'magnetic_dipole_fs']
# List of available advanced test models
# testmodels = ['antenna_GSSI_1500_fs', 'antenna_MALA_1200_fs']
@@ -86,11 +87,13 @@ for i, model in enumerate(testmodels):
# Arrays for storing time
float_or_double = filetest[path + outputstest[0]].dtype
timetest = np.linspace(0, (filetest.attrs['Iterations'] - 1) * filetest.attrs['dt'], num=filetest.attrs['Iterations']) / 1e-9
timetest = np.linspace(0, (filetest.attrs['Iterations'] - 1) * filetest.attrs['dt'],
num=filetest.attrs['Iterations']) / 1e-9
timeref = timetest
# Arrays for storing field data
datatest = np.zeros((filetest.attrs['Iterations'], len(outputstest)), dtype=float_or_double)
datatest = np.zeros((filetest.attrs['Iterations'], len(outputstest)),
dtype=float_or_double)
for ID, name in enumerate(outputstest):
datatest[:, ID] = filetest[path + str(name)][:]
if np.any(np.isnan(datatest[:, ID])):
@@ -100,10 +103,14 @@ for i, model in enumerate(testmodels):
# Tx/Rx position to feed to analytical solution
rxpos = filetest[path].attrs['Position']
txpos = filetest['/srcs/src1/'].attrs['Position']
rxposrelative = ((rxpos[0] - txpos[0]), (rxpos[1] - txpos[1]), (rxpos[2] - txpos[2]))
rxposrelative = ((rxpos[0] - txpos[0]),
(rxpos[1] - txpos[1]),
(rxpos[2] - txpos[2]))
# Analytical solution of a dipole in free space
dataref = hertzian_dipole_fs(filetest.attrs['Iterations'], filetest.attrs['dt'], filetest.attrs['dx_dy_dz'], rxposrelative)
dataref = hertzian_dipole_fs(filetest.attrs['Iterations'],
filetest.attrs['dt'],
filetest.attrs['dx_dy_dz'], rxposrelative)
filetest.close()
@@ -125,19 +132,25 @@ for i, model in enumerate(testmodels):
# Check that type of float used to store fields matches
if filetest[path + outputstest[0]].dtype != fileref[path + outputsref[0]].dtype:
print(Fore.RED + f'WARNING: Type of floating point number in test model ({filetest[path + outputstest[0]].dtype}) does not match type in reference solution ({fileref[path + outputsref[0]].dtype})\n' + Style.RESET_ALL)
logger.warning(f'Type of floating point number in test model ' +
f'({filetest[path + outputstest[0]].dtype}) does not ' +
f'match type in reference solution ({fileref[path + outputsref[0]].dtype})\n')
float_or_doubleref = fileref[path + outputsref[0]].dtype
float_or_doubletest = filetest[path + outputstest[0]].dtype
# Arrays for storing time
timeref = np.zeros((fileref.attrs['Iterations']), dtype=float_or_doubleref)
timeref = np.linspace(0, (fileref.attrs['Iterations'] - 1) * fileref.attrs['dt'], num=fileref.attrs['Iterations']) / 1e-9
timeref = np.linspace(0, (fileref.attrs['Iterations'] - 1) * fileref.attrs['dt'],
num=fileref.attrs['Iterations']) / 1e-9
timetest = np.zeros((filetest.attrs['Iterations']), dtype=float_or_doubletest)
timetest = np.linspace(0, (filetest.attrs['Iterations'] - 1) * filetest.attrs['dt'], num=filetest.attrs['Iterations']) / 1e-9
timetest = np.linspace(0, (filetest.attrs['Iterations'] - 1) * filetest.attrs['dt'],
num=filetest.attrs['Iterations']) / 1e-9
# Arrays for storing field data
dataref = np.zeros((fileref.attrs['Iterations'], len(outputsref)), dtype=float_or_doubleref)
datatest = np.zeros((filetest.attrs['Iterations'], len(outputstest)), dtype=float_or_doubletest)
dataref = np.zeros((fileref.attrs['Iterations'], len(outputsref)),
dtype=float_or_doubleref)
datatest = np.zeros((filetest.attrs['Iterations'], len(outputstest)),
dtype=float_or_doubletest)
for ID, name in enumerate(outputsref):
dataref[:, ID] = fileref[path + str(name)][:]
datatest[:, ID] = filetest[path + str(name)][:]
@@ -152,7 +165,9 @@ for i, model in enumerate(testmodels):
datadiffs = np.zeros(datatest.shape, dtype=np.float64)
for i in range(len(outputstest)):
max = np.amax(np.abs(dataref[:, i]))
datadiffs[:, i] = np.divide(np.abs(dataref[:, i] - datatest[:, i]), max, out=np.zeros_like(dataref[:, i]), where=max != 0) # Replace any division by zero with zero
datadiffs[:, i] = np.divide(np.abs(dataref[:, i] - datatest[:, i]), max,
out=np.zeros_like(dataref[:, i]),
where=max != 0) # Replace any division by zero with zero
# Calculate power (ignore warning from taking a log of any zero values)
with np.errstate(divide='ignore'):
@@ -165,7 +180,13 @@ for i, model in enumerate(testmodels):
testresults[model]['Max diff'] = maxdiff
# Plot datasets
fig1, ((ex1, hx1), (ey1, hy1), (ez1, hz1)) = plt.subplots(nrows=3, ncols=2, sharex=False, sharey='col', subplot_kw=dict(xlabel='Time [ns]'), num=model + '.in', figsize=(20, 10), facecolor='w', edgecolor='w')
fig1, ((ex1, hx1), (ey1, hy1), (ez1, hz1)) = plt.subplots(nrows=3, ncols=2,
sharex=False, sharey='col',
subplot_kw=dict(xlabel='Time [ns]'),
num=model + '.in',
figsize=(20, 10),
facecolor='w',
edgecolor='w')
ex1.plot(timetest, datatest[:, 0], 'r', lw=2, label=model)
ex1.plot(timeref, dataref[:, 0], 'g', lw=2, ls='--', label=model + '(Ref)')
ey1.plot(timetest, datatest[:, 1], 'r', lw=2, label=model)
@@ -178,7 +199,9 @@ for i, model in enumerate(testmodels):
hy1.plot(timeref, dataref[:, 4], 'g', lw=2, ls='--', label=model + '(Ref)')
hz1.plot(timetest, datatest[:, 5], 'r', lw=2, label=model)
hz1.plot(timeref, dataref[:, 5], 'g', lw=2, ls='--', label=model + '(Ref)')
ylabels = ['$E_x$, field strength [V/m]', '$H_x$, field strength [A/m]', '$E_y$, field strength [V/m]', '$H_y$, field strength [A/m]', '$E_z$, field strength [V/m]', '$H_z$, field strength [A/m]']
ylabels = ['$E_x$, field strength [V/m]', '$H_x$, field strength [A/m]',
'$E_y$, field strength [V/m]', '$H_y$, field strength [A/m]',
'$E_z$, field strength [V/m]', '$H_z$, field strength [A/m]']
for i, ax in enumerate(fig1.axes):
ax.set_ylabel(ylabels[i])
ax.set_xlim(0, np.amax(timetest))
@@ -186,14 +209,22 @@ for i, model in enumerate(testmodels):
ax.legend()
# Plot diffs
fig2, ((ex2, hx2), (ey2, hy2), (ez2, hz2)) = plt.subplots(nrows=3, ncols=2, sharex=False, sharey='col', subplot_kw=dict(xlabel='Time [ns]'), num='Diffs: ' + model + '.in', figsize=(20, 10), facecolor='w', edgecolor='w')
fig2, ((ex2, hx2), (ey2, hy2), (ez2, hz2)) = plt.subplots(nrows=3, ncols=2,
sharex=False, sharey='col',
subplot_kw=dict(xlabel='Time [ns]'),
num='Diffs: ' + model + '.in',
figsize=(20, 10),
facecolor='w',
edgecolor='w')
ex2.plot(timeref, datadiffs[:, 0], 'r', lw=2, label='Ex')
ey2.plot(timeref, datadiffs[:, 1], 'r', lw=2, label='Ey')
ez2.plot(timeref, datadiffs[:, 2], 'r', lw=2, label='Ez')
hx2.plot(timeref, datadiffs[:, 3], 'r', lw=2, label='Hx')
hy2.plot(timeref, datadiffs[:, 4], 'r', lw=2, label='Hy')
hz2.plot(timeref, datadiffs[:, 5], 'r', lw=2, label='Hz')
ylabels = ['$E_x$, difference [dB]', '$H_x$, difference [dB]', '$E_y$, difference [dB]', '$H_y$, difference [dB]', '$E_z$, difference [dB]', '$H_z$, difference [dB]']
ylabels = ['$E_x$, difference [dB]', '$H_x$, difference [dB]',
'$E_y$, difference [dB]', '$H_y$, difference [dB]',
'$E_z$, difference [dB]', '$H_z$, difference [dB]']
for i, ax in enumerate(fig2.axes):
ax.set_ylabel(ylabels[i])
ax.set_xlim(0, np.amax(timetest))
@@ -203,14 +234,21 @@ for i, model in enumerate(testmodels):
# Save a PDF/PNG of the figure
filediffs = file.stem + '_diffs'
filediffs = file.parent / Path(filediffs)
# fig1.savefig(file.with_suffix('.pdf'), dpi=None, format='pdf', bbox_inches='tight', pad_inches=0.1)
# fig2.savefig(savediffs.with_suffix('.pdf'), dpi=None, format='pdf', bbox_inches='tight', pad_inches=0.1)
fig1.savefig(file.with_suffix('.png'), dpi=150, format='png', bbox_inches='tight', pad_inches=0.1)
fig2.savefig(filediffs.with_suffix('.png'), dpi=150, format='png', bbox_inches='tight', pad_inches=0.1)
# fig1.savefig(file.with_suffix('.pdf'), dpi=None, format='pdf',
# bbox_inches='tight', pad_inches=0.1)
# fig2.savefig(savediffs.with_suffix('.pdf'), dpi=None, format='pdf',
# bbox_inches='tight', pad_inches=0.1)
fig1.savefig(file.with_suffix('.png'), dpi=150, format='png',
bbox_inches='tight', pad_inches=0.1)
fig2.savefig(filediffs.with_suffix('.png'), dpi=150, format='png',
bbox_inches='tight', pad_inches=0.1)
# Summary of results
for name, data in sorted(testresults.items()):
if 'analytical' in name:
print(Fore.CYAN + f"Test '{name}.in' using v.{data['Test version']} compared to analytical solution. Max difference {data['Max diff']:.2f}dB." + Style.RESET_ALL)
logger.info(f"Test '{name}.in' using v.{data['Test version']} compared " +
f"to analytical solution. Max difference {data['Max diff']:.2f}dB.")
else:
print(Fore.CYAN + f"Test '{name}.in' using v.{data['Test version']} compared to reference solution using v.{data['Ref version']}. Max difference {data['Max diff']:.2f}dB." + Style.RESET_ALL)
logger.info(f"Test '{name}.in' using v.{data['Test version']} compared to " +
f"reference solution using v.{data['Ref version']}. Max difference " +
f"{data['Max diff']:.2f}dB.")